| text (string, lengths 4 – 1.02M) | meta (dict) |
|---|---|
"""Client side of the heat engine RPC API."""
from oslo_utils import reflection
from heat.common import messaging
from heat.rpc import api as rpc_api
class EngineClient(object):
"""Client side of the heat engine rpc API.
API version history::
1.0 - Initial version.
1.1 - Add support_status argument to list_resource_types()
1.4 - Add support for service list
1.9 - Add template_type option to generate_template()
1.10 - Add support for software config list
1.11 - Add support for template versions list
1.12 - Add with_detail option for stack resources list
1.13 - Add support for template functions list
1.14 - Add cancel_with_rollback option to stack_cancel_update
1.15 - Add preview_update_stack() call
1.16 - Adds version, type_name to list_resource_types()
1.17 - Add files to validate_template
1.18 - Add show_nested to validate_template
1.19 - Add show_output and list_outputs for returning stack outputs
1.20 - Add resolve_outputs to stack show
1.21 - Add deployment_id to create_software_deployment
1.22 - Add support for stack export
1.23 - Add environment_files to create/update/preview/validate
1.24 - Adds ignorable_errors to validate_template
1.25 - list_stack_resource filter update
1.26 - Add mark_unhealthy
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
self._client = messaging.get_rpc_client(
topic=rpc_api.ENGINE_TOPIC,
version=self.BASE_RPC_API_VERSION)
@staticmethod
def make_msg(method, **kwargs):
return method, kwargs
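# Illustrative note (not part of the original module): make_msg() just pairs the
# RPC method name with its keyword arguments, e.g.
#   make_msg('show_stack', stack_identity=identity)
#   -> ('show_stack', {'stack_identity': identity})
# call() and cast() below unpack this tuple before handing it to oslo.messaging.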
def call(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.call(ctxt, method, **kwargs)
def cast(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.cast(ctxt, method, **kwargs)
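# Note (illustrative, not from the original file): when a version is given, the
# request is pinned to that RPC version via prepare(), which is how the methods
# below tag calls with the version that introduced their newest arguments, e.g.
#   self.call(ctxt, self.make_msg('list_stacks', ...), version='1.8')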
def local_error_name(self, error):
"""Returns the name of the error with any _Remote postfix removed.
:param error: Remote raised error to derive the name from.
"""
error_name = reflection.get_class_name(error, fully_qualified=False)
return error_name.split('_Remote')[0]
def ignore_error_named(self, error, name):
"""Raises the error unless its local name matches the supplied name.
:param error: Remote raised error to derive the local name from.
:param name: Name to compare local name to.
"""
if self.local_error_name(error) != name:
raise error
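# Example usage (a sketch, not from the original file; the exception name
# 'EntityNotFound' is only an assumed illustration): callers typically wrap an
# RPC call and swallow one specific remote error by its local name.
#   try:
#       engine_client.delete_stack(ctxt, stack_identity)
#   except Exception as exc:
#       engine_client.ignore_error_named(exc, 'EntityNotFound')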
def identify_stack(self, ctxt, stack_name):
"""Returns the full stack identifier for a single, live stack.
:param ctxt: RPC context.
:param stack_name: Name of the stack whose identifier you want.
"""
return self.call(ctxt, self.make_msg('identify_stack',
stack_name=stack_name))
def list_stacks(self, ctxt, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""Returns attributes of all stacks.
It supports pagination (``limit`` and ``marker``), sorting
(``sort_keys`` and ``sort_dir``) and filtering (``filters``) of the
results.
:param ctxt: RPC context.
:param limit: the number of stacks to list (integer or string)
:param marker: the ID of the last item in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc')
:param filters: a dict with attribute:value to filter the list
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, show soft-deleted stacks
:param show_nested: if true, show nested stacks
:param show_hidden: if true, show hidden stacks
:param tags: show stacks containing these tags, combine multiple
tags using the boolean AND expression
:param tags_any: show stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: show stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: show stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: a list of stacks
"""
return self.call(ctxt,
self.make_msg('list_stacks', limit=limit,
sort_keys=sort_keys, marker=marker,
sort_dir=sort_dir, filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags, tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def count_stacks(self, ctxt, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""Returns the number of stacks that match the given filters.
:param ctxt: RPC context.
:param filters: a dict of ATTR:VALUE to match against stacks
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, count will include the deleted stacks
:param show_nested: if true, count will include nested stacks
:param show_hidden: if true, count will include hidden stacks
:param tags: count stacks containing these tags, combine multiple tags
using the boolean AND expression
:param tags_any: count stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: count stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: count stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: an integer representing the number of matched stacks
"""
return self.call(ctxt, self.make_msg('count_stacks',
filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def show_stack(self, ctxt, stack_identity, resolve_outputs=True):
"""Returns detailed information about one or all stacks.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to show, or None to
show all
:param resolve_outputs: If True, stack outputs will be resolved
"""
return self.call(ctxt, self.make_msg('show_stack',
stack_identity=stack_identity,
resolve_outputs=resolve_outputs),
version='1.20')
def preview_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None):
"""Simulates a new stack using the provided template.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self.call(ctxt,
self.make_msg('preview_stack', stack_name=stack_name,
template=template,
params=params, files=files,
environment_files=environment_files,
args=args),
version='1.23')
def create_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None):
"""Creates a new stack using the template provided.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self._create_stack(ctxt, stack_name, template, params, files,
args, environment_files=environment_files)
def _create_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None,
owner_id=None, nested_depth=0, user_creds_id=None,
stack_user_project_id=None, parent_resource_name=None):
"""Internal interface for engine-to-engine communication via RPC.
Allows some additional options which should not be exposed to users via
the API:
:param owner_id: parent stack ID for nested stacks
:param nested_depth: nested depth for nested stacks
:param user_creds_id: user_creds record for nested stack
:param stack_user_project_id: stack user project for nested stack
:param parent_resource_name: the parent resource name
"""
return self.call(
ctxt, self.make_msg('create_stack', stack_name=stack_name,
template=template,
params=params, files=files,
environment_files=environment_files,
args=args, owner_id=owner_id,
nested_depth=nested_depth,
user_creds_id=user_creds_id,
stack_user_project_id=stack_user_project_id,
parent_resource_name=parent_resource_name),
version='1.23')
def update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None):
"""Updates an existing stack based on the provided template and params.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to update.
:param template: Template with which to update the stack.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self.call(ctxt,
self.make_msg('update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
environment_files=environment_files,
args=args),
version='1.23')
def preview_update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None):
"""Returns the resources that would be changed in an update.
Based on the provided template and parameters.
Requires RPC version 1.15 or above.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you wish to update.
:param template: New template for the stack.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self.call(ctxt,
self.make_msg('preview_update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
environment_files=environment_files,
args=args,
),
version='1.23')
def validate_template(self, ctxt, template, params=None, files=None,
environment_files=None, show_nested=False,
ignorable_errors=None):
"""Uses the stack parser to check the validity of a template.
:param ctxt: RPC context.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment/template.
:param environment_files: ordered list of environment file names
included in the files dict
:param show_nested: if True nested templates will be validated
:param ignorable_errors: List of error_code to be ignored as part of
validation
"""
return self.call(ctxt, self.make_msg(
'validate_template',
template=template,
params=params,
files=files,
show_nested=show_nested,
environment_files=environment_files,
ignorable_errors=ignorable_errors),
version='1.24')
def authenticated_to_backend(self, ctxt):
"""Validate the credentials in the RPC context.
Verify that the credentials in the RPC context are valid for the
current cloud backend.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('authenticated_to_backend'))
def get_template(self, ctxt, stack_identity):
"""Get the template.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want the template for.
"""
return self.call(ctxt, self.make_msg('get_template',
stack_identity=stack_identity))
def delete_stack(self, ctxt, stack_identity, cast=True):
"""Deletes a given stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to delete.
:param cast: cast the message or use call (default: True)
"""
rpc_method = self.cast if cast else self.call
return rpc_method(ctxt,
self.make_msg('delete_stack',
stack_identity=stack_identity))
def abandon_stack(self, ctxt, stack_identity):
"""Deletes a given stack, but its resources are not deleted.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to abandon.
"""
return self.call(ctxt,
self.make_msg('abandon_stack',
stack_identity=stack_identity))
def list_resource_types(self,
ctxt,
support_status=None,
type_name=None,
heat_version=None):
"""Get a list of valid resource types.
:param ctxt: RPC context.
:param support_status: Support status of resource type
:param type_name: Resource type's name (regular expression allowed)
:param heat_version: Heat version
"""
return self.call(ctxt, self.make_msg('list_resource_types',
support_status=support_status,
type_name=type_name,
heat_version=heat_version),
version='1.16')
def list_template_versions(self, ctxt):
"""Get a list of available template versions.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('list_template_versions'),
version='1.11')
def list_template_functions(self, ctxt, template_version):
"""Get a list of available functions in a given template.
:param ctxt: RPC context
:param template_version: Version of the template whose function list
you want to get.
"""
return self.call(ctxt, self.make_msg(
'list_template_functions', template_version=template_version),
version='1.13')
def resource_schema(self, ctxt, type_name):
"""Get the schema for a resource type.
:param ctxt: RPC context.
:param type_name: Name of the resource type to get the schema for.
"""
return self.call(ctxt, self.make_msg('resource_schema',
type_name=type_name))
def generate_template(self, ctxt, type_name, template_type='cfn'):
"""Generate a template based on the specified type.
:param ctxt: RPC context.
:param type_name: The resource type name to generate a template for.
:param template_type: the template type to generate, cfn or hot.
"""
return self.call(ctxt, self.make_msg('generate_template',
type_name=type_name,
template_type=template_type),
version='1.9')
def list_events(self, ctxt, stack_identity, filters=None, limit=None,
marker=None, sort_keys=None, sort_dir=None,):
"""Lists all events associated with a given stack.
It supports pagination (``limit`` and ``marker``),
sorting (``sort_keys`` and ``sort_dir``) and filtering (``filters``)
of the results.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to get events for
:param filters: a dict with attribute:value to filter the list
:param limit: the number of events to list (integer or string)
:param marker: the ID of the last event in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc').
"""
return self.call(ctxt, self.make_msg('list_events',
stack_identity=stack_identity,
filters=filters,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir))
def describe_stack_resource(self, ctxt, stack_identity, resource_name,
with_attr=False):
"""Get detailed resource information about a particular resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource.
"""
return self.call(ctxt,
self.make_msg('describe_stack_resource',
stack_identity=stack_identity,
resource_name=resource_name,
with_attr=with_attr),
version='1.2')
def find_physical_resource(self, ctxt, physical_resource_id):
"""Return an identifier for the resource.
:param ctxt: RPC context.
:param physical_resource_id: The physical resource ID to look up.
"""
return self.call(ctxt,
self.make_msg(
'find_physical_resource',
physical_resource_id=physical_resource_id))
def describe_stack_resources(self, ctxt, stack_identity, resource_name):
"""Get detailed resource information about one or more resources.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource.
"""
return self.call(ctxt, self.make_msg('describe_stack_resources',
stack_identity=stack_identity,
resource_name=resource_name))
def list_stack_resources(self, ctxt, stack_identity,
nested_depth=0, with_detail=False,
filters=None):
"""List the resources belonging to a stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param nested_depth: Levels of nested stacks from which to list resources.
:param with_detail: show detail for resources in list.
:param filters: a dict with attribute:value to search the resources
"""
return self.call(ctxt,
self.make_msg('list_stack_resources',
stack_identity=stack_identity,
nested_depth=nested_depth,
with_detail=with_detail,
filters=filters),
version='1.25')
def stack_suspend(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_suspend',
stack_identity=stack_identity))
def stack_resume(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_resume',
stack_identity=stack_identity))
def stack_check(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_check',
stack_identity=stack_identity))
def stack_cancel_update(self, ctxt, stack_identity,
cancel_with_rollback=True):
return self.call(ctxt,
self.make_msg(
'stack_cancel_update',
stack_identity=stack_identity,
cancel_with_rollback=cancel_with_rollback),
version='1.14')
def resource_signal(self, ctxt, stack_identity, resource_name, details,
sync_call=False):
"""Generate an alarm on the resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource.
:param details: the details of the signal.
"""
return self.call(ctxt, self.make_msg('resource_signal',
stack_identity=stack_identity,
resource_name=resource_name,
details=details,
sync_call=sync_call),
version='1.3')
def resource_mark_unhealthy(self, ctxt, stack_identity, resource_name,
mark_unhealthy, resource_status_reason=None):
"""Mark the resource as unhealthy or healthy.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource.
:param mark_unhealthy: indicates whether the resource is unhealthy.
:param resource_status_reason: reason for health change.
"""
return self.call(
ctxt,
self.make_msg('resource_mark_unhealthy',
stack_identity=stack_identity,
resource_name=resource_name,
mark_unhealthy=mark_unhealthy,
resource_status_reason=resource_status_reason),
version='1.26')
def create_watch_data(self, ctxt, watch_name, stats_data):
"""Creates data for CloudWatch and WaitConditions.
This can be used by CloudWatch and WaitConditions to treat HA service
events like any other CloudWatch datapoint.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm
:param stats_data: The data to post.
"""
return self.call(ctxt, self.make_msg('create_watch_data',
watch_name=watch_name,
stats_data=stats_data))
def show_watch(self, ctxt, watch_name):
"""Returns the attributes of one watch/alarm.
The show_watch method returns the attributes of one watch
or all watches if no watch_name is passed.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch',
watch_name=watch_name))
def show_watch_metric(self, ctxt, metric_namespace=None, metric_name=None):
"""Returns the datapoints for a metric.
The show_watch_metric method returns the datapoints associated
with a specified metric, or all metrics if no metric_name is passed.
:param ctxt: RPC context.
:param metric_namespace: Name of the namespace you want to see,
or None to see all
:param metric_name: Name of the metric you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch_metric',
metric_namespace=metric_namespace,
metric_name=metric_name))
def set_watch_state(self, ctxt, watch_name, state):
"""Temporarily set the state of a given watch.
:param ctxt: RPC context.
:param watch_name: Name of the watch
:param state: State (must be one defined in WatchRule class)
"""
return self.call(ctxt, self.make_msg('set_watch_state',
watch_name=watch_name,
state=state))
def get_revision(self, ctxt):
return self.call(ctxt, self.make_msg('get_revision'))
def show_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('show_software_config',
config_id=config_id))
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
return self.call(cnxt,
self.make_msg('list_software_configs',
limit=limit,
marker=marker,
tenant_safe=tenant_safe),
version='1.10')
def create_software_config(self, cnxt, group, name, config,
inputs=None, outputs=None, options=None):
inputs = inputs or []
outputs = outputs or []
options = options or {}
return self.call(cnxt, self.make_msg('create_software_config',
group=group,
name=name,
config=config,
inputs=inputs,
outputs=outputs,
options=options))
def delete_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('delete_software_config',
config_id=config_id))
def list_software_deployments(self, cnxt, server_id=None):
return self.call(cnxt, self.make_msg('list_software_deployments',
server_id=server_id))
def metadata_software_deployments(self, cnxt, server_id):
return self.call(cnxt, self.make_msg('metadata_software_deployments',
server_id=server_id))
def show_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('show_software_deployment',
deployment_id=deployment_id))
def create_software_deployment(self, cnxt, server_id, config_id=None,
input_values=None, action='INIT',
status='COMPLETE', status_reason='',
stack_user_project_id=None,
deployment_id=None):
input_values = input_values or {}
return self.call(cnxt, self.make_msg(
'create_software_deployment',
server_id=server_id,
config_id=config_id,
deployment_id=deployment_id,
input_values=input_values,
action=action,
status=status,
status_reason=status_reason,
stack_user_project_id=stack_user_project_id))
def update_software_deployment(self, cnxt, deployment_id,
config_id=None, input_values=None,
output_values=None, action=None,
status=None, status_reason=None,
updated_at=None):
return self.call(
cnxt, self.make_msg('update_software_deployment',
deployment_id=deployment_id,
config_id=config_id,
input_values=input_values,
output_values=output_values,
action=action,
status=status,
status_reason=status_reason,
updated_at=updated_at),
version='1.5')
def delete_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('delete_software_deployment',
deployment_id=deployment_id))
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at=None):
return self.call(
cnxt, self.make_msg('signal_software_deployment',
deployment_id=deployment_id,
details=details,
updated_at=updated_at),
version='1.6')
def stack_snapshot(self, ctxt, stack_identity, name):
return self.call(ctxt, self.make_msg('stack_snapshot',
stack_identity=stack_identity,
name=name))
def show_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('show_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def delete_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('delete_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def stack_list_snapshots(self, cnxt, stack_identity):
return self.call(cnxt, self.make_msg('stack_list_snapshots',
stack_identity=stack_identity))
def stack_restore(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('stack_restore',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def list_services(self, cnxt):
return self.call(cnxt, self.make_msg('list_services'), version='1.4')
def list_outputs(self, cntx, stack_identity):
return self.call(cntx, self.make_msg('list_outputs',
stack_identity=stack_identity),
version='1.19')
def show_output(self, cntx, stack_identity, output_key):
return self.call(cntx, self.make_msg('show_output',
stack_identity=stack_identity,
output_key=output_key),
version='1.19')
def export_stack(self, ctxt, stack_identity):
"""Exports the stack data in JSON format.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to export.
"""
return self.call(ctxt,
self.make_msg('export_stack',
stack_identity=stack_identity),
version='1.22')
| {
"content_hash": "7da04148e59cc7e787f7d64116479724",
"timestamp": "",
"source": "github",
"line_count": 763,
"max_line_length": 79,
"avg_line_length": 46.2437745740498,
"alnum_prop": 0.5345765786192042,
"repo_name": "jasondunsmore/heat",
"id": "be4ece09dd25f20eca69aba6a5645a0411fbbd22",
"size": "35893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/rpc/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7819626"
},
{
"name": "Shell",
"bytes": "33158"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django_stripe.shortcuts import stripe
from django_stripe.contrib.registration.backends import StripeSubscriptionBackend
from django_stripe.contrib.registration.signals import user_registered
from django_stripe.signals import (upcoming_invoice_updated, invoice_ready, \
recurring_payment_failed, subscription_final_payment_attempt_failed, StripeWebhook)
from .models import UserProfile
@receiver(post_save, sender=User, \
dispatch_uid='profiles.receivers.create_user_profile')
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile, new = UserProfile.objects.get_or_create(user=instance)
@receiver(user_registered, sender=StripeSubscriptionBackend, \
dispatch_uid='profiles.receivers.link_stripe_customer')
def link_stripe_customer(sender, user, request, customer, plan=None, **kwargs):
user_profile = user.get_profile()
user_profile.customer_id = customer.id
user_profile.card_last4 = customer.active_card.last_4
user_profile.plan = plan
try:
user_profile.trial_end = datetime.utcfromtimestamp(customer.subscription.trial_end)
except AttributeError:
pass
user_profile.save()
upcoming_invoice_updated.send(sender=None, customer=customer)
@receiver(invoice_ready, sender=StripeWebhook, \
dispatch_uid='profiles.receivers.invoice_user')
def invoice_user(sender, customer, invoice, **kwargs):
try:
user_profile = UserProfile.objects.get(customer_id=customer)
amount = int(user_profile.collaborator_count * user_profile.get_price())
if not user_profile.trialing and amount > 0:
stripe.InvoiceItem.create( \
customer=customer,
amount=amount * 100,
currency='usd',
description="%s Collaborators" \
% user_profile.collaborator_count
)
upcoming_invoice_updated.send(sender=None, customer=customer)
except UserProfile.DoesNotExist:
pass
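# Worked example of the invoicing math above (numbers are illustrative): with
# collaborator_count == 3 and get_price() == 5.0, amount is int(15) and the
# InvoiceItem is created with amount 1500, i.e. in cents as Stripe expects.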
@receiver(recurring_payment_failed, sender=StripeWebhook, \
dispatch_uid='profiles.receviers.update_payment_attempts')
def update_payment_attempts(sender, customer, attempt, payment, **kwargs):
try:
user_profile = UserProfile.objects.get(customer_id=customer)
user_profile.payment_attempts = int(attempt)
user_profile.last_payment_attempt = datetime.utcfromtimestamp(payment['time'])
user_profile.save()
except UserProfile.DoesNotExist:
pass
@receiver(subscription_final_payment_attempt_failed, sender=StripeWebhook, \
dispatch_uid='profiles.receviers.lock_account')
def lock_account(sender, customer, subscription, **kwargs):
try:
user = User.objects.get(profile__customer_id=customer)
user.is_active = False
user.save()
except User.DoesNotExist:
pass
| {
"content_hash": "8dd8d62aae6c10759f015c657f22091b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 91,
"avg_line_length": 38.82051282051282,
"alnum_prop": 0.7123513870541611,
"repo_name": "amccloud/django-stripe",
"id": "d5d45a75e267f0a6625dafdd3592800dd699f925",
"size": "3028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/profiles/receivers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26475"
}
],
"symlink_target": ""
} |
"""A HTTPSConnection/Handler with additional proxy and cert validation features.
In particular, monkey patches in Python r74203 to provide support for CONNECT
proxies and adds SSL cert validation if the ssl module is present.
"""
__author__ = "{frew,nick.johnson}@google.com (Fred Wulff and Nick Johnson)"
import base64
import httplib
import logging
import re
import socket
import urllib2
from urllib import splittype
from urllib import splituser
from urllib import splitpasswd
class InvalidCertificateException(httplib.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
cert: The SSL certificate (as a dictionary) the host returned.
reason: Why the certificate is considered invalid.
"""
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s\n'
'To learn more, see '
'http://code.google.com/appengine/kb/general.html#rpcssl' %
(self.host, self.reason, self.cert))
def can_validate_certs():
"""Return True if we have the SSL package and can validate certificates."""
try:
import ssl
return True
except ImportError:
return False
def _create_fancy_connection(tunnel_host=None, key_file=None,
cert_file=None, ca_certs=None):
# This abomination brought to you by the fact that
# the HTTPHandler creates the connection instance in the middle
# of do_open so we need to add the tunnel host to the class.
class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
"""An HTTPS connection that uses a proxy defined by the enclosing scope."""
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self._tunnel_host = tunnel_host
if tunnel_host:
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
try:
import ssl
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
except ImportError:
pass
def _tunnel(self):
self._set_hostport(self._tunnel_host, None)
logging.info("Connecting through tunnel to: %s:%d",
self.host, self.port)
self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port))
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
(_, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error, "Tunnel connection failed: %d %s" % (
code, message.strip())
while True:
line = response.fp.readline()
if line == "\r\n":
break
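# Illustrative wire exchange for the CONNECT handshake above (host and port are
# example values only): the client sends
#   CONNECT www.example.com:443 HTTP/1.0
# and expects a status line such as
#   HTTP/1.0 200 Connection established
# then reads and discards response headers until the bare "\r\n" line.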
def _get_valid_hosts_for_cert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
# Return a list of commonName fields
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _validate_certificate_hostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._get_valid_hosts_for_cert(cert)
for host in hosts:
# Convert the glob-style hostname expression (eg, '*.google.com') into a
# valid regular expression.
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
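# Worked example (illustrative) of the glob-to-regex conversion above:
#   '*.google.com'.replace('.', '\.').replace('*', '[^.]*') -> '[^.]*\.google\.com'
# which matches 'www.google.com' but not 'www.google.com.evil.example',
# because the pattern is anchored with ^ and $.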
def connect(self):
# TODO(frew): When we drop support for <2.6 (in the far distant future),
# change this to socket.create_connection.
self.sock = _create_connection((self.host, self.port))
if self._tunnel_host:
self._tunnel()
# ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
# with fallback.
try:
import ssl
self.sock = ssl.wrap_socket(self.sock,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
cert_reqs=self.cert_reqs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._validate_certificate_hostname(cert, hostname):
raise InvalidCertificateException(hostname, cert,
'hostname mismatch')
except ImportError:
ssl = socket.ssl(self.sock,
keyfile=self.key_file,
certfile=self.cert_file)
self.sock = httplib.FakeSocket(self.sock, ssl)
return PresetProxyHTTPSConnection
# Here to end of _create_connection copied wholesale from Python 2.6's socket.py
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error, msg
class FancyRequest(urllib2.Request):
"""A request that allows the use of a CONNECT proxy."""
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._tunnel_host = None
self._key_file = None
self._cert_file = None
self._ca_certs = None
def set_proxy(self, host, type):
saved_type = None
if self.get_type() == "https" and not self._tunnel_host:
self._tunnel_host = self.get_host()
saved_type = self.get_type()
urllib2.Request.set_proxy(self, host, type)
if saved_type:
# Don't set self.type, we want to preserve the
# type for tunneling.
self.type = saved_type
def set_ssl_info(self, key_file=None, cert_file=None, ca_certs=None):
self._key_file = key_file
self._cert_file = cert_file
self._ca_certs = ca_certs
class FancyProxyHandler(urllib2.ProxyHandler):
"""A ProxyHandler that works with CONNECT-enabled proxies."""
# Taken verbatim from /usr/lib/python2.5/urllib2.py
def _parse_proxy(self, proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
# We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
def proxy_open(self, req, proxy, type):
# This block is copied wholesale from Python2.6 urllib2.
# It is idempotent, so the superclass method call executes as normal
# if invoked.
orig_type = req.get_type()
proxy_type, user, password, hostport = self._parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = "%s:%s" % (urllib2.unquote(user), urllib2.unquote(password))
creds = base64.b64encode(user_pass).strip()
# Later calls overwrite earlier calls for the same header
req.add_header("Proxy-authorization", "Basic " + creds)
hostport = urllib2.unquote(hostport)
req.set_proxy(hostport, proxy_type)
# This condition is the change
if orig_type == "https":
return None
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type)
class FancyHTTPSHandler(urllib2.HTTPSHandler):
"""An HTTPSHandler that works with CONNECT-enabled proxies."""
def do_open(self, http_class, req):
# Intentionally very specific so as to opt for false negatives
# rather than false positives.
try:
return urllib2.HTTPSHandler.do_open(
self,
_create_fancy_connection(req._tunnel_host,
req._key_file,
req._cert_file,
req._ca_certs),
req)
except urllib2.URLError, url_error:
try:
import ssl
if (type(url_error.reason) == ssl.SSLError and
url_error.reason.args[0] == 1):
# Display the reason to the user. Need to use args for python2.5
# compat.
raise InvalidCertificateException(req.host, '',
url_error.reason.args[1])
except ImportError:
pass
raise url_error
# We have to implement this so that we persist the tunneling behavior
# through redirects.
class FancyRedirectHandler(urllib2.HTTPRedirectHandler):
"""A redirect handler that persists CONNECT-enabled proxy information."""
def redirect_request(self, req, *args, **kwargs):
new_req = urllib2.HTTPRedirectHandler.redirect_request(
self, req, *args, **kwargs)
# Same thing as in our set_proxy implementation, but in this case
# we've only got a Request to work with, so it was this or copy
# everything over piecemeal.
if hasattr(req, "_tunnel_host") and isinstance(new_req, urllib2.Request):
if new_req.get_type() == "https":
new_req._tunnel_host = new_req.get_host()
new_req.set_proxy(req.host, "https")
new_req.type = "https"
new_req._key_file = req._key_file
new_req._cert_file = req._cert_file
new_req._ca_certs = req._ca_certs
return new_req
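# Illustrative wiring of the pieces above (a sketch, not part of the original
# module; the URL and certificate path are placeholders):
#   opener = urllib2.build_opener(FancyProxyHandler(),
#                                 FancyHTTPSHandler(),
#                                 FancyRedirectHandler())
#   request = FancyRequest('https://www.example.com/')
#   request.set_ssl_info(ca_certs='/path/to/cacerts.txt')
#   response = opener.open(request)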
| {
"content_hash": "e7c60bdbcfb3ded07cc322b177cde36b",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 80,
"avg_line_length": 34.873655913978496,
"alnum_prop": 0.6256070299853542,
"repo_name": "octavioturra/aritial",
"id": "1ae40b3a840d0b1e88d1752b8416f8b1f7ea95fd",
"size": "13104",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "google_appengine/lib/fancy_urllib/fancy_urllib/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "CSS",
"bytes": "51574"
},
{
"name": "Erlang",
"bytes": "147"
},
{
"name": "HTML",
"bytes": "131267"
},
{
"name": "JavaScript",
"bytes": "277238"
},
{
"name": "Perl",
"bytes": "392"
},
{
"name": "Python",
"bytes": "5116447"
},
{
"name": "Shell",
"bytes": "763"
}
],
"symlink_target": ""
} |
"""
A management command which imports users from the cs.usu.edu.ru/home
student listing. It reads the ``default.xml`` document from STDIN,
creates any missing ``User`` objects and assigns a random password to
accounts that do not yet have a usable one.
"""
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from xml.dom.minidom import parse
import sys
import random
import string
def get_users_from_cs_xml(cs_xml_fn):
doc = parse(cs_xml_fn)
for student_el in doc.getElementsByTagName("student"):
student = {
'username': student_el.getAttribute('dir'),
'name': student_el.getAttribute('name'),
'email': student_el.getAttribute('email'),
'year': student_el.getAttribute('year'),
}
yield student
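# The XML read above is expected to look roughly like this (the root element
# name and all attribute values are made-up illustrations; only the attribute
# names come from the parser):
#   <students>
#     <student dir="jdoe" name="Doe John" email="jdoe@example.com" year="2013"/>
#   </students>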
class Command(NoArgsCommand):
help = "Import users from cs.usu.edu.ru/home. Put default.xml to STDIN"
def handle_noargs(self, **options):
for student in get_users_from_cs_xml(sys.stdin):
last_name, first_name = student['name'].split(' ', 1)
username = student['username']
email = student['email']
user, created = User.objects.get_or_create(username=username, first_name=first_name, last_name=last_name,
email=email)
if (user.password == "") or (user.has_usable_password() is False):
user.set_password(''.join(random.choice(string.letters) for i in xrange(20)))
user.save()
print "{0} {1}".format(user, user.get_full_name().encode("utf-8"))
| {
"content_hash": "e06f16a52ed8f62202c0cf0f3805df91",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 117,
"avg_line_length": 35.340425531914896,
"alnum_prop": 0.6285370258880193,
"repo_name": "znick/anytask",
"id": "31cd29fe46fb2cc9e365cab3624eaf907308847e",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anytask/users/management/commands/import_cs_users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89720"
},
{
"name": "Dockerfile",
"bytes": "7709"
},
{
"name": "HTML",
"bytes": "826638"
},
{
"name": "JavaScript",
"bytes": "296467"
},
{
"name": "Less",
"bytes": "7302"
},
{
"name": "Python",
"bytes": "965878"
},
{
"name": "Shell",
"bytes": "30922"
}
],
"symlink_target": ""
} |
"""
Helpers for converting Strava's units to something more practical.
These are really just thin wrappers to the brilliant 'units' python library.
"""
from units import unit
import units.predefined
# Setup the units we will use in this module.
units.predefined.define_units()
meter = meters = unit('m')
second = seconds = unit('s')
hour = hours = unit('h')
foot = feet = unit('ft')
mile = miles = unit('mi')
kilometer = kilometers = unit('km')
meters_per_second = meter / second
miles_per_hour = mph = mile / hour
kilometers_per_hour = kph = kilometer / hour
kilogram = kilograms = kg = kgs = unit('kg')
pound = pounds = lb = lbs = unit('lb')
def c2f(celsius):
""" Convert Celsius to Fahrenheit """
return (9.0 / 5.0) * celsius + 32
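# Quick check of the formula above: c2f(0) -> 32.0 and c2f(100) -> 212.0.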
def timedelta_to_seconds(td):
"""
Converts a timedelta to total seconds, including support for microseconds.
Return value is (potentially truncated) integer.
(This is built-in in Python >= 2.7, but we are still supporting Python 2.6 here.)
:param td: The timedelta object
:type td: :class:`datetime.timedelta`
:return: The number of total seconds in the timedelta object.
:rtype: int
"""
if td is None:
return None
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
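# Worked example of the expression above (illustrative): for
# timedelta(days=1, seconds=30, microseconds=500000) it computes
# (500000 + (30 + 1 * 24 * 3600) * 10**6) / 10**6 == 86430 on Python 2,
# i.e. the half second is truncated, as the docstring notes.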
| {
"content_hash": "d4a2967c05db10c82079714f49f890cf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 85,
"avg_line_length": 28.195652173913043,
"alnum_prop": 0.6707787201233616,
"repo_name": "Wisees/stravalib",
"id": "7f19e168f608415dcbf2e67a23a8dae2d3c5371e",
"size": "1297",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stravalib/unithelper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "176548"
}
],
"symlink_target": ""
} |
import cards
import hands
import preflop_sim
import afterflop_sim
import pickle
# add more features if we have time
features = { 'high-pair':0, 'middle-pair':1, 'low-pair':2, '2-pair-good':3, '3-kind':4, 'straight':5, 'flush':6, 'full-house':7, '4-kind':8, 'straight-flush':9, 'really-good-high':10, 'good-high':11, 'middle-high':12, 'bad-high':13, 'really-bad-high':14, '2-pair-bad':15 }
def getHandCode(herohand, table):
handscore = 0
herohandplus = herohand.list_rep() + table.list_rep()
evaluated = hands.evaluate_hand(herohandplus)
if evaluated[2] > 3:
handscore = evaluated[2]
elif evaluated[2] == 3:
high_pair = evaluated[1][0]
highest_card = True
for e in evaluated[1]:
if high_pair < e:
highest_card = False
if highest_card:
handscore = afterflop_sim.features['2-pair-good']
else:
handscore = afterflop_sim.features['2-pair-bad']
elif evaluated[2] == 2:
high_pair = evaluated[1][0]
num_card_greater = 0
for e in evaluated[1]:
if high_pair < e:
num_card_greater += 1
if num_card_greater == 0:
handscore = afterflop_sim.features['high-pair']
elif num_card_greater == 1:
handscore = afterflop_sim.features['middle-pair']
else:
handscore = afterflop_sim.features['low-pair']
elif evaluated[2] == 1:
hand_strength = preflop_sim.getPreflopStrength(herohand)
win_ratio = hand_strength[0] / (hand_strength[0] + hand_strength[2])
if win_ratio > afterflop_sim.REALLYGOODHAND:
handscore = features['really-good-high']
elif win_ratio > afterflop_sim.GOODHAND:
handscore = features['good-high']
elif win_ratio > afterflop_sim.MIDDLEHAND:
handscore = features['middle-high']
elif win_ratio > afterflop_sim.BADHAND:
handscore = features['bad-high']
else:
handscore = features['really-bad-high']
return handscore
def simulate(filename = "postriver_values", trials = 0):
#mat = []
#for j in range(16):
# mat.append([0,0,0])
mat = pickle.load(open(filename, "rb"))
for i in range(trials):
theDeck = cards.Deck()
theDeck.shuffle()
herohand = cards.Hand()
adversaryhand = cards.Hand()
table = cards.Hand()
for j in range(2):
herohand.add_card(theDeck.deal_card())
adversaryhand.add_card(theDeck.deal_card())
for j in range(5):
table.add_card(theDeck.deal_card())
handscore = getHandCode(herohand, table)
result = hands.compare_hands(herohand, adversaryhand, table)
if result == 'left':
mat[handscore][0] += 1
elif result == 'none':
mat[handscore][1] += 1
elif result == 'right':
mat[handscore][2] += 1
print mat
pickle.dump(mat, open(filename, "wb"))
def getStrength(hand, table, filename = "postriver_values"):
mat = pickle.load(open(filename, "rb"))
code = getHandCode(hand, table)
chances = mat[code]
s = chances[0] + chances[1] + chances[2]
return [chances[0] / float(s), chances[1] / float(s), chances[2] / float(s)]
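# Illustrative example of the normalization above: if the stored counts for a
# hand code were [600, 50, 350] (wins, ties, losses), getStrength would return
# [0.6, 0.05, 0.35].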
#simulate("postriver_values", 900000)
def printMatrix(filename = "postriver_values"):
mat = pickle.load(open(filename, "rb"))
print mat
| {
"content_hash": "ff2fe64d8325c77db20c39b363df9d3d",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 272,
"avg_line_length": 30.423076923076923,
"alnum_prop": 0.6425410872313527,
"repo_name": "pmaddi/CPSC458_Final-Project",
"id": "c3cac786a055fb51c259f0a7222a58866882f859",
"size": "3164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afterriver_sim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56157"
}
],
"symlink_target": ""
} |
__author__ = 'Franko'
from google.appengine.ext import ndb
from lib.models import Randomstevilka
from datetime import datetime
import time
import random
from google.appengine.api import users
def Random(stevilka,params):
R=params
# just a check
alijerandom=preveri_random()
stevilka=int(stevilka)
glavna_stevilka= generiraj_random()
glavna_stevilka=int(glavna_stevilka)
if stevilka < glavna_stevilka:
tekst = "up"
elif stevilka > glavna_stevilka:
tekst = "down"
else:
tekst = "ok"
for user in Randomstevilka.query(Randomstevilka.aktivna == True):
user.aktivna = False
user.put()
stposkusov=povecaj_poskuse(stevilka,tekst)
parametri={"random":alijerandom,"randomnumber":glavna_stevilka, "uganil":tekst,"zadnji":stevilka,"stposkusov":stposkusov}
R.update(parametri)
return R
def preveri_random():
oldseznam = Randomstevilka.query(Randomstevilka.aktivna == True).fetch()
if oldseznam:
alfa = True
else:
alfa = False
return alfa
def generiraj_random():
beta = random.randrange(0,10000,1)
kontrola = preveri_random()
if kontrola == False:
nova_random = Randomstevilka(stevilo=beta)
nova_random.put()
time.sleep(1)
else:
oldseznam = Randomstevilka.query(Randomstevilka.aktivna == True).fetch()
beta=oldseznam[0].stevilo
return beta
def povecaj_poskuse(ugib,updown):
user = users.get_current_user()
emailprejemnika = user.email()
for user in Randomstevilka.query(Randomstevilka.aktivna == True):
user.vposkusih += 1
user.zadnjiposkus = ugib
user.zadnirezultat = updown
user.uganil = emailprejemnika
user.put()
return user.vposkusih
| {
"content_hash": "343d9728f1a4cbd57ccd042330c77eff",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 129,
"avg_line_length": 31.59016393442623,
"alnum_prop": 0.6196159833938765,
"repo_name": "igorpodobnik/koncniprojek",
"id": "86a4a5d2ccd531e4b02cb57036d7da8a49cddd85",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ugani.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "926"
},
{
"name": "HTML",
"bytes": "20123"
},
{
"name": "JavaScript",
"bytes": "365"
},
{
"name": "Python",
"bytes": "72605"
}
],
"symlink_target": ""
} |
"""
This module defines all the hotword detection engines present in the app.
Presently, it supports:
* PocketSphinx KeyPhrase Search for Hotword Detection
* Snowboy Hotword Detection
Snowboy gives marginally better results, but if it is unavailable on your
device you may use PocketSphinx instead.
"""
from .snowboy_detector import SnowboyDetector
from .sphinx_detector import PocketSphinxDetector
| {
"content_hash": "c281bf7aeac80b2efef9b52f8391e819",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 83,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.8214285714285714,
"repo_name": "betterclever/susi_linux",
"id": "f49895a6f180bfd6e57d83439e8c5ec72ec31047",
"size": "392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main/hotword_engine/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48385"
},
{
"name": "Shell",
"bytes": "8581"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries import functions
from resource_management.libraries.functions.format import format
from ambari_commons import OSCheck
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
hbase_pid_dir = config['configurations']['ams-hbase-env']['hbase_pid_dir']
hbase_user = ams_user
ams_collector_pid_dir = config['configurations']['ams-env']['metrics_collector_pid_dir']
ams_monitor_pid_dir = config['configurations']['ams-env']['metrics_monitor_pid_dir']
ams_grafana_pid_dir = config['configurations']['ams-grafana-env']['metrics_grafana_pid_dir']
monitor_pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
grafana_pid_file = format("{ams_grafana_pid_dir}/grafana-server.pid")
security_enabled = config['configurations']['cluster-env']['security_enabled']
ams_hbase_conf_dir = format("{hbase_conf_dir}")
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hostname = config['hostname']
tmp_dir = Script.get_tmp_dir()
| {
"content_hash": "2dc43789206c790239fd82b62ce2a5b3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 114,
"avg_line_length": 43.18181818181818,
"alnum_prop": 0.7752631578947369,
"repo_name": "radicalbit/ambari",
"id": "6057a8b8e4441f88144e98af5212791bab1b08e8",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/status_params.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
} |
'''
This script evaluates the dataset using SVM and 10-fold cross-validation repeated n times.
'''
import collections
import csv
import datetime
import json
import os
import random
import re
import statistics
import sys
import time as t
import nltk
import nltk.classify
from nltk.metrics import scores
from sklearn.svm import LinearSVC
from modules import cleaner, tokenizer
n = 30
folds = 10
def tweet_features(tweet):
features = {}
tweet = cleaner.clean(tweet)
for word in tweet.split():
features["{}".format(word)] = tweet.count(word)
return features
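# Illustrative example of the bag-of-words features built above (the exact output
# depends on what cleaner.clean does to the text): a cleaned tweet such as
# "macet di jalan solo" would yield {'macet': 1, 'di': 1, 'jalan': 1, 'solo': 1}.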
def f1(precision, recall):
return 2 * ((precision * recall) / (precision + recall))
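# Quick worked example of the F-measure above: precision 0.8 and recall 0.6 give
# f1(0.8, 0.6) = 2 * (0.48 / 1.4), roughly 0.686.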
with open(os.path.join(os.path.dirname(__file__), 'result/generated_datasets/overlap/0.7/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [(line[0], line[1]) for line in dataset]
with open(os.path.join(os.path.dirname(__file__), 'result/generated_datasets/overlap/0.7/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [(line[0], line[1]) for line in dataset]
# random.shuffle(traffic_tweets)
# random.shuffle(non_traffic_tweets)
# if sys.argv[1] == "balance":
# traffic_tweets = traffic_tweets[:min([len(traffic_tweets), len(non_traffic_tweets)])]
# non_traffic_tweets = non_traffic_tweets[:min([len(traffic_tweets), len(non_traffic_tweets)])]
cv_times = []
cv_true_positives = []
cv_true_negatives = []
cv_false_positives = []
cv_false_negatives = []
cv_accuracies = []
cv_precisions = []
cv_recalls = []
cv_f_measures = []
for x in range(n):
labeled_tweets = (traffic_tweets + non_traffic_tweets)
random.shuffle(labeled_tweets)
times = []
true_positives = []
true_negatives = []
false_positives = []
false_negatives = []
accuracies = []
precisions = []
recalls = []
f_measures = []
for i in range(folds):
train_set = [(tweet_features(tweet), category) for (tweet, category) in labeled_tweets[0 : i * int(len(labeled_tweets) / folds)]] + \
[(tweet_features(tweet), category) for (tweet, category) in labeled_tweets[(i + 1) * int(len(labeled_tweets) / folds) : len(labeled_tweets)]]
test_set = [(tweet_features(tweet), category) for (tweet, category) in labeled_tweets[i * int(len(labeled_tweets) / folds) : (i + 1) * int(len(labeled_tweets) / folds)]]
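# Illustrative fold layout (not part of the original script): with, say, 1000
# labeled tweets and folds = 10, fold i uses tweets [i*100 : (i+1)*100] as the
# test set and the remaining 900 tweets as the training set, which is exactly
# what the two slices above express.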
print('\rn: {}/{}\tfolds: {}/{} '.format(x + 1, n, i + 1, folds), end='')
# SVM
start_time = t.time()
svm_classifier = nltk.classify.SklearnClassifier(LinearSVC(max_iter=10000)).train(train_set)
time = round(t.time() - start_time, 2)
accuracy = nltk.classify.accuracy(svm_classifier, test_set)
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0
for i, (feature, label) in enumerate(test_set):
observed = svm_classifier.classify(feature)
if label == 'traffic' and observed == 'traffic':
true_positive += 1
if label == 'non_traffic' and observed == 'non_traffic':
true_negative += 1
            # a traffic tweet predicted as non_traffic is a missed positive
            # (false negative); the reverse case is a false positive
            if label == 'traffic' and observed == 'non_traffic':
                false_negative += 1
            if label == 'non_traffic' and observed == 'traffic':
                false_positive += 1
precision = true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
f_measure = f1(precision, recall)
times.append(time)
true_positives.append(true_positive)
true_negatives.append(true_negative)
false_positives.append(false_positive)
false_negatives.append(false_negative)
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
f_measures.append(f_measure)
cv_times.append(statistics.mean(times))
cv_true_positives.append(statistics.mean(true_positives))
cv_true_negatives.append(statistics.mean(true_negatives))
cv_false_positives.append(statistics.mean(false_positives))
cv_false_negatives.append(statistics.mean(false_negatives))
cv_accuracies.append(statistics.mean(accuracies))
cv_precisions.append(statistics.mean(precisions))
cv_recalls.append(statistics.mean(recalls))
cv_f_measures.append(statistics.mean(f_measures))
print('\nSVM Classifier:')
print('\tAverage training time: {}'.format(statistics.mean(cv_times)))
print('\tAverage true positive: {}'.format(statistics.mean(cv_true_positives)))
print('\tAverage true negative: {}'.format(statistics.mean(cv_true_negatives)))
print('\tAverage false positives: {}'.format(statistics.mean(cv_false_positives)))
print('\tAverage false negatives: {}'.format(statistics.mean(cv_false_negatives)))
print('\tAverage accuracy: {}'.format(statistics.mean(cv_accuracies)))
print('\tAverage precision: {}'.format(statistics.mean(cv_precisions)))
print('\tAverage recall: {}'.format(statistics.mean(cv_recalls)))
print('\tAverage F-Measure: {}'.format(statistics.mean(cv_f_measures))) | {
"content_hash": "d6ebe9b7c3d7356640c85b84ec8487db",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 177,
"avg_line_length": 37.50359712230216,
"alnum_prop": 0.6602723959332438,
"repo_name": "dwiajik/twit-macet-mining-v3",
"id": "c6e69adc08b24a67aec30cee77757fc394848963",
"size": "5213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluate_n_ten_folds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63706"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.forms.widgets import SelectMultiple
from django.utils.translation import ugettext_lazy as _
from fobi.base import FormFieldPlugin, get_theme
from fobi.constants import (
SUBMIT_VALUE_AS_VAL,
SUBMIT_VALUE_AS_REPR
)
from fobi.helpers import get_select_field_choices, safe_text
from . import UID
from .fields import MultipleChoiceWithMaxField
from .forms import SelectMultipleWithMaxInputForm
from .settings import SUBMIT_VALUE_AS
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'select_multiple_with_max.base'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('SelectMultipleWithMaxInputPlugin',)
theme = get_theme(request=None, as_instance=True)
class SelectMultipleWithMaxInputPlugin(FormFieldPlugin):
"""Select multiple with max field plugin."""
uid = UID
name = _("Select multiple with max")
group = _("Fields")
form = SelectMultipleWithMaxInputForm
def get_choices(self):
"""Get choices.
Might be used in integration plugins.
"""
return get_select_field_choices(self.data.choices)
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
"""Get form field instances."""
choices = get_select_field_choices(self.data.choices)
field_kwargs = {
'label': self.data.label,
'help_text': self.data.help_text,
'initial': self.data.initial,
'required': self.data.required,
'choices': choices,
'widget': SelectMultiple(
attrs={'class': theme.form_element_html_class}
),
}
if self.data.max_choices:
field_kwargs['max_choices'] = self.data.max_choices
return [(self.data.name, MultipleChoiceWithMaxField, field_kwargs)]
def prepare_plugin_form_data(self, cleaned_data):
"""Prepare plugin form data.
Might be used in integration plugins.
"""
        # If the value should be submitted as-is, we leave the cleaned data
        # untouched. Otherwise we transform each selected value below.
if SUBMIT_VALUE_AS != SUBMIT_VALUE_AS_VAL:
# Get the object
values = cleaned_data.get(self.data.name, None)
# Get choices
choices = dict(self.get_choices())
# Returned value
ret_values = []
for value in values:
# Handle the submitted form value
if value in choices:
label = safe_text(choices.get(value))
# Should be returned as repr
if SUBMIT_VALUE_AS == SUBMIT_VALUE_AS_REPR:
value = label
# Should be returned as mix
else:
value = "{0} ({1})".format(label, value)
ret_values.append(value)
            # Overwrite the submitted value in ``cleaned_data`` with the
            # transformed list.
cleaned_data[self.data.name] = ret_values
# It's critically important to return the ``form`` with updated
# ``cleaned_data``
return cleaned_data
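    # Illustrative transformation for a hypothetical choice value "1" labelled
    # "Apple", depending on the SUBMIT_VALUE_AS setting:
    #   SUBMIT_VALUE_AS_VAL  -> "1" (cleaned data left untouched above)
    #   SUBMIT_VALUE_AS_REPR -> "Apple"
    #   any other setting    -> "Apple (1)"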
def submit_plugin_form_data(self, form_entry, request, form,
form_element_entries=None, **kwargs):
"""Submit plugin form data/process.
:param fobi.models.FormEntry form_entry: Instance of
``fobi.models.FormEntry``.
:param django.http.HttpRequest request:
:param django.forms.Form form:
"""
        # If the value should be submitted as-is, we leave the cleaned data
        # untouched. Otherwise we transform each selected value below.
if SUBMIT_VALUE_AS != SUBMIT_VALUE_AS_VAL:
# Get the object
values = form.cleaned_data.get(self.data.name, None)
# Get choices
choices = dict(get_select_field_choices(self.data.choices))
# Returned value
ret_values = []
for value in values:
# Handle the submitted form value
if value in choices:
label = safe_text(choices.get(value))
# Should be returned as repr
if SUBMIT_VALUE_AS == SUBMIT_VALUE_AS_REPR:
value = label
# Should be returned as mix
else:
value = "{0} ({1})".format(label, value)
ret_values.append(value)
            # Overwrite the submitted value in ``form.cleaned_data`` with the
            # transformed list.
form.cleaned_data[self.data.name] = ret_values
# It's critically important to return the ``form`` with updated
# ``cleaned_data``
return form
| {
"content_hash": "6f1da392c9cc793d1a62bb1838f90ccf",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 76,
"avg_line_length": 34.013698630136986,
"alnum_prop": 0.5698751510269835,
"repo_name": "mansonul/events",
"id": "bf0a54b50bd7a95216f9ff5b260d31d90815ca4c",
"size": "4966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/contrib/plugins/form_elements/fields/select_multiple_with_max/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90251"
},
{
"name": "HTML",
"bytes": "186225"
},
{
"name": "JavaScript",
"bytes": "43221"
},
{
"name": "Python",
"bytes": "804726"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
import os
import os.path
from subprocess import Popen, PIPE
import time
from datetime import datetime
import socket
import httplib, urllib
import urllib2
import logging
import logging.handlers
import db
import csv
import json
import tempfile
import ala
import paramiko
class HPCConfig:
cakeAppBaseURL = "http://climatebird2.qern.qcif.edu.au/Edgar"
#cakeAppBaseURL = "http://localhost/~robert/ap03"
nextSpeciesURL= cakeAppBaseURL + "/species/get_next_job_and_assume_queued"
sshUser = "jc155857"
sshHPCDestination = "login.hpc.jcu.edu.au"
# Determine the paths to the different files
# The working dir is the modelling dir
workingDir = os.path.join(os.path.dirname(__file__), '../')
#workingDir = os.path.join('/', 'Users', 'robert', 'Git_WA', 'Edgar', 'modelling')
importingWorkingDir = os.path.join(workingDir, '../', 'importing')
importingConfigPath = os.path.join(importingWorkingDir, 'config.json')
binDir = os.path.join(workingDir, 'bin')
modelSppScriptPath = os.path.join(binDir, 'modelspp.sh')
queueJobScriptPath = os.path.join(binDir, 'queueJob.sh')
localQueueJobScriptPath = os.path.join(binDir, 'local_queueJob.sh')
checkJobStatusScriptPath = os.path.join(binDir, 'checkJobStatus.sh')
@staticmethod
def getSpeciesReportURL(speciesId):
return HPCConfig.cakeAppBaseURL + "/species/job_status/" + speciesId
@staticmethod
def connectDB():
config = None
with open(HPCConfig.importingConfigPath, 'rb') as f:
config = json.load(f)
db.connect(config)
return db
@staticmethod
def disposeDB():
db.engine.dispose()
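# Minimal usage sketch (illustrative, not part of the original module; assumes
# importing/config.json and the db module are available):
#   database = HPCConfig.connectDB()
#   try:
#       pass  # run queries against the Edgar database here
#   finally:
#       HPCConfig.disposeDB()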
| {
"content_hash": "6bea96e56ae1760cdc6840cfcb2577b0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 86,
"avg_line_length": 29.74137931034483,
"alnum_prop": 0.6927536231884058,
"repo_name": "jcu-eresearch/Edgar",
"id": "0c43ae30d31c7aa7e419d389346cd9357b341b51",
"size": "1725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modelling/src/hpc_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "777"
},
{
"name": "Batchfile",
"bytes": "2899"
},
{
"name": "CSS",
"bytes": "120535"
},
{
"name": "CoffeeScript",
"bytes": "47912"
},
{
"name": "HTML",
"bytes": "880607"
},
{
"name": "JavaScript",
"bytes": "285300"
},
{
"name": "PHP",
"bytes": "7118750"
},
{
"name": "PLpgSQL",
"bytes": "13498"
},
{
"name": "Python",
"bytes": "169237"
},
{
"name": "R",
"bytes": "593"
},
{
"name": "Shell",
"bytes": "17548"
}
],
"symlink_target": ""
} |
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
from .validators.wafv2 import (
validate_comparison_operator,
validate_custom_response_bodies,
validate_ipaddress_version,
validate_positional_constraint,
validate_statement,
validate_statements,
validate_transformation_type,
wafv2_custom_body_response_content,
wafv2_custom_body_response_content_type,
)
class IPSet(AWSObject):
"""
`IPSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-ipset.html>`__
"""
resource_type = "AWS::WAFv2::IPSet"
props: PropsDictType = {
"Addresses": ([str], True),
"Description": (str, False),
"IPAddressVersion": (validate_ipaddress_version, True),
"Name": (str, False),
"Scope": (str, True),
"Tags": (Tags, False),
}
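# Example usage (illustrative; resource title and addresses are hypothetical):
#   from troposphere import Template
#   t = Template()
#   t.add_resource(IPSet(
#       "AllowListIPSet",
#       Addresses=["203.0.113.0/24"],
#       IPAddressVersion="IPV4",
#       Scope="REGIONAL",
#   ))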
class LoggingConfigurationFieldToMatch(AWSProperty):
"""
`LoggingConfigurationFieldToMatch <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-loggingconfiguration-fieldtomatch.html>`__
"""
props: PropsDictType = {
"JsonBody": (dict, False),
"Method": (dict, False),
"QueryString": (dict, False),
"SingleHeader": (dict, False),
"UriPath": (dict, False),
}
class LoggingConfiguration(AWSObject):
"""
`LoggingConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-loggingconfiguration.html>`__
"""
resource_type = "AWS::WAFv2::LoggingConfiguration"
props: PropsDictType = {
"LogDestinationConfigs": ([str], True),
"LoggingFilter": (dict, False),
"RedactedFields": ([LoggingConfigurationFieldToMatch], False),
"ResourceArn": (str, True),
}
class RegexPatternSet(AWSObject):
"""
`RegexPatternSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-regexpatternset.html>`__
"""
resource_type = "AWS::WAFv2::RegexPatternSet"
props: PropsDictType = {
"Description": (str, False),
"Name": (str, False),
"RegularExpressionList": ([str], True),
"Scope": (str, True),
"Tags": (Tags, False),
}
class CustomResponseBody(AWSProperty):
"""
`CustomResponseBody <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-customresponsebody.html>`__
"""
props: PropsDictType = {
"Content": (wafv2_custom_body_response_content, True),
"ContentType": (wafv2_custom_body_response_content_type, True),
}
class ImmunityTimeProperty(AWSProperty):
"""
`ImmunityTimeProperty <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-immunitytimeproperty.html>`__
"""
props: PropsDictType = {
"ImmunityTime": (integer, True),
}
class CaptchaConfig(AWSProperty):
"""
`CaptchaConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-captchaconfig.html>`__
"""
props: PropsDictType = {
"ImmunityTimeProperty": (ImmunityTimeProperty, False),
}
class Label(AWSProperty):
"""
`Label <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-label.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
}
class CustomHTTPHeader(AWSProperty):
"""
`CustomHTTPHeader <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-customhttpheader.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
"Value": (str, True),
}
class CustomRequestHandling(AWSProperty):
"""
`CustomRequestHandling <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-customrequesthandling.html>`__
"""
props: PropsDictType = {
"InsertHeaders": ([CustomHTTPHeader], True),
}
class AllowAction(AWSProperty):
"""
`AllowAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-allowaction.html>`__
"""
props: PropsDictType = {
"CustomRequestHandling": (CustomRequestHandling, False),
}
class CustomResponse(AWSProperty):
"""
`CustomResponse <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-customresponse.html>`__
"""
props: PropsDictType = {
"CustomResponseBodyKey": (str, False),
"ResponseCode": (integer, True),
"ResponseHeaders": ([CustomHTTPHeader], False),
}
class BlockAction(AWSProperty):
"""
`BlockAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-blockaction.html>`__
"""
props: PropsDictType = {
"CustomResponse": (CustomResponse, False),
}
class CaptchaAction(AWSProperty):
"""
`CaptchaAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-captchaaction.html>`__
"""
props: PropsDictType = {
"CustomRequestHandling": (CustomRequestHandling, False),
}
class CountAction(AWSProperty):
"""
`CountAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-countaction.html>`__
"""
props: PropsDictType = {
"CustomRequestHandling": (CustomRequestHandling, False),
}
class RuleAction(AWSProperty):
"""
`RuleAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-ruleaction.html>`__
"""
props: PropsDictType = {
"Allow": (AllowAction, False),
"Block": (BlockAction, False),
"Captcha": (CaptchaAction, False),
"Count": (CountAction, False),
}
class AndStatement(AWSProperty):
"""
`AndStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-andstatement.html>`__
"""
props: PropsDictType = {
"Statements": (validate_statements, True),
}
class Body(AWSProperty):
"""
`Body <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-body.html>`__
"""
props: PropsDictType = {
"OversizeHandling": (str, False),
}
class CookieMatchPattern(AWSProperty):
"""
`CookieMatchPattern <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-cookiematchpattern.html>`__
"""
props: PropsDictType = {
"All": (dict, False),
"ExcludedCookies": ([str], False),
"IncludedCookies": ([str], False),
}
class Cookies(AWSProperty):
"""
`Cookies <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-cookies.html>`__
"""
props: PropsDictType = {
"MatchPattern": (CookieMatchPattern, True),
"MatchScope": (str, True),
"OversizeHandling": (str, True),
}
class HeaderMatchPattern(AWSProperty):
"""
`HeaderMatchPattern <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-headermatchpattern.html>`__
"""
props: PropsDictType = {
"All": (dict, False),
"ExcludedHeaders": ([str], False),
"IncludedHeaders": ([str], False),
}
class Headers(AWSProperty):
"""
`Headers <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-headers.html>`__
"""
props: PropsDictType = {
"MatchPattern": (HeaderMatchPattern, True),
"MatchScope": (str, True),
"OversizeHandling": (str, True),
}
class JsonMatchPattern(AWSProperty):
"""
`JsonMatchPattern <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-jsonmatchpattern.html>`__
"""
props: PropsDictType = {
"All": (dict, False),
"IncludedPaths": ([str], False),
}
class JsonBody(AWSProperty):
"""
`JsonBody <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-jsonbody.html>`__
"""
props: PropsDictType = {
"InvalidFallbackBehavior": (str, False),
"MatchPattern": (JsonMatchPattern, True),
"MatchScope": (str, True),
"OversizeHandling": (str, False),
}
class FieldToMatch(AWSProperty):
"""
`FieldToMatch <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-fieldtomatch.html>`__
"""
props: PropsDictType = {
"AllQueryArguments": (dict, False),
"Body": (Body, False),
"Cookies": (Cookies, False),
"Headers": (Headers, False),
"JsonBody": (JsonBody, False),
"Method": (dict, False),
"QueryString": (dict, False),
"SingleHeader": (dict, False),
"SingleQueryArgument": (dict, False),
"UriPath": (dict, False),
}
class TextTransformation(AWSProperty):
"""
`TextTransformation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-texttransformation.html>`__
"""
props: PropsDictType = {
"Priority": (integer, True),
"Type": (validate_transformation_type, True),
}
class ByteMatchStatement(AWSProperty):
"""
`ByteMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-bytematchstatement.html>`__
"""
props: PropsDictType = {
"FieldToMatch": (FieldToMatch, True),
"PositionalConstraint": (validate_positional_constraint, True),
"SearchString": (str, False),
"SearchStringBase64": (str, False),
"TextTransformations": ([TextTransformation], True),
}
class ForwardedIPConfiguration(AWSProperty):
"""
`ForwardedIPConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-forwardedipconfiguration.html>`__
"""
props: PropsDictType = {
"FallbackBehavior": (str, True),
"HeaderName": (str, True),
}
class GeoMatchStatement(AWSProperty):
"""
`GeoMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-geomatchstatement.html>`__
"""
props: PropsDictType = {
"CountryCodes": ([str], False),
"ForwardedIPConfig": (ForwardedIPConfiguration, False),
}
class IPSetForwardedIPConfiguration(AWSProperty):
"""
`IPSetForwardedIPConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-ipsetforwardedipconfiguration.html>`__
"""
props: PropsDictType = {
"FallbackBehavior": (str, True),
"HeaderName": (str, True),
"Position": (str, True),
}
class IPSetReferenceStatement(AWSProperty):
"""
`IPSetReferenceStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-ipsetreferencestatement.html>`__
"""
props: PropsDictType = {
"Arn": (str, True),
"IPSetForwardedIPConfig": (IPSetForwardedIPConfiguration, False),
}
class LabelMatchStatement(AWSProperty):
"""
`LabelMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-labelmatchstatement.html>`__
"""
props: PropsDictType = {
"Key": (str, True),
"Scope": (str, True),
}
class ExcludedRule(AWSProperty):
"""
`ExcludedRule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-excludedrule.html>`__
"""
props: PropsDictType = {
"Name": (str, True),
}
class FieldIdentifier(AWSProperty):
"""
`FieldIdentifier <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-fieldidentifier.html>`__
"""
props: PropsDictType = {
"Identifier": (str, True),
}
class ManagedRuleGroupConfig(AWSProperty):
"""
`ManagedRuleGroupConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-managedrulegroupconfig.html>`__
"""
props: PropsDictType = {
"LoginPath": (str, False),
"PasswordField": (FieldIdentifier, False),
"PayloadType": (str, False),
"UsernameField": (FieldIdentifier, False),
}
class ManagedRuleGroupStatement(AWSProperty):
"""
`ManagedRuleGroupStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-managedrulegroupstatement.html>`__
"""
props: PropsDictType = {
"ExcludedRules": ([ExcludedRule], False),
"ManagedRuleGroupConfigs": ([ManagedRuleGroupConfig], False),
"Name": (str, True),
"ScopeDownStatement": (validate_statement, False),
"VendorName": (str, True),
"Version": (str, False),
}
class NotStatement(AWSProperty):
"""
`NotStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-notstatement.html>`__
"""
props: PropsDictType = {
"Statement": (validate_statement, True),
}
class OrStatement(AWSProperty):
"""
`OrStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-orstatement.html>`__
"""
props: PropsDictType = {
"Statements": (validate_statements, True),
}
class RateBasedStatement(AWSProperty):
"""
`RateBasedStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-ratebasedstatement.html>`__
"""
props: PropsDictType = {
"AggregateKeyType": (str, True),
"ForwardedIPConfig": (ForwardedIPConfiguration, False),
"Limit": (integer, True),
"ScopeDownStatement": (validate_statement, False),
}
class RegexMatchStatement(AWSProperty):
"""
`RegexMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-regexmatchstatement.html>`__
"""
props: PropsDictType = {
"FieldToMatch": (FieldToMatch, True),
"RegexString": (str, True),
"TextTransformations": ([TextTransformation], True),
}
class RegexPatternSetReferenceStatement(AWSProperty):
"""
`RegexPatternSetReferenceStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-regexpatternsetreferencestatement.html>`__
"""
props: PropsDictType = {
"Arn": (str, True),
"FieldToMatch": (FieldToMatch, True),
"TextTransformations": ([TextTransformation], True),
}
class RuleGroupReferenceStatement(AWSProperty):
"""
`RuleGroupReferenceStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-rulegroupreferencestatement.html>`__
"""
props: PropsDictType = {
"Arn": (str, True),
"ExcludedRules": ([ExcludedRule], False),
}
class SizeConstraintStatement(AWSProperty):
"""
`SizeConstraintStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-sizeconstraintstatement.html>`__
"""
props: PropsDictType = {
"ComparisonOperator": (validate_comparison_operator, True),
"FieldToMatch": (FieldToMatch, True),
"Size": (double, True),
"TextTransformations": ([TextTransformation], True),
}
class SqliMatchStatement(AWSProperty):
"""
`SqliMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-sqlimatchstatement.html>`__
"""
props: PropsDictType = {
"FieldToMatch": (FieldToMatch, True),
"SensitivityLevel": (str, False),
"TextTransformations": ([TextTransformation], True),
}
class XssMatchStatement(AWSProperty):
"""
`XssMatchStatement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-xssmatchstatement.html>`__
"""
props: PropsDictType = {
"FieldToMatch": (FieldToMatch, True),
"TextTransformations": ([TextTransformation], True),
}
class Statement(AWSProperty):
"""
`Statement <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-statement.html>`__
"""
props: PropsDictType = {
"AndStatement": (AndStatement, False),
"ByteMatchStatement": (ByteMatchStatement, False),
"GeoMatchStatement": (GeoMatchStatement, False),
"IPSetReferenceStatement": (IPSetReferenceStatement, False),
"LabelMatchStatement": (LabelMatchStatement, False),
"ManagedRuleGroupStatement": (ManagedRuleGroupStatement, False),
"NotStatement": (NotStatement, False),
"OrStatement": (OrStatement, False),
"RateBasedStatement": (RateBasedStatement, False),
"RegexMatchStatement": (RegexMatchStatement, False),
"RegexPatternSetReferenceStatement": (RegexPatternSetReferenceStatement, False),
"RuleGroupReferenceStatement": (RuleGroupReferenceStatement, False),
"SizeConstraintStatement": (SizeConstraintStatement, False),
"SqliMatchStatement": (SqliMatchStatement, False),
"XssMatchStatement": (XssMatchStatement, False),
}
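# Example (illustrative): a nested statement combining a geo match with a rate
# limit via AndStatement:
#   Statement(
#       AndStatement=AndStatement(Statements=[
#           Statement(GeoMatchStatement=GeoMatchStatement(CountryCodes=["US"])),
#           Statement(RateBasedStatement=RateBasedStatement(
#               AggregateKeyType="IP", Limit=2000)),
#       ])
#   )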
class VisibilityConfig(AWSProperty):
"""
`VisibilityConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-visibilityconfig.html>`__
"""
props: PropsDictType = {
"CloudWatchMetricsEnabled": (boolean, True),
"MetricName": (str, True),
"SampledRequestsEnabled": (boolean, True),
}
class RuleGroupRule(AWSProperty):
"""
`RuleGroupRule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-rulegroup-rule.html>`__
"""
props: PropsDictType = {
"Action": (RuleAction, False),
"CaptchaConfig": (CaptchaConfig, False),
"Name": (str, True),
"Priority": (integer, True),
"RuleLabels": ([Label], False),
"Statement": (validate_statement, True),
"VisibilityConfig": (VisibilityConfig, True),
}
class RuleGroup(AWSObject):
"""
`RuleGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-rulegroup.html>`__
"""
resource_type = "AWS::WAFv2::RuleGroup"
props: PropsDictType = {
"Capacity": (integer, True),
"CustomResponseBodies": (validate_custom_response_bodies, False),
"Description": (str, False),
"Name": (str, False),
"Rules": ([RuleGroupRule], False),
"Scope": (str, True),
"Tags": (Tags, False),
"VisibilityConfig": (VisibilityConfig, True),
}
class DefaultAction(AWSProperty):
"""
`DefaultAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-defaultaction.html>`__
"""
props: PropsDictType = {
"Allow": (AllowAction, False),
"Block": (BlockAction, False),
}
class OverrideAction(AWSProperty):
"""
`OverrideAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-overrideaction.html>`__
"""
props: PropsDictType = {
"Count": (dict, False),
"None": (dict, False),
}
class WebACLRule(AWSProperty):
"""
`WebACLRule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafv2-webacl-rule.html>`__
"""
props: PropsDictType = {
"Action": (RuleAction, False),
"CaptchaConfig": (CaptchaConfig, False),
"Name": (str, True),
"OverrideAction": (OverrideAction, False),
"Priority": (integer, True),
"RuleLabels": ([Label], False),
"Statement": (validate_statement, True),
"VisibilityConfig": (VisibilityConfig, True),
}
class WebACL(AWSObject):
"""
`WebACL <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-webacl.html>`__
"""
resource_type = "AWS::WAFv2::WebACL"
props: PropsDictType = {
"CaptchaConfig": (CaptchaConfig, False),
"CustomResponseBodies": (validate_custom_response_bodies, False),
"DefaultAction": (DefaultAction, True),
"Description": (str, False),
"Name": (str, False),
"Rules": ([WebACLRule], False),
"Scope": (str, True),
"Tags": (Tags, False),
"VisibilityConfig": (VisibilityConfig, True),
}
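# Example usage (illustrative): given a troposphere Template ``t``, a minimal
# WebACL that allows requests by default:
#   t.add_resource(WebACL(
#       "MyWebACL",
#       DefaultAction=DefaultAction(Allow=AllowAction()),
#       Scope="REGIONAL",
#       VisibilityConfig=VisibilityConfig(
#           CloudWatchMetricsEnabled=True,
#           MetricName="MyWebACL",
#           SampledRequestsEnabled=True,
#       ),
#   ))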
class WebACLAssociation(AWSObject):
"""
`WebACLAssociation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafv2-webaclassociation.html>`__
"""
resource_type = "AWS::WAFv2::WebACLAssociation"
props: PropsDictType = {
"ResourceArn": (str, True),
"WebACLArn": (str, True),
}
| {
"content_hash": "7570ae00a21c3323bff31faf314906a8",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 172,
"avg_line_length": 30.494920174165458,
"alnum_prop": 0.6596544667079149,
"repo_name": "cloudtools/troposphere",
"id": "631fbfdbbb7413b471691e7fd15044b754ea846f",
"size": "21183",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "troposphere/wafv2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2754"
},
{
"name": "Python",
"bytes": "2305574"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from pyes.tests import ESTestCase
from pyes import exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
#self.conn.indices.set_alias('test-alias', ['_river'])
#self.conn.indices.delete_alias('test-alias', ['_river'])
self.conn.indices.delete_index_if_exists('test-index2')
def tearDown(self):
#self.conn.indices.set_alias('test-alias', ['_river'])
#self.conn.indices.delete_alias('test-alias', ['_river'])
self.conn.indices.delete_index_if_exists('test-index2')
def testCreateDeleteAliases(self):
"""Test errors thrown when creating or deleting aliases.
"""
self.assertTrue('acknowledged' in self.conn.indices.create_index(self.index_name))
# Check initial output of get_indices.
result = self.conn.indices.get_indices(include_aliases=True)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' not in result)
# Check getting a missing alias.
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.indices.get_alias, 'test-alias')
self.assertEqual(str(err), '[test-alias] missing')
# Check deleting a missing alias (doesn't return a error).
self.conn.indices.delete_alias("test-alias", self.index_name)
# Add an alias from test-alias to test-index
self.conn.indices.change_aliases([['add', 'test-index', 'test-alias', {}]])
self.assertEqual(self.conn.indices.get_alias("test-alias"), ['test-index'])
# Adding an alias to a missing index fails
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.indices.change_aliases,
[['add', 'test-missing-index', 'test-alias', {}]])
self.assertEqual(str(err), '[test-missing-index] missing')
self.assertEqual(self.conn.indices.get_alias("test-alias"), ['test-index'])
# # An alias can't be deleted using delete_index.
# err = self.checkRaises(exceptions.NotFoundException,
# self.conn.delete_index, 'test-alias')
# self.assertEqual(str(err), '[test-alias] missing')
# Check return value from indices.get_indices now.
result = self.conn.indices.get_indices(include_aliases=True)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' in result)
self.assertEqual(result['test-alias'], {'alias_for': ['test-index'], 'num_docs': 0})
result = self.conn.indices.get_indices(include_aliases=False)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' not in result)
# Add an alias to test-index2.
self.assertTrue('ok' in self.conn.indices.create_index("test-index2"))
self.conn.indices.change_aliases([['add', 'test-index2', 'test-alias', {}]])
self.assertEqual(sorted(self.conn.indices.get_alias("test-alias")),
['test-index', 'test-index2'])
# Check deleting multiple indices from an alias.
self.conn.indices.delete_alias("test-alias", [self.index_name, "test-index2"])
self.checkRaises(exceptions.IndexMissingException, self.conn.indices.get_alias, 'test-alias')
# Check deleting multiple indices from a missing alias (still no error)
self.conn.indices.delete_alias("test-alias", [self.index_name, "test-index2"])
# Check that we still get an error for a missing alias.
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.indices.get_alias, 'test-alias')
self.assertEqual(str(err), '[test-alias] missing')
def testWriteToAlias(self):
self.assertTrue('acknowledged' in self.conn.indices.create_index(self.index_name))
self.assertTrue('acknowledged' in self.conn.indices.create_index("test-index2"))
self.assertTrue('acknowledged' in self.conn.indices.set_alias("test-alias", ['test-index']))
self.assertTrue('acknowledged' in self.conn.indices.set_alias("test-alias2", ['test-index', 'test-index2']))
# Can write to indices.aliases only if they point to exactly one index.
self.conn.index(dict(title='doc1'), 'test-index', 'testtype')
self.conn.index(dict(title='doc1'), 'test-index2', 'testtype')
self.conn.index(dict(title='doc1'), 'test-alias', 'testtype')
self.checkRaises(exceptions.ElasticSearchIllegalArgumentException,
self.conn.index, dict(title='doc1'),
'test-alias2', 'testtype')
self.conn.indices.refresh() # ensure that the documents have been indexed.
# Check the document counts for each index or alias.
result = self.conn.indices.get_indices(include_aliases=True)
self.assertEqual(result['test-index'], {'num_docs': 2})
self.assertEqual(result['test-index2'], {'num_docs': 1})
self.assertEqual(result['test-alias'], {'alias_for': ['test-index'], 'num_docs': 2})
self.assertEqual(result['test-alias2'], {'alias_for': ['test-index', 'test-index2'], 'num_docs': 3})
if __name__ == "__main__":
import unittest
unittest.main()
| {
"content_hash": "130630ca36eb9284daf84c71bd60e176",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 116,
"avg_line_length": 50.52777777777778,
"alnum_prop": 0.6468755726589701,
"repo_name": "Fiedzia/pyes",
"id": "4d0d385603dc6884b64f9283a9d9341ae5f86b58",
"size": "5481",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/test_aliases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "600684"
},
{
"name": "Shell",
"bytes": "1476"
}
],
"symlink_target": ""
} |
"""
Batch alignment script using Toil
Runs BWA to produce the SAM followed by BAMSORT to produce the BAM
If the option is specified (s3_dir), the output bam will be placed
in S3. ~/.boto config file and S3AM: https://github.com/BD2KGenomics/s3am
are required for this step.
Dependencies:
Docker - apt-get install docker.io
Toil - pip install toil
S3AM* - pip install --pre S3AM (optional)
Curl - apt-get install curl
"""
import argparse
import base64
from collections import OrderedDict
import hashlib
import os
import subprocess
import multiprocessing
import shutil
from toil.job import Job
from toil_scripts import download_from_s3_url
def build_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=True, help='configuration file: uuid,url,url,...')
parser.add_argument('-r', '--ref', required=True, help='Reference fasta file')
parser.add_argument('-m', '--amb', required=True, help='Reference fasta file (amb)')
parser.add_argument('-n', '--ann', required=True, help='Reference fasta file (ann)')
parser.add_argument('-b', '--bwt', required=True, help='Reference fasta file (bwt)')
parser.add_argument('-p', '--pac', required=True, help='Reference fasta file (pac)')
parser.add_argument('-a', '--sa', required=True, help='Reference fasta file (sa)')
parser.add_argument('-f', '--fai', required=True, help='Reference fasta file (fai)')
parser.add_argument('-s', '--ssec', help='Path to Key File for SSE-C Encryption')
parser.add_argument('-o', '--out', default=None, help='full path where final results will be output')
parser.add_argument('-3', '--s3_dir', default=None, help='S3 Directory, starting with bucket name. e.g.: '
'cgl-driver-projects/ckcc/rna-seq-samples/')
return parser
# Convenience Functions
def generate_unique_key(master_key_path, url):
"""
Input1: Path to the BD2K Master Key (for S3 Encryption)
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL
"""
with open(master_key_path, 'r') as f:
master_key = f.read()
assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
'Key: {}, Length: {}'.format(master_key, len(master_key))
new_key = hashlib.sha256(master_key + url).digest()
assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
return new_key
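# Illustrative call (hypothetical key file and URL; the master key file must be
# exactly 32 bytes long):
#   key = generate_unique_key('/path/to/master.key',
#                             'https://s3-us-west-2.amazonaws.com/bucket/sample.bam')
#   assert len(key) == 32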
def download_encrypted_file(work_dir, url, key_path, name):
"""
Downloads encrypted file from S3
Input1: Working directory
Input2: S3 URL to be downloaded
Input3: Path to key necessary for decryption
Input4: name of file to be downloaded
"""
file_path = os.path.join(work_dir, name)
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
def download_from_url(job, url, fname):
"""
Downloads a file from a URL and places it in the jobStore
Input1: Toil job instance
    Input2: URL of the file to be downloaded
    Input3: Name under which to store the downloaded file
    Returns: jobStore ID of the downloaded file
"""
work_dir = job.fileStore.getLocalTempDir()
file_path = os.path.join(work_dir, fname)
if not os.path.exists(file_path):
if url.startswith('s3:'):
download_from_s3_url(file_path, url)
else:
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path)
def return_input_paths(job, work_dir, ids, *args):
"""
Returns the paths of files from the FileStore
Input1: Toil job instance
Input2: Working directory
Input3: jobstore id dictionary
Input4: names of files to be returned from the jobstore
Returns: path(s) to the file(s) requested -- unpack these!
"""
paths = OrderedDict()
for name in args:
if not os.path.exists(os.path.join(work_dir, name)):
file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
else:
file_path = os.path.join(work_dir, name)
paths[name] = file_path
if len(args) == 1:
return file_path
return paths.values()
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
"""
Moves files from work_dir to output_dir
Input1: Working directory
Input2: Output directory
Input3: UUID to be preprended onto file name
Input4: list of file names to be moved from working dir to output dir
"""
for fname in files:
if uuid is None:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
# Start of Job Functions
def batch_start(job, input_args):
"""
Downloads shared files that are used by all samples for alignment and places them in the jobstore.
"""
shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']
shared_ids = {}
for fname in shared_files:
url = input_args[fname]
shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()
job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)
def spawn_batch_jobs(job, shared_ids, input_args):
"""
Spawns an alignment job for every sample in the input configuration file
"""
samples = []
config = input_args['config']
with open(config, 'r') as f_in:
for line in f_in:
line = line.strip().split(',')
uuid = line[0]
urls = line[1:]
samples.append((uuid, urls))
for sample in samples:
job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
def alignment(job, ids, input_args, sample):
"""
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""
uuid, urls = sample
# ids['bam'] = job.fileStore.getEmptyFileStoreID()
work_dir = job.fileStore.getLocalTempDir()
output_dir = input_args['output_dir']
key_path = input_args['ssec']
cores = multiprocessing.cpu_count()
# I/O
return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
# Get fastqs associated with this sample
for url in urls:
download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
# Parameters for BWA and Bamsort
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
bamsort_command = ["jeltje/biobambam",
"/usr/local/bin/bamsort",
"inputformat=sam",
"level=1",
"inputthreads={}".format(cores),
"outputthreads={}".format(cores),
"calmdnm=1",
"calmdnmrecompindetonly=1",
"calmdnmreference=/data/ref.fa",
"I=/data/{}".format(uuid + '.sam')]
# Piping the output to a file handle
with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
# Save in JobStore
# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
# Copy file to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
# Move file in output_dir
if input_args['output_dir']:
move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
def upload_bam_to_s3(job, ids, input_args, sample):
"""
Uploads output BAM from sample to S3
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""
uuid, urls = sample
key_path = input_args['ssec']
work_dir = job.fileStore.getLocalTempDir()
# Parse s3_dir to get bucket and s3 path
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')
#I/O
job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
# Generate keyfile for upload
with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Commands to upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, uuid + '.key'),
'file://{}'.format(os.path.join(work_dir, uuid + '.bam')),
bucket_name,
os.path.join(bucket_dir, uuid + '.bam')]
subprocess.check_call(s3am_command)
if __name__ == "__main__":
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store input_URLs for downloading
inputs = {'config': args.config,
'ref.fa': args.ref,
'ref.fa.amb': args.amb,
'ref.fa.ann': args.ann,
'ref.fa.bwt': args.bwt,
'ref.fa.pac': args.pac,
'ref.fa.sa': args.sa,
'ref.fa.fai': args.fai,
'ssec':args.ssec,
'output_dir': args.out,
's3_dir': args.s3_dir,
'cpu_count': None}
# Launch jobs
Job.Runner.startToil(Job.wrapJobFn(batch_start, inputs), args)
| {
"content_hash": "d0386243a4def62ae0053e6d2f2d43a2",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 120,
"avg_line_length": 39.023809523809526,
"alnum_prop": 0.6064673581452105,
"repo_name": "BD2KGenomics/toil-scripts",
"id": "67ee6b41bd0fd6f41d6253100ba4a0f9626ef21c",
"size": "11512",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3068"
},
{
"name": "Python",
"bytes": "284050"
},
{
"name": "Shell",
"bytes": "8996"
}
],
"symlink_target": ""
} |
"""SCons.Tool.fortran
Tool-specific initialization for a generic Posix f77/f90 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/fortran.py 3603 2008/10/10 05:46:45 scons"
import re
import string
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_fortran_to_env
compilers = ['f95', 'f90', 'f77']
def generate(env):
add_all_to_env(env)
add_fortran_to_env(env)
fc = env.Detect(compilers) or 'f77'
env['SHFORTRAN'] = fc
env['FORTRAN'] = fc
def exists(env):
return env.Detect(compilers)
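# Typical use from an SConstruct (illustrative):
#   env = Environment(tools=['default', 'fortran'])
#   env.Program('hello', ['hello.f90'])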
| {
"content_hash": "cfdcdd0cf8397ff8d516a574ae1dedba",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 83,
"avg_line_length": 34.05263157894737,
"alnum_prop": 0.7557959814528593,
"repo_name": "frew/simpleproto",
"id": "ef6be0a2a399f712d4988bb5e64259e6ab026f69",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons-local-1.1.0/SCons/Tool/fortran.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "30217"
},
{
"name": "Protocol Buffer",
"bytes": "1960"
},
{
"name": "Python",
"bytes": "1704215"
}
],
"symlink_target": ""
} |
import argparse
import pysam
import sys
import Tools
import pdb
DELTA = 50
def Close(a, b):
return (abs(a-b) < DELTA)
# ---*==========*-----
# --*===*----------
fromTo = 0
contains = 1
partial = 2
def Order(aln, boundaries):
tb = boundaries[aln.tName]
qb = boundaries[aln.title]
if (Close(tb[0], aln.tStart) and not Close(qb[0], aln.qStart)):
return (aln.title, aln.tName, fromTo, aln.strand, aln.tStart)
if (Close(tb[1], aln.tEnd) and not Close(qb[1], aln.qEnd)):
return (aln.tName, aln.title, fromTo, aln.strand, aln.qStart)
if (not Close(tb[0], aln.tStart) and not Close(tb[1], aln.tEnd) and
Close(qb[0], aln.qStart) and Close(qb[1], aln.qEnd)):
return (aln.tName, aln.title, contains, aln.strand, aln.tStart)
if (Close(tb[0], aln.tStart) and Close(tb[1], aln.tEnd) and
not Close(qb[0], aln.qStart) and not Close(qb[1], aln.qEnd)):
return (aln.tName, aln.title, contains, aln.strand, aln.qStart)
else:
# partial alignment
if (aln.tStart - tb[0] > aln.qStart - qb[0]):
return (aln.tName, aln.title, partial, aln.tStart)
else:
return (aln.title, aln.tName, partial, aln.qStart)
class Vertex:
def __init__(self, pos, read):
self.pos = [pos]
self.read = [read]
self.outRed = []
self.inRed = []
self.black = [] # black edges are multi-directional
self.index = 0
self.visited = 0
class Read:
def __init__(self, name, vertexList):
self.name = name
self.vertexList = vertexList
def AssignIndices(reads):
index = 0
for read in reads.values():
for vertex in read:
vertex.index = index
index += 1
def InitializeReadVertexList(length, seqIndex, k):
vlist = [Vertex(i,seqIndex) for i in range(0,length-k+1)]
for i in range(1,length-k):
vlist[i-1].outRed.append(vlist[i])
vlist[i-1].inRed.append(vlist[i])
return vlist
ap = argparse.ArgumentParser(description="Build simple consensus from pairwise alignments.")
ap.add_argument("sam", help="Input sam file")
ap.add_argument("--k", help="Block size.", default=7, type=int)
ap.add_argument("--kgraph", help="Write the graph out.", default=None)
ap.add_argument("--rgraph", help="Write the reads-graph out.", default=None)
ap.add_argument("--minCount", help="Only print vertices if they have at least this many out edges.", default=0, type=int)
ap.add_argument("--delta", help="Wiggle room for alignment endpoints.", dest='DELTA')
args = ap.parse_args()
def SetBoundaries(samFileName, readBoundaries):
samFile = open(samFileName, 'r')
line = samFile.readline()
while (line[0] == '@'):
line = samFile.readline()
while (line != ""):
aln = Tools.SAMEntry(line)
line = samFile.readline()
if (aln.tName == aln.title):
continue
if (aln.tName not in readBoundaries):
readBoundaries[aln.tName] = [aln.tStart, aln.tEnd]
else:
readBoundaries[aln.tName][0] = min(readBoundaries[aln.tName][0], aln.tStart)
readBoundaries[aln.tName][1] = max(readBoundaries[aln.tName][1], aln.tEnd)
if (aln.title not in readBoundaries):
readBoundaries[aln.title] = [aln.qStart, aln.qEnd]
else:
readBoundaries[aln.title][0] = min(readBoundaries[aln.title][0], aln.qStart)
readBoundaries[aln.title][1] = max(readBoundaries[aln.title][1], aln.qEnd)
def StoreReadGraph(samFileName, readBoundaries, readGraph):
samFile = open(samFileName, 'r')
line = samFile.readline()
while (line[0] == '@'):
line = samFile.readline()
while (line is not None and line != ""):
aln = Tools.SAMEntry(line)
if (aln.tName == aln.title):
line = samFile.readline()
continue
order = Order(aln, readBoundaries)
if (order is None):
print "t: {},{}: {}-{} q: {},{}: {}-{} strand {}".format(readBoundaries[aln.tName][0],readBoundaries[aln.tName][1],
aln.tStart, aln.tEnd,
readBoundaries[aln.title][0],readBoundaries[aln.title][1],
aln.qStart, aln.qEnd, aln.strand)
else:
if (order[0] not in readGraph):
readGraph[order[0]] = {}
if (order[1] not in readGraph[order[0]]):
readGraph[order[0]][order[1]] = order[2]
line = samFile.readline()
def MakeForward(pos, strand, length, k):
if (strand == 0):
return pos
else:
return length - pos - k
def PrintGraph(reads, readNames, graphName):
graphOut = open(graphName, 'w')
# first print read edges.
# graphOut.write("graph qvgraph {\n")
for read in reads.values():
for i in range(len(read)-1):
if (len(read[i].black) >= args.minCount and
len(read[i+1].black) >= args.minCount):
graphOut.write(" {} red {} \n".format(read[i].index, read[i+1].index))
# now output black edges
for name,read in reads.iteritems():
for i in range(len(read)):
if (len(read[i].black) < args.minCount):
continue
for blackEdge in read[i].black:
destRead = reads[readNames[blackEdge[0]]]
if (name > destRead):
pass
if (len(destRead[blackEdge[1]].black) >= args.minCount):
# graphOut.write(" {} -> {} [ color=\"black\" ]\n".format(read[i].index, destRead[blackEdge[1]].index))
graphOut.write(" {} black {} \n".format(read[i].index, destRead[blackEdge[1]].index))
graphOut.close()
# graphOut.write("}\n")
samFile= open(args.sam, 'r')
line = samFile.readline()
reads = {}
readIndex = 0
readLengths = {}
readIndex = {}
index = 0
readNames = []
readBoundaries = {}
SetBoundaries(args.sam, readBoundaries)
for k,v in readBoundaries.iteritems():
print k + " " + str(v)
readGraph = {}
print "storing read graph."
StoreReadGraph(args.sam, readBoundaries, readGraph)
if (args.rgraph is not None):
graphOut = open(args.rgraph, 'w')
for src in readGraph.keys():
for dest in readGraph[src].keys():
if (readGraph[src][dest] != contains):
graphOut.write(str(src) + " " + str(readGraph[src][dest]) + " " + str(dest) + "\n")
graphOut.close()
while (line[0] == '@'):
if (line[0:3] == "@SQ"):
vals = line[3:].split()
name = Tools.GetKV("SN:", vals)
length = int(Tools.GetKV("LN:", vals))
reads[name] = InitializeReadVertexList(length, index, args.k)
print "initialized " + name
readLengths[name] = length
readIndex[name] = index
readNames.append(name)
index +=1
line = samFile.readline()
#
# Make a list that may be indexed by number and not name.
#
readList = [ Read(name, reads[name]) for name in readNames ]
prevQuery = ""
while (line != ""):
aln = Tools.SAMEntry(line)
qPos = aln.qStart - 1
tPos = aln.tStart - 1
tList = reads[aln.tName]
qList = reads[aln.title]
tLength = len(tList)+args.k-1
qLength = len(qList)+args.k-1
tIndex = readIndex[aln.tName]
qIndex = readIndex[aln.title]
q = 0
t = 0
qForward = 0
if (aln.tName == aln.title):
line = samFile.readline()
print "skipping () " + aln.title + " " + aln.tName
continue
#
# Apply some filtering for which alignments are considered.
#
# rule 1. target must be longer than the query
    if (len(tList) < len(qList)):
        line = samFile.readline()
        continue
# rule 2. only one alignment to the target per query
if (aln.title != prevQuery):
targetAlignCount = {}
else:
if (aln.tName not in targetAlignCount):
targetAlignCount[aln.tName] = 1
else:
print "skipping (multi) " + aln.title + " " + aln.tName
continue
prevQuery = aln.title
nMatches = 0
for i in range(len(aln.ops)):
if (aln.ops[i] == 'M'):
# the alignment contains a sufficient match.
if (aln.lengths[i] >= args.k):
for i1 in range(aln.lengths[i] - args.k + 1):
q = qPos + i1
t = tPos + i1
qForward = MakeForward(q, aln.strand, qLength, args.k)
if (t > len(tList)):
print "error! " + str(i) + " " + str(len(aln.ops))+ " " + str(t) + " " + str(len(tList))
sys.exit(1)
if (qForward >= len(qList)):
print "error! " + str(i) + " " + str(qForward) + " " + str(len(qList))
sys.exit(1)
tList[t].black.append((qIndex, qForward, aln.strand))
qList[qForward].black.append((tIndex, t, aln.strand))
nMatches += 1
qPos += aln.lengths[i]
tPos += aln.lengths[i]
elif (aln.ops[i] == 'D'):
tPos += aln.lengths[i]
elif (aln.ops[i] == 'I'):
qPos += aln.lengths[i]
line = samFile.readline()
def AddVertexToConflictSet(vertex, conflictSet):
i = vertex.read[0]
if (i not in conflictSet):
conflictSet[i] = []
conflictSet[i].append(vertex.pos[0])
def SearchForConflict(readList, vertex, conflictSet, level=1):
AddVertexToConflictSet(vertex, conflictSet)
vertex.visited = 1
# print str(vertex.read)
# print str(vertex.pos)
# print "level: " + str(level)
for outEdge in vertex.black:
# print str(level) + " visiting " + str(outEdge)
if (readList[outEdge[0]].vertexList[outEdge[1]].visited == 0):
readList[outEdge[0]].vertexList[outEdge[1]].visited = 1
SearchForConflict(readList, readList[outEdge[0]].vertexList[outEdge[1]], conflictSet, level+1)
print "done visiting all out edges for " + str(vertex)
def IsVertexConflicted(readList, conflictSet):
for k,v in conflictSet.iteritems():
if (len(v) > 1):
print k
print v
print readList[k].name + " " + ', '.join([str(a) for a in v])
return True
return False
for vlist in reads.values():
for i in range(len(vlist)):
if (len(vlist[i].black) > 0):
conflictSet = {}
SearchForConflict(readList, vlist[i], conflictSet)
if (IsVertexConflicted(readList, conflictSet)):
print "found conflict"
print str(conflictSet)
print "Searched for conflicts for " + str(i)
#for name in reads.keys():
# i = 0;
#
# prevPos = 0
# vlist = reads[name]
# nVertex = len(vlist)
# steps = []
# while (i < nVertex):
# while (i < nVertex and len(vlist[i].black) == 0):
# i += 1
# if (i < nVertex):
# if (len(vlist[i].black) == 1):
# other = vlist[i].black[0][0]
# else:
# other = "*"
# steps.append((i - prevPos, len(vlist[i].black), other))
# prevPos = i
# i+=1
# steps.append((i - prevPos, 0, "*"))
# print str(steps)
AssignIndices(reads)
if (args.kgraph is not None):
PrintGraph(reads, readNames, args.kgraph)
| {
"content_hash": "e525cdafc8c4ec206b828ec44efb0835",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 128,
"avg_line_length": 35.25766871165644,
"alnum_prop": 0.5535931790499391,
"repo_name": "EichlerLab/chm1_scripts",
"id": "ee59daa09a0a08bd21e52e6c8317f1de6d729b7d",
"size": "11518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SamCons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "50662"
},
{
"name": "Java",
"bytes": "488"
},
{
"name": "OpenEdge ABL",
"bytes": "45"
},
{
"name": "Perl",
"bytes": "1402"
},
{
"name": "Python",
"bytes": "364036"
},
{
"name": "R",
"bytes": "50744"
},
{
"name": "Shell",
"bytes": "22061"
}
],
"symlink_target": ""
} |
"""Copyright 2022 The MediaPipe Authors.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| {
"content_hash": "3532df09bd4ffc7d8a1e23d79a1f3957",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 37.0625,
"alnum_prop": 0.7841483979763912,
"repo_name": "google/mediapipe",
"id": "6a840518948927aa2cd812411fceebade73546da",
"size": "593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mediapipe/tasks/python/audio/core/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "514"
},
{
"name": "C",
"bytes": "76928"
},
{
"name": "C++",
"bytes": "10897312"
},
{
"name": "Dockerfile",
"bytes": "2659"
},
{
"name": "HTML",
"bytes": "4090"
},
{
"name": "Java",
"bytes": "1151252"
},
{
"name": "JavaScript",
"bytes": "6380"
},
{
"name": "Makefile",
"bytes": "1625"
},
{
"name": "Objective-C",
"bytes": "125458"
},
{
"name": "Objective-C++",
"bytes": "131706"
},
{
"name": "Python",
"bytes": "1272093"
},
{
"name": "Shell",
"bytes": "19580"
},
{
"name": "Starlark",
"bytes": "1277085"
},
{
"name": "TypeScript",
"bytes": "169026"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mocker', '0005_auto_20171012_2103'),
]
operations = [
migrations.RemoveField(
model_name='mocker',
name='custom_header',
),
migrations.AddField(
model_name='customheader',
name='mocker',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='mocker.Mocker', verbose_name='Mocker'),
preserve_default=False,
),
]
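# Illustrative usage: this migration is applied with Django's migrate command,
# e.g. `python manage.py migrate mocker 0006_auto_20171012_2124`, or simply
# `python manage.py migrate` to apply everything pending for the project.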
| {
"content_hash": "e4b4b3f3152115b42b649a7ed347cc32",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 137,
"avg_line_length": 26.958333333333332,
"alnum_prop": 0.6058732612055642,
"repo_name": "paveu/api_mocker",
"id": "b9732a8072a87f16cb833e14e4af7bfe246bbb41",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apimocker/mocker/migrations/0006_auto_20171012_2124.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "98"
},
{
"name": "HTML",
"bytes": "3020"
},
{
"name": "JavaScript",
"bytes": "4258"
},
{
"name": "Python",
"bytes": "44113"
},
{
"name": "Shell",
"bytes": "7565"
}
],
"symlink_target": ""
} |
"""A module for registering all known parsers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import parsers
from grr_response_core.lib.parsers import chrome_history
from grr_response_core.lib.parsers import config_file
from grr_response_core.lib.parsers import cron_file_parser
from grr_response_core.lib.parsers import eficheck_parser
from grr_response_core.lib.parsers import firefox3_history
from grr_response_core.lib.parsers import ie_history
from grr_response_core.lib.parsers import linux_cmd_parser
from grr_response_core.lib.parsers import linux_file_parser
from grr_response_core.lib.parsers import linux_pam_parser
from grr_response_core.lib.parsers import linux_release_parser
from grr_response_core.lib.parsers import linux_service_parser
from grr_response_core.lib.parsers import linux_software_parser
from grr_response_core.lib.parsers import linux_sysctl_parser
from grr_response_core.lib.parsers import osx_file_parser
from grr_response_core.lib.parsers import osx_launchd
from grr_response_core.lib.parsers import windows_persistence
from grr_response_core.lib.parsers import windows_registry_parser
from grr_response_core.lib.parsers import wmi_parser
def Register():
"""Adds all known parsers to the registry."""
# pyformat: disable
# Command parsers.
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Dpkg", linux_cmd_parser.DpkgCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Dmidecode", linux_cmd_parser.DmidecodeCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Eficheck", eficheck_parser.EficheckCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Mount", config_file.MountCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"OsxSpHardware", osx_file_parser.OSXSPHardwareDataTypeParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Ps", linux_cmd_parser.PsCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Rpm", linux_cmd_parser.RpmCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"SshdConfig", config_file.SshdConfigCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Sysctl", linux_sysctl_parser.SysctlCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"YumList", linux_cmd_parser.YumListCmdParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"YumRepolist", linux_cmd_parser.YumRepolistCmdParser)
# Grep parsers.
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Passwd", linux_file_parser.PasswdBufferParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"Netgroup", linux_file_parser.NetgroupBufferParser)
# WMI query parsers.
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiEventConsumer", wmi_parser.WMIEventConsumerParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiInstalledSoftware", wmi_parser.WMIInstalledSoftwareParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiHotfixesSoftware", wmi_parser.WMIHotfixesSoftwareParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiUser", wmi_parser.WMIUserParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiLogicalDisks", wmi_parser.WMILogicalDisksParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiCsp", wmi_parser.WMIComputerSystemProductParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WmiInterfaces", wmi_parser.WMIInterfacesParser)
# Registry value parsers.
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinCcs", windows_registry_parser.CurrentControlSetKBParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinCodepage", windows_registry_parser.CodepageParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinEnvironment", windows_registry_parser.WinEnvironmentParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WinServices", windows_registry_parser.WinServicesParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinSystemDrive", windows_registry_parser.WinSystemDriveParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinSystemRoot", windows_registry_parser.WinSystemRootParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinTimezone", windows_registry_parser.WinTimezoneParser)
# Registry parsers.
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinAllUsersProfileEnvVar",
windows_registry_parser.AllUsersProfileEnvironmentVariable)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinProfileDirEnvVar",
windows_registry_parser.ProfilesDirectoryEnvironmentVariable)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WinUserSids",
windows_registry_parser.WinUserSids)
# Artifact file parsers.
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"DarwinPersistenceMechanism",
osx_launchd.DarwinPersistenceMechanismsParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register(
"WindowsPersistenceMechanism",
windows_persistence.WindowsPersistenceMechanismsParser)
# Registry multi-parsers.
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"WinUserSpecialDirs", windows_registry_parser.WinUserSpecialDirs)
# Artifact file multi-parsers.
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register(
"OsxUsers", osx_file_parser.OSXUsersParser)
# File parsers.
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"ChromeHistory", chrome_history.ChromeHistoryParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"CronAtAllAllowDeny", config_file.CronAtAllowDenyParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"CronTab", cron_file_parser.CronTabParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"FirefoxHistory", firefox3_history.FirefoxHistoryParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"IeHistory", ie_history.IEHistoryParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"LinuxWtmp", linux_file_parser.LinuxWtmpParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Mtab", config_file.MtabParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Netgroup", linux_file_parser.NetgroupParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"NfsExports", config_file.NfsExportsParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Ntpd", config_file.NtpdParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"PackageSource", config_file.PackageSourceParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Passwd", linux_file_parser.PasswdParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Path", linux_file_parser.PathParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"SshdConfigFile", config_file.SshdConfigParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"Sudoers", config_file.SudoersParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"OsxLaunchdPlist", osx_file_parser.OSXLaunchdPlistParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"OSXInstallHistoryPlist", osx_file_parser.OSXInstallHistoryPlistParser)
try:
from debian import deb822 # pylint: disable=g-import-not-at-top
parsers.SINGLE_FILE_PARSER_FACTORY.Register(
"DpkgStatusParser",
lambda: linux_software_parser.DebianPackagesStatusParser(deb822))
except ImportError:
pass
# File multi-parsers.
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxBaseShadow", linux_file_parser.LinuxBaseShadowParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxLsbInit", linux_service_parser.LinuxLSBInitParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxXinetd", linux_service_parser.LinuxXinetdParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxSysvInit", linux_service_parser.LinuxSysVInitParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxPam", linux_pam_parser.PAMParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"LinuxReleaseInfo", linux_release_parser.LinuxReleaseParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"PciDevicesInfo", linux_file_parser.PCIDevicesInfoParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"ProcSys", linux_sysctl_parser.ProcSysParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register(
"Rsyslog", config_file.RsyslogParser)
# pyformat: enable
| {
"content_hash": "3b4f4d776b6ae833ffa2d059fb21748f",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 77,
"avg_line_length": 45.32432432432432,
"alnum_prop": 0.7773404889683959,
"repo_name": "dunkhong/grr",
"id": "8fb919ba7f99a08c0af2a862abeb8e78204950df",
"size": "8407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/core/grr_response_core/lib/parsers/all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import json
import unittest
import unittest.mock as mock
from blinkpy.web_tests.flake_suppressor import web_tests_expectations
from blinkpy.web_tests.flake_suppressor import web_tests_queries as queries
from blinkpy.web_tests.flake_suppressor import web_tests_tag_utils as tag_utils
from blinkpy.web_tests.flake_suppressor import web_tests_results as results_module
from flake_suppressor_common import unittest_utils as uu
from flake_suppressor_common import tag_utils as common_tag_utils
class WebTestQueriesUnittest(unittest.TestCase):
def setUp(self) -> None:
common_tag_utils.SetTagUtilsImplementation(tag_utils.WebTestsTagUtils)
expectation_processor = (
web_tests_expectations.WebTestsExpectationProcessor())
result_processor = results_module.WebTestsResultProcessor(
expectation_processor)
self._querier_instance = queries.WebTestsBigQueryQuerier(
1, 'project', result_processor)
self._querier_instance._submitted_builds = set(
['build-1234', 'build-2345'])
self._subprocess_patcher = mock.patch(
'flake_suppressor_common.queries.subprocess.run')
self._subprocess_mock = self._subprocess_patcher.start()
self.addCleanup(self._subprocess_patcher.stop)
def testIgnoredTags(self) -> None:
"""Tests that ignored tags are removed and their counts merged."""
def SideEffect(*_, **kwargs) -> uu.FakeProcess:
query = kwargs['input']
if 'submitted_builds' in query:
# Try results.
query_result = [
{
'typ_tags': ['linux', 'trusty'],
'test_name': 'foo/bar/linux',
'result_count': '25',
},
{
'typ_tags': ['linux', 'x86'],
'test_name': 'foo/bar/linux',
'result_count': '50',
},
]
else:
# CI results.
query_result = [{
'typ_tags': ['win', 'x86'],
'test_name': 'foo/bar/windows',
'result_count': '100',
}, {
'typ_tags': ['win'],
'test_name': 'foo/bar/windows',
'result_count': '50',
}, {
'typ_tags': ['mac'],
'test_name': 'foo/bar/mac',
'result_count': '200',
}, {
'typ_tags': ['linux'],
'test_name': 'foo/bar/linux',
'result_count': '300',
}]
return uu.FakeProcess(stdout=json.dumps(query_result))
self._subprocess_mock.side_effect = SideEffect
result_counts = self._querier_instance.GetResultCounts()
for rc, val in result_counts.items():
print(rc)
print(val)
expected_result_counts = {
tuple(['win']): {
'foo/bar/windows': 150,
},
tuple(['mac']): {
'foo/bar/mac': 200,
},
tuple(['linux']): {
'foo/bar/linux': 375,
},
}
self.assertEqual(result_counts, expected_result_counts)
self.assertEqual(self._subprocess_mock.call_count, 2)
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "b7638d3e018cf919a014c3021127bc12",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 82,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.5155937052932761,
"repo_name": "chromium/chromium",
"id": "a2a7f1b96e31152bb0e1bc799615083990ec13a5",
"size": "3696",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/tools/blinkpy/web_tests/flake_suppressor/web_tests_queries_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
This module is deprecated. Please use `airflow.providers.google.cloud.operators.gcs_to_gcs`.
"""
import warnings
from airflow.providers.google.cloud.operators.gcs_to_gcs import GCSToGCSOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.gcs_to_gcs`.",
DeprecationWarning, stacklevel=2
)
class GoogleCloudStorageToGoogleCloudStorageOperator(GCSToGCSOperator):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.operators.gcs_to_gcs.GCSToGCSOperator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.operators.gcs_to_gcs.GCSToGCSOperator`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
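# Illustrative sketch (not part of the original shim): importing the legacy class
# still works, but constructing it raises a DeprecationWarning and the resulting
# task behaves exactly like GCSToGCSOperator. The bucket/object values below are
# hypothetical placeholders.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       op = GoogleCloudStorageToGoogleCloudStorageOperator(
#           task_id="copy_files",
#           source_bucket="example-src-bucket",
#           source_object="data/*.csv",
#           destination_bucket="example-dst-bucket",
#       )
#   assert any(issubclass(w.category, DeprecationWarning) for w in caught)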
| {
"content_hash": "b041cb32cca70f5db9a59f32b9efafe0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 99,
"avg_line_length": 32,
"alnum_prop": 0.6979166666666666,
"repo_name": "wileeam/airflow",
"id": "309353f57c728874f22172400054327c585b1386",
"size": "1651",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/gcs_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
import pytest
import yaml
import json
with open('tests/vars/sensor.vars', 'r') as f:
try:
        yml_vars = yaml.safe_load(f)
except yaml.YAMLError as e:
print(e)
@pytest.mark.parametrize("group_id", yml_vars.get('kafka_groups'))
def test_connection_to_kafka(host, group_id):
results = host.run('/opt/kafka/bin/kafka-consumer-groups.sh '
'--bootstrap-server {host}:{p} '
'--describe --group {gid}'.format(host='localhost', p='9092', gid=group_id))
assert 'Error:' not in results.stdout
def test_logstash_connection_to_elasticsearch(host):
result = host.run('curl {host}:{p}/_node/stats/pipelines/main'.format(host='localhost', p='9600'))
result = json.loads(result.stdout)
assert result['pipelines']['main']['events']['out'] != '0'
@pytest.mark.parametrize("topic", yml_vars.get('topics'))
def test_kafka_topics(host, topic):
results = host.run('/opt/kafka/bin/kafka-topics.sh --list --zookeeper {host}:{p}'.format(
host='localhost', p='2181'))
assert topic in results.stdout
| {
"content_hash": "5687813aefdfbf59e1549800f67e50d4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 102,
"avg_line_length": 40,
"alnum_prop": 0.628030303030303,
"repo_name": "rocknsm/SimpleRock",
"id": "20a789a54c22b11d67edca884b8a6cf02dc46600",
"size": "1473",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_sensor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "73411"
},
{
"name": "Ruby",
"bytes": "70764"
},
{
"name": "Shell",
"bytes": "5345"
}
],
"symlink_target": ""
} |
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
        return root is None or self.isMirror(root.left, root.right)
    def isMirror(self, left, right):
        # Both empty, or both present with equal values and cross-mirrored children.
        if left is None or right is None:
            return left is right
        return (left.val == right.val and self.isMirror(left.left, right.right)
                and self.isMirror(left.right, right.left))
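# Minimal usage sketch (illustrative): TreeNode below stands in for the node
# definition that LeetCode normally supplies alongside this solution.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == "__main__":
    # 1 with children [2 (right=3), 2 (left=3)] is a mirror of itself.
    root = TreeNode(1, TreeNode(2, right=TreeNode(3)), TreeNode(2, left=TreeNode(3)))
    print(Solution().isSymmetric(root))  # expected: True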
| {
"content_hash": "f282e6acadd44560555cf61a0b9e37dc",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 193,
"avg_line_length": 43.45454545454545,
"alnum_prop": 0.6213389121338913,
"repo_name": "FeiZhan/Algo-Collection",
"id": "4c55dce4f8ce7c526915f346cfdcb9996e6a32fb",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "answers/leetcode/Symmetric Tree/Symmetric Tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "892410"
},
{
"name": "Java",
"bytes": "743448"
},
{
"name": "JavaScript",
"bytes": "3093"
},
{
"name": "Python",
"bytes": "93383"
},
{
"name": "Shell",
"bytes": "891"
}
],
"symlink_target": ""
} |
import re
def readBP(filename):
    # Skip the header up to and including the ORIGIN line, then keep only the
    # base-pair characters from the remainder of the file.
    with open(filename, 'r') as bp_file:
        line = bp_file.readline()
        while line and line.find("ORIGIN") == -1:
            line = bp_file.readline()
        contents = bp_file.read()
    return re.sub('[^AGCTagct]', '', contents).upper()
def writeBP(filename, bp):
    # Write a minimal ApE/GenBank-style record: header lines followed by the sequence.
    with open(filename, 'w') as bp_file:
        bp_file.write("LOCUS\n")
        bp_file.write("ORIGIN\n")
        bp_file.write(bp)
#def readAPE()
#def writeAPE()
| {
"content_hash": "9437430268c077b80062fa7514d407ac",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 19.61111111111111,
"alnum_prop": 0.6543909348441926,
"repo_name": "tricorder42/pcr-simulator-2015",
"id": "a545074dcae396a0790da042a75aaec8ba29f4a3",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16175"
}
],
"symlink_target": ""
} |
__author__ = "Satish Palaniappan"
import os
import pickle
from os import path
import learner
'''
Pickle Formats
2#review,pos_neg
3#review,pos_neg,score
4#review,pos_neg,score,title
'''
def learn_save(newLearner,feature_i,label_i):
newLearner.clearOld()
newLearner.loadXY(feature_index = feature_i,label_index = label_i)
print("Vecs Loaded!")
newLearner.featurizeXY()
print("Features extracted!")
newLearner.reduceDimension()
newLearner.trainModel()
print("Model Trained!")
newLearner.saveModel()
print("Model Saved!")
def save_obj(obj, name ):
with open(name, 'wb') as f:
pickle.dump(obj, f, protocol=2)
def load_obj(name ):
with open( name, 'rb') as f:
return pickle.load(f)
def chkMake(dirPath):
if not os.path.exists(dirPath):
os.makedirs(dirPath)
return dirPath
def getFilesDir(pathDir):
files = [f for f in os.listdir(pathDir) if ".pkl" in f]
fd = os.listdir(pathDir)
directories = list(set(fd)-set(files))
return[files,directories]
baseModelPath = "../model/review/"
baseDataPath = "../data/pickle/review/"
files,directories = getFilesDir(baseDataPath)
topDomains = directories + files # All top level domains
# topDomains = ["places.pkl"]
subDomainMap = dict()
for di in directories:
subDomainMap[di] = os.listdir(baseDataPath + di + "/")
# tempPaths = []
for domain in topDomains:
data_path = []
if ".pkl" in domain:
data_path.append (baseDataPath + domain)
else:
for i in subDomainMap[domain]:
data_path.append (baseDataPath + domain + "/" + i)
for p in data_path:
print(p)
# newLearner = learner.TextLearner(p)
with learner.TextLearner(p) as newLearner:
data_type = newLearner.load_data()
if len(data_path) == 1:
model_path = baseModelPath + domain.replace(".pkl","") + "/"
else:
model_path = baseModelPath + domain + "/" + p.replace(baseDataPath + domain + "/","").replace(".pkl","") + "/"
if data_type == 2 or data_type == 3:
chkMake(model_path)
newLearner.addModelDetails(model_p = model_path)
learn_save(newLearner,0,1)
# tempPaths.append(model_path)
else:
for j in ["title","review_text"]:
chkMake(model_path + j + "/")
newLearner.addModelDetails(model_p = (model_path + j + "/"))
if j == "title":
learn_save(newLearner,3,1)
# tempPaths.append((model_path + j + "/"))
else:
learn_save(newLearner,0,1)
# tempPaths.append((model_path + j + "/"))
# for t in tempPaths:
# print(t)
| {
"content_hash": "4fd4adc3c28a883109a15cc9f7c458ef",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 114,
"avg_line_length": 25.102040816326532,
"alnum_prop": 0.6626016260162602,
"repo_name": "tpsatish95/Universal-MultiDomain-Sentiment-Classifier",
"id": "0d3e08357defbda5324b482da391345c4028d109",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Trainers/masterReviewsTrainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "4112"
},
{
"name": "Python",
"bytes": "42676"
},
{
"name": "Thrift",
"bytes": "407"
}
],
"symlink_target": ""
} |
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.stdpy import threading
import errno
from panda3d.core import TP_normal
import select
import socket
import urlparse
from toontown.rpc.ToontownRPCConnection import ToontownRPCConnection
class ToontownRPCServer:
notify = directNotify.newCategory('ToontownRPCServer')
def __init__(self, endpoint, handler):
self.handler = handler
# Parse the endpoint:
url = urlparse.urlparse(endpoint)
# We only support the http scheme:
if url.scheme != 'http':
self.notify.warning('Invalid scheme for endpoint: ' + str(url.scheme))
# Parse the hostname, and port:
self.hostname = url.hostname or 'localhost'
self.port = url.port or 8080
self.listenerSocket = None
self.connections = {}
self.dispatchThreads = {}
def getUniqueName(self):
"""
Returns a unique identifier for this instance. This is primarily used
for creating unique task names.
"""
return 'ToontownRPCServer-' + str(id(self))
def start(self, useTaskChain=False):
"""
Serve until stop() is called.
"""
taskChain = None
if useTaskChain and (not taskMgr.hasTaskChain('ToontownRPCServer')):
taskChain = 'ToontownRPCServer'
taskMgr.setupTaskChain(taskChain, numThreads=1, threadPriority=TP_normal)
# Create a socket to listen for incoming connections:
self.listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listenerSocket.setblocking(0)
self.listenerSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listenerSocket.bind((self.hostname, self.port))
self.listenerSocket.listen(5)
# Start polling:
taskName = self.getUniqueName() + '-pollTask'
taskMgr.add(self.pollTask, taskName, taskChain=taskChain)
def stop(self):
"""
Stop serving.
"""
# Stop polling:
taskName = self.getUniqueName() + '-pollTask'
assert taskMgr.hasTaskNamed(taskName)
taskMgr.remove(taskName)
# Close any open connections:
for k, v in self.connections.items():
v.close()
del self.connections[k]
# Shutdown and close the listener socket:
try:
self.listenerSocket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self.listenerSocket.close()
self.listenerSocket = None
def dispatchThread(self, socket):
"""
Call dispatchUntilEmpty() on the provided socket's connection, and then
clean up.
"""
connection = self.connections.get(socket)
if connection is None:
return
connection.dispatchUntilEmpty()
connection.close()
if socket in self.connections:
del self.connections[socket]
if socket in self.dispatchThreads:
del self.dispatchThreads[socket]
def pollOnce(self):
"""
Poll for incoming data once.
"""
try:
rlist = select.select([self.listenerSocket] + self.connections.keys(), [], [])[0]
except:
# It's likely that one or more of our sockets is no longer valid.
# If it's our listener socket, we can't continue:
try:
self.listenerSocket.fileno()
except:
self.notify.error('The listener socket is no longer valid!')
# Otherwise, discard the faulty sockets, and wait for the next poll
# iteration:
for socket in self.connections.keys():
try:
socket.fileno()
socket.getpeername()
except:
del self.connections[socket]
if socket in self.dispatchThreads:
del self.dispatchThreads[socket]
return
if self.listenerSocket in rlist:
self.handleNewConnection()
for socket in rlist:
connection = self.connections.get(socket)
if connection is None:
continue
if socket in self.dispatchThreads:
continue
self.dispatchThreads[socket] = threading.Thread(
target=self.dispatchThread, args=[socket])
self.dispatchThreads[socket].start()
def pollTask(self, task):
"""
Continuously poll for incoming data.
"""
self.pollOnce()
return task.cont
def handleNewConnection(self):
"""
Handle an incoming connection.
"""
try:
conn = self.listenerSocket.accept()[0]
self.connections[conn] = ToontownRPCConnection(conn, self.handler)
except socket.error, e:
if e.args[0] != errno.EWOULDBLOCK:
raise e
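# Usage sketch (illustrative; the handler class name is hypothetical): the server
# takes an http endpoint plus a handler object whose methods serve the RPC calls,
# and is driven by Panda3D's global task manager.
#
#   server = ToontownRPCServer('http://127.0.0.1:8080', ToontownRPCHandler())
#   server.start(useTaskChain=True)   # poll on a dedicated task chain
#   ...
#   server.stop()                     # close connections and the listener socket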
| {
"content_hash": "d5e59aba63318306d1d0535b7bb72c7b",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 93,
"avg_line_length": 32.4025974025974,
"alnum_prop": 0.593186372745491,
"repo_name": "linktlh/Toontown-journey",
"id": "1f1a1b913c81b7bc736cad271b9af8950101d8b5",
"size": "4990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/rpc/ToontownRPCServer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import mock
import sys
import pytest
from org.company.frobnicator import Frobnicator
def fake_get(url, headers=None):
return mock.Mock(json=[{'apples': 4}])
@mock.patch('org.company.frobnicator.requests.get', side_effect=fake_get)
def test_queries(requests_get):
""" Shows how requests.get is mocked in a test.
NB:
requests is not instantiated in a constructor
"""
Frobnicator().queries()
if __name__ == "__main__":
pytest.main(sys.argv)
| {
"content_hash": "e3d0d4d20a2c14925e457a3d17a49dc6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 73,
"avg_line_length": 21.863636363636363,
"alnum_prop": 0.6756756756756757,
"repo_name": "noelevans/sandpit",
"id": "5963ae1c296dd627f390b3d3034af3d9fa6c5c94",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_examples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7565"
},
{
"name": "HTML",
"bytes": "4003856"
},
{
"name": "Julia",
"bytes": "2285"
},
{
"name": "Jupyter Notebook",
"bytes": "257479"
},
{
"name": "OpenEdge ABL",
"bytes": "1071"
},
{
"name": "Perl",
"bytes": "1003"
},
{
"name": "Python",
"bytes": "383797"
},
{
"name": "R",
"bytes": "16913"
},
{
"name": "Shell",
"bytes": "11957"
},
{
"name": "TypeScript",
"bytes": "112"
},
{
"name": "Vim script",
"bytes": "5639"
}
],
"symlink_target": ""
} |
"""hotspots visualization."""
from apps.managers.team_mgr.models import Team
def supply(request, page_name):
""" Handle the request for viz_hotspots widget."""
_ = request
_ = page_name
all_lounges = Team.objects.order_by('name').all()
return {
"all_lounges": all_lounges,
}
| {
"content_hash": "f245a460ca28a2ad8ac10af18f1619a9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 19.875,
"alnum_prop": 0.6163522012578616,
"repo_name": "KendyllD/boukenda-project",
"id": "65a1b67df04cedaf9137afb37e905a327027fe90",
"size": "318",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "makahiki/apps/widgets/viz_hotspots/views.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
__author__ = 'Christoph Reimers'
__email__ = '[email protected]'
__version__ = '0.1.0.b1'
| {
"content_hash": "6bcf210d770df5399a1f3aeeecb153e7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 54,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.672566371681416,
"repo_name": "creimers/djangocms_oembed",
"id": "7e4c8ab007b931fa972c19127c64c57aa68e70a9",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cmsplugin_oembed/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "36"
},
{
"name": "HTML",
"bytes": "545"
},
{
"name": "Python",
"bytes": "3931"
}
],
"symlink_target": ""
} |
import os
import sys
import time
from . import error
class Logger:
""" Abstract class for all logger implementations.
Concrete classes will log messages using various methods,
e.g. write to a file.
"""
(ERROR, INFO, DEBUG) = (0, 1, 2)
TYPES = ("ERROR", "Info", "Debug")
debug_level = ERROR
def __init__(self, debug_level):
self.debug_level = int(debug_level)
def log(self, string, level):
""" Log a message """
if level > self.debug_level:
return
self._actual_log(string, level)
def _actual_log(self, string, level):
""" Actually perform the logging (to be implemented in subclasses) """
pass
def shutdown(self):
""" Action to perform when closing the logger """
pass
@staticmethod
def time():
""" Get a nicely formatted time string """
return time.strftime("%a %d %Y %H:%M:%S", time.localtime())
def format(self, string, level):
""" Format the error message in a standard way """
display_level = self.TYPES[level]
return "- [%s] {%s} %s" % (display_level, self.time(), string)
class WindowLogger(Logger):
""" Log messages to a window.
The window object is passed in on construction, but
only created if a message is written.
"""
def __init__(self, debug_level, window):
self.window = window
super(WindowLogger, self).__init__(debug_level)
def shutdown(self):
if self.window is not None:
self.window.is_open = False
def _actual_log(self, string, level):
if not self.window.is_open:
self.window.create("rightbelow 6new")
self.window.write(self.format(string, level)+"\n")
class FileLogger(Logger):
""" Log messages to a window.
The window object is passed in on construction, but
only created if a message is written.
"""
def __init__(self, debug_level, filename):
self.filename = os.path.expanduser(filename)
self.f = None
super(FileLogger, self).__init__(debug_level)
def __open(self):
try:
self.f = open(self.filename, 'w', encoding='utf-8')
except IOError as e:
raise error.LogError("Invalid file name '%s' for log file: %s"
% (self.filename, e))
except:
raise error.LogError("Error using file '%s' as a log file: %s"
% (self.filename, sys.exc_info()[0]))
def shutdown(self):
if self.f is not None:
self.f.close()
def _actual_log(self, string, level):
if self.f is None:
self.__open()
self.f.write(self.format(string, level)+"\n")
self.f.flush()
class Log:
loggers = {}
def __init__(self, string, level=Logger.INFO):
Log.log(string, level)
@classmethod
def log(cls, string, level=Logger.INFO):
for logger in cls.loggers.values():
logger.log(string, level)
@classmethod
def set_logger(cls, logger):
k = logger.__class__.__name__
if k in cls.loggers:
cls.loggers[k].shutdown()
cls.loggers[k] = logger
@classmethod
def remove_logger(cls, type):
if type in cls.loggers:
cls.loggers[type].shutdown()
return True
print("Failed to find logger %s in list of loggers" % type)
return False
@classmethod
def shutdown(cls):
for logger in cls.loggers.values():
logger.shutdown()
cls.loggers = {}
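# Usage sketch (illustrative; the log path and levels are arbitrary): register a
# FileLogger once, then route all messages through the Log facade.
#
#   Log.set_logger(FileLogger(Logger.DEBUG, "~/vdebug.log"))
#   Log("connection established")              # defaults to Logger.INFO
#   Log("raw protocol payload", Logger.DEBUG)
#   Log.shutdown()                             # flush and close all loggers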
| {
"content_hash": "d9743783b64a48083515ac157222c142",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 78,
"avg_line_length": 26.5,
"alnum_prop": 0.5701997780244173,
"repo_name": "joonty/vdebug",
"id": "7db9a5ff79988598943d083e2bf0434fda2a677e",
"size": "3604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python3/vdebug/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "5738"
},
{
"name": "Python",
"bytes": "157139"
},
{
"name": "Ruby",
"bytes": "17383"
},
{
"name": "Shell",
"bytes": "4733"
},
{
"name": "Vim script",
"bytes": "13259"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from celery.utils.log import get_task_logger
from djcelery_transactions import task
logger = get_task_logger(__name__)
@task
def send_message(message_id):
from .models import Message, STATUS_SENT, STATUS_FAILED
message = Message.objects.select_related('org', 'user').get(pk=message_id)
client = message.org.get_temba_client()
try:
client.create_broadcast(message.text, contacts=[c.uuid for c in message.recipients.all()])
message.status = STATUS_SENT
message.save(update_fields=('status',))
logger.info("Sent message %d from user #%d" % (message.pk, message.sent_by.pk))
except Exception:
message.status = STATUS_FAILED
message.save(update_fields=('status',))
logger.error("Sending message %d failed" % message.pk, exc_info=1)
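# Illustrative call site (the `message` variable is hypothetical): callers queue
# the task by primary key so the worker re-loads fresh state from the database.
#
#   send_message.delay(message.pk)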
| {
"content_hash": "09d8eae3b5f94afa6ff939f91af852ed",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 98,
"avg_line_length": 30.464285714285715,
"alnum_prop": 0.6811254396248535,
"repo_name": "ewheeler/tracpro",
"id": "358e6919cdddf583ee5f50fc04e76cd5f8900f4a",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracpro/msgs/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23025"
},
{
"name": "CoffeeScript",
"bytes": "9891"
},
{
"name": "HTML",
"bytes": "36894"
},
{
"name": "Python",
"bytes": "233852"
}
],
"symlink_target": ""
} |
class Machine( object ):
def __init__( self, memory, register_file, debug, reset_addr=0x400 ):
self.rf = register_file
self.mem = memory
self.pc = reset_addr
self .debug = debug
self.rf .debug = debug
self.mem.debug = debug
# common registers
self.status = 0
self.stats_en = 0
self.num_insts = 0
self.stat_num_insts = 0
# we need a dedicated running flag because status could be 0 on a
# syscall_exit
self.running = True
def fetch_pc( self ):
return self.pc
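# Construction sketch (illustrative; Memory, RegisterFile and Debug stand for the
# concrete pydgin objects that each ISA's simulator wires up):
#
#   state = Machine(Memory(), RegisterFile(), Debug(), reset_addr=0x1000)
#   pc    = state.fetch_pc()   # 0x1000 until execution updates self.pc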
| {
"content_hash": "bfa67bd3a87487a66e55c7b52bd0ef25",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 24.869565217391305,
"alnum_prop": 0.5769230769230769,
"repo_name": "futurecore/pydgin",
"id": "ead1dd03438573d4132833ee44d295c59c53fabc",
"size": "888",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pydgin/machine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "274757"
},
{
"name": "C",
"bytes": "317741"
},
{
"name": "C++",
"bytes": "15025"
},
{
"name": "M4",
"bytes": "8046"
},
{
"name": "Makefile",
"bytes": "31048"
},
{
"name": "Python",
"bytes": "364884"
},
{
"name": "Shell",
"bytes": "24284"
}
],
"symlink_target": ""
} |
"""Command for removing public keys to users."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.users import client as users_client
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.compute.users import utils as user_utils
from googlecloudsdk.command_lib.util import gaia
class RemoveKeys(base_classes.NoOutputAsyncMutator):
"""Remove a public key from a Google Compute Engine user.
*{command}* removes public keys from a Google Compute Engine user.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--fingerprints',
type=arg_parsers.ArgList(min_length=1),
metavar='FINGERPRINT',
help='The fingerprints of the public keys to remove from the user.')
user_utils.AddUserArgument(parser, '', custom_help=(
'If provided, the name of the user to remove public keys from. '
'Else, the default user will be used.'))
@property
def service(self):
return self.clouduseraccounts.users
@property
def method(self):
return 'RemovePublicKey'
@property
def resource_type(self):
return 'users'
@property
def messages(self):
return self.clouduseraccounts.MESSAGES_MODULE
def CreateRequests(self, args):
name = args.name
if not name:
name = gaia.GetDefaultAccountName(self.http)
user_ref = self.clouduseraccounts_resources.Parse(
name, collection='clouduseraccounts.users')
if args.fingerprints:
fingerprints = args.fingerprints
else:
fetcher = users_client.UserResourceFetcher(
self.clouduseraccounts, self.project, self.http, self.batch_url)
fingerprints = [k.fingerprint for k in
fetcher.LookupUser(user_ref.Name()).publicKeys]
# Generate warning before deleting.
prompt_list = ['[{0}]'.format(fingerprint) for fingerprint in fingerprints]
prompt_title = ('The following public keys will be removed from the user ' +
user_ref.Name())
utils.PromptForDeletionHelper(None, prompt_list, prompt_title=prompt_title)
requests = []
for fingerprint in fingerprints:
request = self.messages.ClouduseraccountsUsersRemovePublicKeyRequest(
project=self.project,
fingerprint=fingerprint,
user=user_ref.Name())
requests.append(request)
return requests
RemoveKeys.detailed_help = {
'EXAMPLES': """\
To remove all public keys for a user, run:
$ {command} example-user
To remove a specific public key, first describe the user
(using `gcloud compute users describe example-user`) to determine the
fingerprints of the public keys you wish
to remove. Then run:
$ {command} example-user --fingerprints b3ca856958b524f3f12c3e43f6c9065d
""",
}
| {
"content_hash": "5379b9688f7d86e605f97244de9d645f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 82,
"avg_line_length": 31.225806451612904,
"alnum_prop": 0.6924931129476584,
"repo_name": "KaranToor/MA450",
"id": "4e0527fb8f41eaa8d564e1fabe33fe4708219fa4",
"size": "3499",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/compute/users/remove_keys.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from os import path
from celery import shared_task
from celery import group
from celery import chain
from tarfile import open as tar_open
from json import loads as json_loads
from jsonschema import validate as json_validate
from jsonschema import ValidationError as JsonValidationError
from re import compile as re_compile
from re import search as re_search
from hashlib import sha1
from shutil import copyfile
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
from django.conf import settings
from author.utils import splitext_all
from author.models import UploadedTaskDeployStatus
from author.models import UploadedTaskFile
from author.models import UploadedTaskImage
from author.models import Connection
from game.models import Task
@shared_task
def deploy_uploaded_task(uploaded_task):
workflow = chain(format_checks.s() |
untar_task.s() |
move_files.s() |
group([email_docker_deployers.s(), make_task.s()])
)
workflow.delay(uploaded_task)
@shared_task
def format_checks(uploaded_task):
if not uploaded_task.is_uploaded():
return uploaded_task
error_status = UploadedTaskDeployStatus(
uploaded_task=uploaded_task,
phase=UploadedTaskDeployStatus.PHASE_FORMAT_CHECK
)
untarred_path, ext = splitext_all(uploaded_task.path)
supported_exts = ['.tar.gz', '.tar.bz2', '.tar']
if ext not in supported_exts:
msg = 'Unsupported format "{ext}". Should be one of {supported}.'
msg = msg.format(ext=ext, supported=', '.join(supported_exts))
error_status.message = msg
error_status.save()
return uploaded_task
tar_file = None
try:
tar_file = tar_open(uploaded_task.path)
except Exception, ex:
error_status.message = 'Error opening tar file: %s' % str(ex)
error_status.save()
return uploaded_task
for name in tar_file.getnames():
if not name.startswith('task'):
msg = ('There is a file "{filename}" that is not within "task" '
'directory. All files should reside within "task" '
'directory.')
msg = msg.format(filename=name)
error_status.message = msg
error_status.save()
return uploaded_task
task_file_member = None
try:
task_file_member = tar_file.getmember('task/task.json')
except KeyError:
msg = ('File "task/task.json" is not found. This file must present in '
'each uploaded task archive')
error_status.message = msg
error_status.save()
return uploaded_task
if not task_file_member.isfile():
msg = ('File "task/task.json" is not a file, but it is expected to be '
'a file')
error_status.message = msg
error_status.save()
return uploaded_task
task_file = tar_file.extractfile(task_file_member)
task_json = None
try:
task_json = json_loads(task_file.read())
except Exception, ex:
msg = 'Error reading JSON object from "task/task.json": %s' % str(ex)
error_status.message = msg
error_status.save()
return uploaded_task
schema_json = None
try:
schema_file = file(path.join(settings.AUX_FILES_DIR,
'task-schema',
'v0.2',
'task-schema-0.2.json'))
schema_str = schema_file.read()
schema_file.close()
schema_json = json_loads(schema_str)
except Exception, ex:
msg = 'Error reading JSON schema file: %s' % str(ex)
error_status.message = msg
error_status.save()
return uploaded_task
try:
json_validate(task_json, schema_json)
except JsonValidationError, ex:
msg = 'File "task/task.json" is incorrect: %s' % str(ex)
error_status.message = msg
error_status.save()
return uploaded_task
mentioned_files = [
task_json['desc_ru'],
task_json['desc_en'],
task_json['writeup_ru'],
task_json['writeup_en'],
]
mentioned_images = []
if 'images' in task_json:
for image_obj in task_json['images']:
mentioned_images.append(image_obj['filename'])
mentioned_files += mentioned_images
for mentioned_file in mentioned_files:
archive_path = 'task/%s' % mentioned_file
try:
tar_file.getmember(archive_path)
except KeyError:
msg = ('The file "{filename}" mentioned in "task/task.json" '
'does not appear to be in the archive. Please, check '
'the content of the uploaded archive')
msg = msg.format(filename=archive_path)
error_status.message = msg
error_status.save()
return uploaded_task
template_strings = {}
for fieldname in ['desc_ru', 'desc_en', 'writeup_ru', 'writeup_en']:
filename = task_json[fieldname]
member = tar_file.getmember('task/%s' % filename)
member_file = tar_file.extractfile(member)
template_strings[filename] = member_file.read()
existing_filenames = []
file_re = re_compile(r'^task/(.+)$')
for filename in tar_file.getnames():
if filename == 'task/task.json':
continue
so = re_search(file_re, filename)
if so:
existing_filenames.append(so.group(1))
images_filenames = mentioned_images
tcp_ports_map = {}
udp_ports_map = {}
if 'images' in task_json:
for image in task_json['images']:
if 'tcp_ports' in image:
tcp_ports_map[image['filename']] = image['tcp_ports']
if 'udp_ports' in image:
udp_ports_map[image['filename']] = image['udp_ports']
def check_links(template_str, existing_filenames, images_filenames,
tcp_ports_map, udp_ports_map):
existing_filenames, tmp_filenames = [], existing_filenames
for filename in tmp_filenames:
existing_filenames.append(filename.replace('.', '_'))
images_filenames, tmp_filenames = [], images_filenames
for filename in tmp_filenames:
images_filenames.append(filename.replace('.', '_'))
tcp_ports_map, tmp_ports_map = {}, tcp_ports_map
for filename in tmp_ports_map:
tcp_ports_map[filename.replace('.', '_')] = tmp_ports_map[filename]
udp_ports_map, tmp_ports_map = {}, udp_ports_map
for filename in tmp_ports_map:
udp_ports_map[filename.replace('.', '_')] = tmp_ports_map[filename]
temp_var_re = re_compile(r'\{\{[^}]*\}\}')
image_var_re = re_compile(r'^.+_(?:tcp|udp)\d+$')
temp_vars = []
image_temp_vars = []
so = re_search(temp_var_re, template_str)
while so:
temp_var = so.group()[2:-2].strip()
if temp_var.count('.') == 0:
temp_vars.append(temp_var)
elif temp_var.count('.') == 1:
image_name, image_field = temp_var.split('.')
if image_field not in ['host', 'port']:
raise Exception('Unexpected image field in template '
'variable reference: %s. The fields '
'"host" and "port" are the only fields '
'that are expected.' % temp_var)
if not re_search(image_var_re, image_name):
raise Exception('Found docker-image template variable '
'with unexpected ending "%s". '
'Expected endings are "_tcpXXXXX" and '
'"_udpXXXXX".' % image_name)
image_temp_vars.append(image_name)
else:
raise Exception('Invalid template variable. '
'Too many dots: %s.' % temp_var)
template_str = template_str[so.end():]
so = re_search(temp_var_re, template_str)
for temp_var in temp_vars:
if temp_var not in existing_filenames:
msg = ('Found template variable "{filename}" that '
'references a file that is not present in the '
'uploaded archive')
raise Exception(msg.format(filename=temp_var))
tcp_port_re = re_compile(r'^(.+)_tcp(\d+)$')
udp_port_re = re_compile(r'^(.+)_udp(\d+)$')
for temp_var in image_temp_vars:
so = re_search(tcp_port_re, temp_var)
if so:
name = so.group(1)
if name not in images_filenames:
msg = ('Found template variable "{filename}" that '
'references a docker image that is not present '
'in the uploaded archive')
raise Exception(msg.format(filename=name))
port = int(so.group(2))
if name not in tcp_ports_map or port not in tcp_ports_map[name]:
raise Exception('Found docker-image template variable '
'"%s" that references tcp-port %s that is '
'not mentioned in the corresponding '
'"tcp_ports" field in "task/task.json" '
'file' % (temp_var, port))
so = re_search(udp_port_re, temp_var)
if so:
name = so.group(1)
if name not in images_filenames:
msg = ('Found template variable "{filename}" that '
'references a docker image that is not present '
'in the uploaded archive')
raise Exception(msg.format(filename=name))
port = int(so.group(2))
if name not in udp_ports_map or port not in udp_ports_map[name]:
raise Exception('Found docker-image template variable '
'"%s" that references udp-port %s that is '
'not mentioned in the corresponding '
'"udp_ports" field in "task/task.json" '
'file' % (temp_var, port))
for filename, template_str in template_strings.iteritems():
try:
check_links(template_str, existing_filenames, images_filenames,
tcp_ports_map, udp_ports_map)
except Exception, ex:
msg = 'Error checking links in "{filename}" file: {content}.'
error_status.message = msg.format(filename=filename,
content=str(ex))
error_status.save()
return uploaded_task
uploaded_task.format_checks_passed = True
uploaded_task.save()
return uploaded_task
@shared_task
def untar_task(uploaded_task):
if not uploaded_task.is_correct():
return uploaded_task
error_status = UploadedTaskDeployStatus(
uploaded_task=uploaded_task,
phase=UploadedTaskDeployStatus.PHASE_UNTAR,
)
uploaded_path = uploaded_task.path
untarred_path, ext = splitext_all(uploaded_path)
try:
tar_file = tar_open(uploaded_task.path)
except Exception, ex:
error_status.message = 'Error opening tar file: %s' % str(ex)
error_status.save()
return uploaded_task
try:
tar_file.extractall(path=untarred_path)
except Exception, ex:
error_status.message = 'Error untarring file: %s' % str(ex)
error_status.save()
return uploaded_task
task_json = None
try:
task_json_filename = path.join(untarred_path, 'task', 'task.json')
task_json_file = file(task_json_filename, 'rb')
task_json_str = task_json_file.read()
task_json_file.close()
task_json = json_loads(task_json_str)
except Exception, ex:
error_status.message = 'Error opening "task.json" file: %s' % str(ex)
error_status.save()
return uploaded_task
images_filenames = []
tcp_ports_map = {}
udp_ports_map = {}
if 'images' in task_json:
for image in task_json['images']:
images_filenames.append(image['filename'])
if 'tcp_ports' in image:
ports_string = ','.join(map(str, image['tcp_ports']))
tcp_ports_map[image['filename']] = ports_string
else:
tcp_ports_map[image['filename']] = ''
if 'udp_ports' in image:
ports_string = ','.join(map(str, image['udp_ports']))
udp_ports_map[image['filename']] = ports_string
else:
udp_ports_map[image['filename']] = ''
for filename in tar_file.getnames():
if tar_file.getmember(filename).isdir():
continue
base_filename = path.basename(filename)
if base_filename == 'task.json':
continue
if base_filename in images_filenames:
uti = UploadedTaskImage(
uploaded_task=uploaded_task,
original_name=base_filename,
tcp_ports_str=tcp_ports_map[base_filename],
udp_ports_str=udp_ports_map[base_filename],
untarred_path=path.join(untarred_path, filename),
)
uti.save()
for tcp_port in tcp_ports_map[base_filename].split(","):
if tcp_port != "":
conn = Connection(
uploaded_image=uti,
protocol="tcp",
sport=tcp_port,
)
conn.save()
for udp_port in udp_ports_map[base_filename].split(","):
if udp_port != "":
conn = Connection(
uploaded_image=uti,
protocol="udp",
sport=udp_port,
)
conn.save()
else:
utf = UploadedTaskFile(
uploaded_task=uploaded_task,
original_name=base_filename,
untarred_path=path.join(untarred_path, filename),
)
utf.save()
uploaded_task.untarred_path = untarred_path
uploaded_task.save()
return uploaded_task
@shared_task
def move_files(uploaded_task):
if not uploaded_task.is_untarred():
return uploaded_task
for task_file_obj in uploaded_task.files.all():
try:
sha1obj = sha1()
task_file = file(task_file_obj.untarred_path, 'rb')
chunk = task_file.read(4096)
while len(chunk) > 0:
sha1obj.update(chunk)
chunk = task_file.read(4096)
task_file.close()
original_name, original_ext = splitext_all(
task_file_obj.original_name
)
new_name = '%s_%s%s' % (
original_name,
sha1obj.hexdigest(),
original_ext,
)
new_path = path.join(settings.UPLOADED_FILES_DIR, new_name)
copyfile(task_file_obj.untarred_path, new_path)
task_file_obj.relative_path = new_name
task_file_obj.save()
except Exception, ex:
msg = 'Error copying file "{filename}": {reason}'.format(
filename=task_file_obj.original_name,
reason=str(ex),
)
error_status = UploadedTaskDeployStatus(
uploaded_task=uploaded_task,
phase=UploadedTaskDeployStatus.PHASE_MOVE_FILES,
)
error_status.message = msg
error_status.save()
return uploaded_task
for task_image_obj in uploaded_task.images.all():
try:
sha1obj = sha1()
task_image = file(task_image_obj.untarred_path, 'rb')
chunk = task_image.read(4096)
while len(chunk) > 0:
sha1obj.update(chunk)
chunk = task_image.read(4096)
task_image.close()
original_name, original_ext = splitext_all(
task_image_obj.original_name
)
new_name = '%s_%s%s' % (
original_name,
sha1obj.hexdigest(),
original_ext,
)
new_path = path.join(settings.UPLOADED_IMAGES_DIR, new_name)
copyfile(task_image_obj.untarred_path, new_path)
task_image_obj.relative_path = new_name
task_image_obj.save()
except Exception, ex:
msg = 'Error copying image file "{filename}": {reason}'.format(
filename=task_image_obj.original_name,
reason=str(ex),
)
error_status = UploadedTaskDeployStatus(
uploaded_task=uploaded_task,
phase=UploadedTaskDeployStatus.PHASE_MOVE_FILES,
)
error_status.message = msg
error_status.save()
return uploaded_task
return uploaded_task
@shared_task
def email_docker_deployers(uploaded_task):
if not uploaded_task.has_docker_images():
return uploaded_task
return uploaded_task
@shared_task
def make_task(uploaded_task):
if not uploaded_task.files_are_deployed():
return uploaded_task
error_status = UploadedTaskDeployStatus(
uploaded_task=uploaded_task,
phase=UploadedTaskDeployStatus.PHASE_MAKE_TASK,
)
task_json = None
try:
task_json_filename = path.join(uploaded_task.untarred_path,
'task',
'task.json')
task_json_file = file(task_json_filename, 'rb')
task_json_str = task_json_file.read()
task_json_file.close()
task_json = json_loads(task_json_str)
except Exception, ex:
error_status.message = 'Error loading "task.json" file: %s' % str(ex)
error_status.save()
return uploaded_task
task_params = {
'title_ru': task_json['title_ru'],
'title_en': task_json['title_en'],
'category': task_json['category'],
'cost': task_json['cost'],
'flag': task_json['flag'],
}
for field in ['desc_ru', 'desc_en', 'writeup_ru', 'writeup_en']:
filename = task_json[field]
fileobj = uploaded_task.files.get(original_name=filename)
filepath = fileobj.get_full_path()
tmpfile = file(filepath, 'rb')
contents = tmpfile.read()
tmpfile.close()
task_params[field] = contents
check_re = re_compile(r'^(?P<mods>it|ti|i|t|)(?P<method>equals|regex)$')
so = re_search(check_re, task_json['flag_comp'])
mods = so.group('mods')
method = so.group('method')
if 't' in mods:
task_params['is_trimmed_check'] = True
if 'i' in mods:
task_params['is_case_insensitive_check'] = True
if method == 'equals':
task_params['check'] = Task.EQUALS_CHECK
if method == 'regex':
task_params['check'] = Task.REGEX_CHECK
task = Task(**task_params)
task.save()
uploaded_task.task = task
uploaded_task.save()
redis_publisher = RedisPublisher(facility='tasks', broadcast=True)
message = RedisMessage('tasks')
redis_publisher.publish_message(message)
return uploaded_task
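# Illustrative call site (not shown in this module): once the uploaded archive is
# saved, the view hands the model instance to the pipeline entry point, which
# chains the phases above and fans out to the email and task-creation steps.
#
#   deploy_uploaded_task.delay(uploaded_task)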
| {
"content_hash": "2107c96f555d233f16d8c22bf35d9bb1",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 80,
"avg_line_length": 38.32943469785575,
"alnum_prop": 0.5502212276865178,
"repo_name": "stefantsov/blackbox3",
"id": "5141733ac3e1654f58215cdce52af758d987bdef",
"size": "19663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "author/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2241"
},
{
"name": "HTML",
"bytes": "39706"
},
{
"name": "JavaScript",
"bytes": "133810"
},
{
"name": "Python",
"bytes": "95948"
}
],
"symlink_target": ""
} |
from embeddings.embedding import Embedding
class ConcatEmbedding(Embedding):
"""
A concatenation of multiple embeddings
"""
def __init__(self, embeddings, default='none'):
"""
Args:
embeddings: embeddings to concatenate.
default: how to embed words that are out of vocabulary. Can use zeros, return ``None``, or generate random between ``[-0.1, 0.1]``.
"""
for e in embeddings:
assert isinstance(e, Embedding), '{} is not an Embedding object'.format(e)
assert default in {'none', 'random', 'zero'}
self.embeddings = embeddings
self.default = default
def emb(self, word, default=None):
if default is None:
default = self.default
emb = []
for e in self.embeddings:
emb += e.emb(word, default=default)
return emb
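# Usage sketch (illustrative; assumes this package's GloveEmbedding and
# KazumaCharEmbedding backends are installed and importable): the result is the
# word vector of each backend appended in order.
#
#   from embeddings import GloveEmbedding, KazumaCharEmbedding
#   e = ConcatEmbedding([GloveEmbedding(), KazumaCharEmbedding()], default='zero')
#   vec = e.emb('canada')   # len(vec) == sum of the backends' dimensions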
| {
"content_hash": "cf463db7cbda10e7b9f6275d0798231b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 143,
"avg_line_length": 30.482758620689655,
"alnum_prop": 0.5871040723981901,
"repo_name": "vzhong/embeddings",
"id": "edcc081f94d20e80a119ebbb2cf55d6e70473d68",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "embeddings/concat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "23065"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
import sys
from os import path
def run(verbosity=1,doctest=False):
"""Run NetworkX tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
"""
try:
import nose
except ImportError:
raise ImportError(\
"The nose package is needed to run the NetworkX tests.")
sys.stderr.write("Running NetworkX tests:")
nx_install_dir=path.join(path.dirname(__file__), path.pardir)
argv=[' ','--verbosity=%d'%verbosity,
'-w',nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest','--doctest-extension=txt'])
nose.run(argv=argv)
if __name__=="__main__":
run()
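# Usage sketch (not from the original file): with nose installed, a
# doctest-enabled, more verbose run of the suite looks like
#     from networkx.tests.test import run
#     run(verbosity=2, doctest=True)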
| {
"content_hash": "918ba2af5065e325e6a1615e2da7ea5f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 24.205882352941178,
"alnum_prop": 0.5978128797083839,
"repo_name": "rainest/dance-partner-matching",
"id": "5e0565a814912ed796d5c3717ea6020a1fc599b1",
"size": "845",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networkx/tests/test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1745363"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
} |
class NDLIBError (Exception):
"""General Errors"""
def __init__ (self, value):
self.value = value
def __str__ (self):
return repr(self.value)
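# Usage sketch (not part of the original module):
if __name__ == '__main__':
    try:
        raise NDLIBError('unsupported datatype')
    except NDLIBError as err:
        print('caught: {0}'.format(err))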
| {
"content_hash": "67f9f16ad4973b87cf7b88e89ff0f99a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 29,
"avg_line_length": 19.75,
"alnum_prop": 0.6075949367088608,
"repo_name": "openconnectome/ocplib",
"id": "b8b785d9d0a426c93a9981bdd8d2d708a763bbc6",
"size": "754",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ndliberror.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "42724"
},
{
"name": "C++",
"bytes": "23454"
},
{
"name": "Python",
"bytes": "30154"
}
],
"symlink_target": ""
} |
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .galactic import Galactic
from .utils import EQUINOX_B1950, EQUINOX_J2000
# Galactic to/from FK4/FK5 ----------------------->
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, Galactic)
def fk5_to_gal(fk5coord, galframe):
# need precess to J2000 first
return (
rotation_matrix(180 - Galactic._lon0_J2000.degree, 'z')
@ rotation_matrix(90 - Galactic._ngp_J2000.dec.degree, 'y')
@ rotation_matrix(Galactic._ngp_J2000.ra.degree, 'z')
@ fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK5)
def _gal_to_fk5(galcoord, fk5frame):
return matrix_transpose(fk5_to_gal(fk5frame, galcoord))
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, Galactic)
def fk4_to_gal(fk4coords, galframe):
return (
rotation_matrix(180 - Galactic._lon0_B1950.degree, 'z')
@ rotation_matrix(90 - Galactic._ngp_B1950.dec.degree, 'y')
@ rotation_matrix(Galactic._ngp_B1950.ra.degree, 'z')
@ fk4coords._precession_matrix(fk4coords.equinox, EQUINOX_B1950)
)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK4NoETerms)
def gal_to_fk4(galcoords, fk4frame):
return matrix_transpose(fk4_to_gal(fk4frame, galcoords))
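# Illustrative note (not part of the original module): once these transforms
# are registered on the frame transform graph, high-level coordinate objects
# pick them up automatically, e.g.
#     >>> from astropy import units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='fk5')
#     >>> c.galactic   # routed through fk5_to_gal() above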
| {
"content_hash": "a281ac5e74f28fd6d33e7f02935b6b30",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 39.80487804878049,
"alnum_prop": 0.7346813725490197,
"repo_name": "larrybradley/astropy",
"id": "a9d4fb44e7fcabd9e6f5f002ec66173e21327741",
"size": "1697",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/coordinates/builtin_frames/galactic_transforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from latinol import *
| {
"content_hash": "1a96075c6f4d9f9c851dc8cbc697d0c4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "hso/latinol.py",
"id": "215af86d064ba0a8f47794f2a55844036109aa59",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latinol/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1960"
}
],
"symlink_target": ""
} |
"""
This module is the test suite of the update CLI option for bowl.
Created on 27 July 2014
@author: Charlie Lewis
"""
from bowl.cli_opts import update
class TestClass:
"""
This class is responsible for all tests in the update CLI option.
"""
def test_cli_opts_update(self):
assert 1
| {
"content_hash": "de15efe8721963868b80a7d0f92a6496",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 69,
"avg_line_length": 20.8,
"alnum_prop": 0.6826923076923077,
"repo_name": "cglewis/bowl",
"id": "c5759230fd4e1f01b1bd243aa56a37dde85c9a65",
"size": "312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cli_opts/test_cli_opts_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16424"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Makefile",
"bytes": "6754"
},
{
"name": "Python",
"bytes": "217531"
},
{
"name": "Shell",
"bytes": "10141"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import flask
import os
import threading
import time
import webbrowser
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_path="/unused")
PORT=5009
http_server = HTTPServer(WSGIContainer(app))
"""this is a simple server to facilitate developing the docs. by
serving up static files from this server, we avoid the need to use a
symlink.
"""
@app.route('/')
def welcome():
return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/en/latest/index.html"> Index</a>
"""
@app.route('/en/latest/<path:filename>')
def send_pic(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/"), filename)
def open_browser():
    # Runs in a separate thread; wait briefly so the server is up first
time.sleep(0.5)
webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
def serve_http():
http_server.listen(PORT)
IOLoop.instance().start()
def shutdown_server():
ioloop = IOLoop.instance()
ioloop.add_callback(ioloop.stop)
print("Asked Server to shut down.")
def ui():
time.sleep(0.5)
input("Press <ENTER> to exit...\n")
if __name__ == "__main__":
print("\nStarting Bokeh plot server on port %d..." % PORT)
print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
t_server = threading.Thread(target=serve_http)
t_server.start()
t_browser = threading.Thread(target=open_browser)
t_browser.start()
ui()
shutdown_server()
t_server.join()
t_browser.join()
print("Server shut down.")
| {
"content_hash": "213acc707d104c4a1949b3e31ab26953",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 81,
"avg_line_length": 23.32,
"alnum_prop": 0.6718124642652945,
"repo_name": "phobson/bokeh",
"id": "4c7cae008a699a9c4ad63905c2f04b1dc669f4c8",
"size": "1749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sphinx/docserver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "406385"
},
{
"name": "CoffeeScript",
"bytes": "1027741"
},
{
"name": "HTML",
"bytes": "44799"
},
{
"name": "JavaScript",
"bytes": "9840"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2044764"
},
{
"name": "Shell",
"bytes": "15839"
},
{
"name": "TypeScript",
"bytes": "25843"
}
],
"symlink_target": ""
} |
from unittest.mock import patch, MagicMock
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from test_plus.test import TestCase
from ..models import (
Minion,
MinionData
)
from ..views import(
MinionCreateView,
MinionDetailView
)
class TestMinionCreateView(TestCase):
def setUp(self):
self.user = self.make_user()
self.factory = RequestFactory()
def test_get(self):
request = self.factory.get(reverse('minions:minion-create'))
request.user = self.user
response = MinionCreateView.as_view()(request)
self.assertEqual(response.status_code, 200)
# self.assertEqual(response.context_data['user'], self.data)
# self.assertEqual(response.context_data['request'], request)
    # NOTE: mock.patch needs an importable dotted path; the module path below
    # is inferred from the repository layout and the relative import above.
    @patch('overlord.minions.models.Minion.save', MagicMock(name="save"))
def test_post(self):
data = {
'name': 'test_minion'
}
request = self.factory.post(reverse('minions:minion-create'), data)
request.user = self.user
response = MinionCreateView.as_view()(request)
self.assertEqual(response.status_code, 302)
self.assertTrue(Minion.save.called)
self.assertEqual(Minion.save.call_count, 1)
| {
"content_hash": "58dabf117a55bcbdc7c6d8d59708779b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 28.976744186046513,
"alnum_prop": 0.6653290529695024,
"repo_name": "Farforr/overlord",
"id": "7211d3623d9e2700a60f1a403b3786f8eb810e82",
"size": "1246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "overlord/minions/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "848"
},
{
"name": "HTML",
"bytes": "24768"
},
{
"name": "JavaScript",
"bytes": "3150"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "73524"
},
{
"name": "Shell",
"bytes": "4542"
}
],
"symlink_target": ""
} |
from lxml import html
from base_test import BaseTest
from web import model
class UsersTestCase(BaseTest):
"""
Contains tests for the users blueprint
"""
def _user_add(self, init_user_email):
rv = self.app.post('/admin/users/add/', data={
"name": "Johnny Test",
"email": init_user_email,
"password": "password",
"confirm_password": "password",
"misc_data": "{}",
"contest_ids": "",
"user_role_ids": "",
}, follow_redirects=True)
self.assertEqual(rv.status_code, 200, "Failed to add user")
rv = self.app.get('/admin/users/')
root = html.fromstring(rv.data)
page_user_emails = [x.text for x in root.cssselect(".user_email")]
self.assertIn(init_user_email, page_user_emails, "User was not added")
def _user_edit(self, old_email, new_email):
user_id = model.User.query.filter_by(email=old_email).one().id
rv = self.app.post('/admin/users/add/', data={
"user_id": user_id,
"name": "Johnny Test",
"email": new_email,
"username": "",
"password": "",
"confirm_password": "",
"misc_data": "{}",
"contest_ids": "",
"user_role_ids": "",
}, follow_redirects=True)
self.assertEqual(rv.status_code, 200, "Failed to edit user")
rv = self.app.get('/admin/users/')
root = html.fromstring(rv.data)
page_user_emails = [x.text for x in root.cssselect(".user_email")]
self.assertIn(new_email, page_user_emails, "User was not edited")
def _user_del(self, email):
user_id = model.User.query.filter_by(email=email).one().id
rv = self.app.get('/admin/users/del/{}'.format(user_id), follow_redirects=True)
self.assertEqual(rv.status_code, 200, "Failed to delete user")
rv = self.app.get('/admin/users/')
root = html.fromstring(rv.data)
page_user_emails = [x.text for x in root.cssselect(".user_email")]
self.assertNotIn(email, page_user_emails, "User was not deleted")
def test_user_crud(self):
init_user_email = "[email protected]"
edit_user_email = "[email protected]"
self.login("[email protected]", "pass")
self._user_add(init_user_email)
self._user_edit(init_user_email, edit_user_email)
self._user_del(edit_user_email)
| {
"content_hash": "e1c278f2e8901514363b31d28fe2039c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 87,
"avg_line_length": 35.710144927536234,
"alnum_prop": 0.5722402597402597,
"repo_name": "snobbs/code_court",
"id": "afc4f19905ecb16e5b914f61818b1273bef781c9",
"size": "2464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code_court/courthouse/test/users_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119040"
},
{
"name": "HTML",
"bytes": "523394"
},
{
"name": "JavaScript",
"bytes": "1912233"
},
{
"name": "Python",
"bytes": "158426"
},
{
"name": "Shell",
"bytes": "399"
},
{
"name": "Vue",
"bytes": "19720"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lobpcg',parent_package,top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| {
"content_hash": "b40ad665db6e0f8bd2a0944d7cbfcc33",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 29.214285714285715,
"alnum_prop": 0.7041564792176039,
"repo_name": "ryfeus/lambda-packs",
"id": "5015d3252bdd86c37a2e4a27c26d134ff7b09ec3",
"size": "409",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Tensorflow_LightGBM_Scipy_nightly/source/scipy/sparse/linalg/eigen/lobpcg/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
"""
Compile YOLO-V2 in DarkNet Models
=================================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This article is an introductory tutorial to deploy darknet models with NNVM.
All the required models and libraries will be downloaded from the internet by the script.
This script runs the YOLO-V2 model and draws the resulting bounding boxes.
Darknet parsing depends on the CFFI and OpenCV (cv2) libraries,
so please install CFFI and OpenCV before executing this script.
.. code-block:: bash
pip install cffi
pip install opencv-python
"""
import nnvm
import nnvm.frontend.darknet
import nnvm.testing.darknet
import matplotlib.pyplot as plt
import numpy as np
import tvm
import sys
from ctypes import *
from tvm.contrib.download import download
from nnvm.testing.darknet import __darknetffi__
# Model name
MODEL_NAME = 'yolo'
######################################################################
# Download required files
# -----------------------
# Download cfg and weights file if first time.
CFG_NAME = MODEL_NAME + '.cfg'
WEIGHTS_NAME = MODEL_NAME + '.weights'
REPO_URL = 'https://github.com/siju-samuel/darknet/blob/master/'
CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true'
WEIGHTS_URL = 'https://pjreddie.com/media/files/' + WEIGHTS_NAME
download(CFG_URL, CFG_NAME)
download(WEIGHTS_URL, WEIGHTS_NAME)
# Download and Load darknet library
if sys.platform in ['linux', 'linux2']:
DARKNET_LIB = 'libdarknet2.0.so'
DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true'
elif sys.platform == 'darwin':
DARKNET_LIB = 'libdarknet_mac2.0.so'
DARKNET_URL = REPO_URL + 'lib_osx/' + DARKNET_LIB + '?raw=true'
else:
err = "Darknet lib is not supported on {} platform".format(sys.platform)
raise NotImplementedError(err)
download(DARKNET_URL, DARKNET_LIB)
DARKNET_LIB = __darknetffi__.dlopen('./' + DARKNET_LIB)
cfg = "./" + str(CFG_NAME)
weights = "./" + str(WEIGHTS_NAME)
net = DARKNET_LIB.load_network(cfg.encode('utf-8'), weights.encode('utf-8'), 0)
dtype = 'float32'
batch_size = 1
print("Converting darknet to nnvm symbols...")
sym, params = nnvm.frontend.darknet.from_darknet(net, dtype)
######################################################################
# Compile the model on NNVM
# -------------------------
# compile the model
target = 'llvm'
ctx = tvm.cpu(0)
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {'data': data.shape}
print("Compiling the model...")
with nnvm.compiler.build_config(opt_level=2):
graph, lib, params = nnvm.compiler.build(sym, target, shape, dtype, params)
######################################################################
# Load a test image
# --------------------------------------------------------------------
test_image = 'dog.jpg'
print("Loading the test image...")
img_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + \
test_image + '?raw=true'
download(img_url, test_image)
data = nnvm.testing.darknet.load_image(test_image, net.w, net.h)
######################################################################
# Execute on TVM Runtime
# ----------------------
# The process is no different from other examples.
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(data.astype(dtype)))
m.set_input(**params)
# execute
print("Running the test image...")
m.run()
# get outputs
out_shape = (net.outputs,)
tvm_out = m.get_output(0).asnumpy().flatten()
# do the detection and bring up the bounding boxes
thresh = 0.24
hier_thresh = 0.5
img = nnvm.testing.darknet.load_image_color(test_image)
_, im_h, im_w = img.shape
probs = []
boxes = []
region_layer = net.layers[net.n - 1]
boxes, probs = nnvm.testing.yolo2_detection.get_region_boxes(
region_layer, im_w, im_h, net.w, net.h,
thresh, probs, boxes, 1, tvm_out)
boxes, probs = nnvm.testing.yolo2_detection.do_nms_sort(
boxes, probs,
region_layer.w*region_layer.h*region_layer.n, region_layer.classes, 0.3)
coco_name = 'coco.names'
coco_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + coco_name + '?raw=true'
font_name = 'arial.ttf'
font_url = 'https://github.com/siju-samuel/darknet/blob/master/data/' + font_name + '?raw=true'
download(coco_url, coco_name)
download(font_url, font_name)
with open(coco_name) as f:
content = f.readlines()
names = [x.strip() for x in content]
nnvm.testing.yolo2_detection.draw_detections(
img, region_layer.w*region_layer.h*region_layer.n,
thresh, boxes, probs, names, region_layer.classes)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
| {
"content_hash": "d3b9fb3b40bd5073561a0443de95f295",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 95,
"avg_line_length": 32.13986013986014,
"alnum_prop": 0.6372932985204526,
"repo_name": "mlperf/training_results_v0.6",
"id": "87ab60fc28504256cb78f8f91e51dd730917c960",
"size": "4596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tutorials/nnvm/from_darknet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
from openprocurement_client.client import Client, EDRClient
from openprocurement_client.dasu_client import DasuClient
from openprocurement_client.document_service_client \
import DocumentServiceClient
from openprocurement_client.plan import PlansClient
from openprocurement_client.contract import ContractingClient
from openprocurement_client.exceptions import IdNotFound
from restkit.errors import RequestFailed, BadStatusLine, ResourceError
from retrying import retry
from time import sleep
import os
import urllib
def retry_if_request_failed(exception):
status_code = getattr(exception, 'status_code', None)
print(status_code)
    if status_code is not None and (
            500 <= status_code < 600 or status_code in (409, 429, 412)):
return True
else:
return isinstance(exception, BadStatusLine)
class StableClient(Client):
@retry(stop_max_attempt_number=100, wait_random_min=500,
wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
return super(StableClient, self).request(*args, **kwargs)
class StableDsClient(DocumentServiceClient):
@retry(stop_max_attempt_number=100, wait_random_min=500,
wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
return super(StableDsClient, self).request(*args, **kwargs)
def prepare_api_wrapper(key, resource, host_url, api_version, ds_client=None):
return StableClient(key, resource, host_url, api_version,
ds_client=ds_client)
def prepare_ds_api_wrapper(ds_host_url, auth_ds):
return StableDsClient(ds_host_url, auth_ds)
class ContractingStableClient(ContractingClient):
@retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
return super(ContractingStableClient, self).request(*args, **kwargs)
def prepare_contract_api_wrapper(key, host_url, api_version, ds_client=None):
return ContractingStableClient(key, host_url, api_version, ds_client=ds_client)
class StableEDRClient(EDRClient):
@retry(stop_max_attempt_number=100, wait_random_min=500,
wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
try:
res = super(StableEDRClient, self).request(*args, **kwargs)
except ResourceError as re:
if re.status_int == 429:
sleep(int(re.response.headers.get('Retry-After', '30')))
raise re
else:
return res
def prepare_edr_wrapper(host_url, api_version, username, password):
return StableEDRClient(host_url, api_version, username, password)
def get_complaint_internal_id(tender, complaintID):
try:
for complaint in tender.data.complaints:
if complaint.complaintID == complaintID:
return complaint.id
except AttributeError:
pass
try:
for award in tender.data.awards:
for complaint in award.complaints:
if complaint.complaintID == complaintID:
return complaint.id
except AttributeError:
pass
raise IdNotFound
def get_document_by_id(data, doc_id):
for document in data.get('documents', []):
if doc_id in document.get('title', ''):
return document
for complaint in data.get('complaints', []):
for document in complaint.get('documents', []):
if doc_id in document.get('title', ''):
return document
for contract in data.get('contracts', []):
for document in contract.get('documents', []):
if doc_id in document.get('title', ''):
return document
for award in data.get('awards', []):
for document in award.get('documents', []):
if doc_id in document.get('title', ''):
return document
for complaint in award.get('complaints', []):
for document in complaint.get('documents', []):
if doc_id in document.get('title', ''):
return document
for cancellation in data.get('cancellations', []):
for document in cancellation.get('documents', []):
if doc_id in document.get('title', ''):
return document
for bid in data.get('bids', []):
for document in bid.get('documents', []):
if doc_id in document.get('title', ''):
return document
raise Exception('Document with id {} not found'.format(doc_id))
def get_tenders_by_funder_id(client,
funder_id=None,
descending=True,
tender_id_field='tenderID',
opt_fields=('funders',)):
params = {'offset': '',
'opt_fields': ','.join((tender_id_field,) + opt_fields),
'descending': descending}
tender_list = True
client._update_params(params)
tenders_with_funder = {}
while tender_list and not tenders_with_funder:
tender_list = client.get_tenders()
for tender in tender_list:
if 'funders' in tender:
tenders_with_funder[tender[tender_id_field]] = [el['identifier']['id'] for el in tender['funders']]
# In case we are looking for a specific funder
if funder_id:
tenders_with_funder = {k: v for k, v in tenders_with_funder.items() if funder_id in v}
if not tenders_with_funder:
raise IdNotFound
else:
return tenders_with_funder
def download_file_from_url(url, path_to_save_file):
f = open(path_to_save_file, 'wb')
f.write(urllib.urlopen(url).read())
f.close()
return os.path.basename(f.name)
class StableClient_plan(PlansClient):
@retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
return super(StableClient_plan, self).request(*args, **kwargs)
def prepare_plan_api_wrapper(key, host_url, api_version):
return StableClient_plan(key, host_url, api_version)
class StableClient_dasu(DasuClient):
@retry(stop_max_attempt_number=100, wait_random_min=500,
wait_random_max=4000, retry_on_exception=retry_if_request_failed)
def request(self, *args, **kwargs):
return super(StableClient_dasu, self).request(*args, **kwargs)
def prepare_dasu_api_wrapper(key, resource, host_url, api_version, ds_client=None):
    print(key)
return StableClient_dasu(key, resource, host_url, api_version,
ds_client=ds_client) | {
"content_hash": "0b62c5b8a7caa5a541915f900d6bb06f",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 126,
"avg_line_length": 38.47701149425287,
"alnum_prop": 0.6442120985810306,
"repo_name": "kosaniak/robot_tests",
"id": "8aa3cbcdc7d88dc3da28d59130ca5a5b07e1f007",
"size": "6695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "op_robot_tests/tests_files/brokers/openprocurement_client_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84517"
},
{
"name": "RobotFramework",
"bytes": "672122"
}
],
"symlink_target": ""
} |
print "|--------------------------------------------|"
print "| Starting Tutorial 1 |"
print "|--------------------------------------------|"
print 'media path = ' + scene.getMediaPath()
print 'num of pawns in the scene = ' + str(scene.getNumPawns())
print 'num of characters in the scene = ' + str(scene.getNumCharacters())
| {
"content_hash": "78c89cb37d59b9f609face39b31aa006",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 35.9,
"alnum_prop": 0.44846796657381616,
"repo_name": "gsi-upm/SmartSim",
"id": "a44ce6ed33e51303dce397008966c28f70bba8d4",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartbody/data/examples/Tutorials/1_UsingPython.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11708"
},
{
"name": "C",
"bytes": "941473"
},
{
"name": "C#",
"bytes": "733730"
},
{
"name": "C++",
"bytes": "16389947"
},
{
"name": "CMake",
"bytes": "114424"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "45459"
},
{
"name": "Groff",
"bytes": "2619"
},
{
"name": "HTML",
"bytes": "1128698"
},
{
"name": "Inno Setup",
"bytes": "8592"
},
{
"name": "Java",
"bytes": "371478"
},
{
"name": "M4",
"bytes": "16806"
},
{
"name": "Makefile",
"bytes": "240549"
},
{
"name": "Objective-C",
"bytes": "4511"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13551"
},
{
"name": "Protocol Buffer",
"bytes": "3178"
},
{
"name": "Python",
"bytes": "989019"
},
{
"name": "Rust",
"bytes": "105"
},
{
"name": "Shell",
"bytes": "248995"
},
{
"name": "Smalltalk",
"bytes": "1540"
},
{
"name": "Smarty",
"bytes": "179"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
def tablecols(data, cols):
rows = []
row = []
index = 0
for user in data:
row.append(user)
index = index + 1
if index % cols == 0:
rows.append(row)
row = []
# Still stuff missing?
if len(row) > 0:
rows.append(row)
return rows
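# Usage sketch (not part of the original module): once this filter is
# registered (see the register call below), a template that has done
# ``{% load tablecols %}`` can write
#     {% for row in users|tablecols:4 %}
#         <tr>{% for user in row %}<td>{{ user }}</td>{% endfor %}</tr>
#     {% endfor %}
# to lay a flat list or queryset out in rows of four columns.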
register.filter_function(tablecols) | {
"content_hash": "e2018152fc95b03756a6c465a3e0a4e0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 35,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.556930693069307,
"repo_name": "theiviaxx/Frog",
"id": "e142f523c31e0bc4274c81673f026e4c7190230f",
"size": "1699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frog/templatetags/tablecols.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22225"
},
{
"name": "JavaScript",
"bytes": "57292"
},
{
"name": "Python",
"bytes": "215494"
}
],
"symlink_target": ""
} |
"""
Example of the CEFBrowser widget embedded in a UI with tabs and controls, as
known from Chrome on Windows, Mac OS X or Linux.
"""
import functools
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.garden.cefpython import CEFBrowser
from kivy.lang import Builder
from kivy.properties import *
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.stencilview import StencilView
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
controls_size = 36
class TabbedCEFBrowserTab(GridLayout):
text = StringProperty()
url = StringProperty()
last_tab = None
__tabbed_cef_browser = None
__cef_browser = None
def __init__(self, tabbed_cef_browser, url="", text="", cef_browser=None):
super(TabbedCEFBrowserTab, self).__init__(rows=1, size_hint=(None, 1), width=controls_size*10)
self.__tabbed_cef_browser = tabbed_cef_browser
self.url = url
self.text = text
if cef_browser:
self.__cef_browser = cef_browser
self.__configure_cef_browser()
self.__toggle_button = ToggleButton(text=text, group="tabs", font_size=controls_size/2, size_hint=(1, 1), text_size=(controls_size*10, controls_size), shorten=True, shorten_from="right", valign="middle", padding_x=5)
self.__toggle_button.bind(size=self.__toggle_button.setter("text_size"))
self.add_widget(self.__toggle_button)
self.__close_button = Button(text="X", background_color=(1,0,0,1), font_size=controls_size/2, size_hint=(None, 1), width=controls_size)
self.__close_button.bind(on_press=self.close)
def on_toggle_state(toggle_button, new_state):
if new_state=="down":
toggle_button.bold = True
self.add_widget(self.__close_button)
self.__tabbed_cef_browser._set_tab(self)
else:
toggle_button.bold = False
self.remove_widget(self.__close_button)
self.__toggle_button.bind(state=on_toggle_state)
def on_text(self, new_text):
self.__toggle_button.text = new_text
self.bind(text=on_text)
def select(self):
self.__toggle_button.trigger_action()
def close(self, *largs):
self.cef_browser._browser.CloseBrowser()
@property
def cef_browser(self):
if not self.__cef_browser:
self.__cef_browser = CEFBrowser(self.url)
self.__configure_cef_browser()
return self.__cef_browser
def __configure_cef_browser(self):
self.__cef_browser.popup_policy = CEFBrowser.always_allow_popups
def popup_new_tab_handler(browser, popup_browser):
self.__tabbed_cef_browser.add_tab(TabbedCEFBrowserTab(self.__tabbed_cef_browser, cef_browser=popup_browser))
self.__cef_browser.popup_handler = popup_new_tab_handler
def close_tab_handler(browser, *largs):
self.__tabbed_cef_browser.remove_tab(self)
self.__cef_browser.close_handler = close_tab_handler
self.__cef_browser.bind(url=self.setter("url"))
self.__cef_browser.bind(title=self.setter("text"))
def on_load_start(browser, *largs):
self.__tabbed_cef_browser._load_button.text = "Go" if self.__tabbed_cef_browser._url_input.focus else "x"
def on_load_end(browser, *largs):
self.__tabbed_cef_browser._load_button.text = "Go" if self.__tabbed_cef_browser._url_input.focus else "r"
self.__tabbed_cef_browser._back_button.disabled = not self.__tabbed_cef_browser._current_browser.can_go_back
self.__tabbed_cef_browser._forward_button.disabled = not self.__tabbed_cef_browser._current_browser.can_go_forward
self.__cef_browser.bind(on_load_start=on_load_start)
self.__cef_browser.bind(on_load_end=on_load_end)
self.__cef_browser.bind(on_load_error=on_load_end)
class TabbedCEFBrowser(GridLayout):
def __init__(self, urls=["http://www.rentouch.ch"], *largs, **dargs):
super(TabbedCEFBrowser, self).__init__(cols=1, *largs, **dargs)
gl = GridLayout(rows=1, size_hint=(1, None), height=controls_size)
self.current_tab = None
self.__tab_bar_scroll = ScrollView(size_hint=(1, 1))
self.__tab_bar_grid = GridLayout(rows=1, size_hint=(None, 1))
self.__tab_bar_grid.bind(minimum_width=self.__tab_bar_grid.setter("width"))
last_tab = None
for url in urls:
this_tab = TabbedCEFBrowserTab(self, url, url)
this_tab.last_tab = last_tab
self.__tab_bar_grid.add_widget(this_tab)
last_tab = this_tab
self.current_tab = last_tab
self.__tab_bar_scroll.add_widget(self.__tab_bar_grid)
self.__tab_bar_scroll.bind(height=self.__tab_bar_grid.setter("height"))
gl.add_widget(self.__tab_bar_scroll)
self.__tab_bar_new = Button(text="+", font_size=controls_size/2, size_hint=(None, 1), width=controls_size)
def on_new_tab(but):
self.add_tab(TabbedCEFBrowserTab(self, "http://google.com", "Google"))
def focus_url_input(*largs):
self._url_input.focus = True
Clock.schedule_once(focus_url_input, 0)
self.__tab_bar_new.bind(on_press=on_new_tab)
gl.add_widget(self.__tab_bar_new)
self.__control_bar_grid = GridLayout(rows=1, size_hint=(1, None), height=controls_size)
self._back_button = Button(text="<", font_size=controls_size/2, size_hint=(None, 1), width=controls_size)
def on_back_press(back_button):
self._current_browser.go_back()
self._back_button.bind(on_press=on_back_press)
self._forward_button = Button(text=">", font_size=controls_size/2, size_hint=(None, 1), width=controls_size)
def on_forward_press(forward_button):
self._current_browser.go_forward()
self._forward_button.bind(on_press=on_forward_press)
self._url_input = TextInput(text="http://", font_size=controls_size/2, size_hint=(1, 1), multiline=False)
def on_url_focus(url_input, new_focus):
if new_focus:
def fn(*largs):
url_input.select_all()
Clock.schedule_once(fn, 0)
self._load_button.text = "Go"
else:
url_input.text = self._current_browser.url
self._load_button.text = "x" if self._current_browser.is_loading else "r"
self._url_input.bind(focus=on_url_focus)
def on_url_validate(url_input):
self._current_browser.url = self._url_input.text
self._url_input.bind(on_text_validate=on_url_validate)
self._load_button = Button(text="Go", font_size=controls_size/2, size_hint=(None, 1), width=controls_size)
def on_load_button(load_button):
if self._url_input.focus:
self._current_browser.url = self._url_input.text
elif self._current_browser.is_loading:
self._current_browser.stop_loading()
else:
print(dir(self._current_browser))
self._current_browser.reload()
self._load_button.bind(on_press=on_load_button)
self.__control_bar_grid.add_widget(self._back_button)
self.__control_bar_grid.add_widget(self._forward_button)
self.__control_bar_grid.add_widget(self._url_input)
self.__control_bar_grid.add_widget(self._load_button)
self._current_browser = CEFBrowser()
self.add_widget(gl)
self.add_widget(self.__control_bar_grid)
self.add_widget(self._current_browser)
self.select_first_tab()
def select_first_tab(self):
for tab in self.__tab_bar_grid.children:
tab.select()
break
@property
def tabs(self):
return self.__tab_bar_grid.children
def add_tab(self, new_tab):
self.__tab_bar_grid.add_widget(new_tab)
new_tab.select()
def remove_tab(self, remove_tab):
self.__tab_bar_grid.remove_widget(remove_tab)
self.current_tab = remove_tab.last_tab
remove_tab.last_tab.select()
def _set_tab(self, new_tab):
if self.current_tab!=new_tab:
ct = self.current_tab
tmp = ct
while tmp:
if tmp.last_tab==new_tab:
tmp.last_tab = new_tab.last_tab
tmp = tmp.last_tab
new_tab.last_tab = ct
self.current_tab = new_tab
def url_input_set_text(browser, url):
self._url_input.text = url
if self._url_input.focus:
self._url_input.select_all()
try:
self._current_browser.unbind(url=url_input_set_text)
except:
pass
def old_tab_remove_keyboard(browser, *largs):
browser.release_keyboard()
Clock.schedule_once(functools.partial(old_tab_remove_keyboard, self._current_browser))
self.remove_widget(self._current_browser)
self._url_input.text = new_tab.url
self._current_browser = new_tab.cef_browser
self.add_widget(self._current_browser)
self._current_browser.bind(url=url_input_set_text)
if __name__ == '__main__':
class CEFApp(App):
def timeout(self, *largs):
tb = self.tb
tb.__tab_bar_height = 26
tb.tabs[tb.selected_tab]["browser"].navigation_bar_hei = 26
def build(self):
#Clock.schedule_once(self.timeout, 5)
self.tb = TabbedCEFBrowser(urls=["http://jegger.ch/datapool/app/test_popup.html", "http://kivy.org",
"https://github.com/kivy-garden/garden.cefpython",
"http://code.google.com/p/cefpython/",
"http://code.google.com/p/chromiumembedded/", "about:blank"], pos=(20,10), size_hint=(None, None), size=(Window.width-40, Window.height-20))
return self.tb
CEFApp().run()
cefpython.Shutdown()
| {
"content_hash": "f0536348b46d60298a3ff48a0a1b899d",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 224,
"avg_line_length": 45.36725663716814,
"alnum_prop": 0.6129913196137716,
"repo_name": "jegger/garden.cefpython",
"id": "ed7b276c054d4e5205f746242cef488ee8ba2ecd",
"size": "10300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tabbed.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17559"
},
{
"name": "Python",
"bytes": "72000"
}
],
"symlink_target": ""
} |
from django.db.models import Model
from django.db.models import ForeignKey
from django.db.models import BooleanField
from django.db.models import CharField
from django.db.models import TextField
from django.db.models import DateField
from django.db.models import DateTimeField
from django.db.models import URLField
from django.db.models import ManyToManyField
from django.utils.translation import ugettext as _
from django.dispatch import receiver
from allauth.account.signals import user_signed_up
from imagekit.models.fields import ProcessedImageField
from imagekit.processors import ResizeToFit
@receiver(user_signed_up)
def signed_up_callback(sender, **kwargs):
account = Account()
account.user = kwargs["user"]
# TODO preset account.source based on HTTP_REFERER
account.save()
SOURCE_CHOICES = [
('OTHER', _('OTHER')),
('COUCHSURFING', _('COUCHSURFING')),
('FACEBOOK', _('FACEBOOK')),
('FRIENDS', _('FRIENDS')),
('GOOGLE', _('GOOGLE')),
('TWITTER', _('TWITTER')),
]
def _upload_to(instance, filename, **kwargs):
return "account/passport/%s.%s" % (instance.id, 'jpeg')
class Account(Model):
# main data
user = ForeignKey('auth.User', unique=True, related_name="accounts")
description = TextField(blank=True)
source = CharField(max_length=64, choices=SOURCE_CHOICES, default='OTHER')
mobile = CharField(max_length=1024, blank=True)
links = ManyToManyField('link.Link', null=True, blank=True)
passport = ProcessedImageField(upload_to=_upload_to, null=True, blank=True,
processors=[ResizeToFit(1024, 768)],
format='JPEG', options={'quality': 90})
# meta
# created_by = self
# updated_by = self
created_on = DateTimeField(auto_now_add=True)
updated_on = DateTimeField(auto_now=True)
    def get_name(self):
return self.user.username
def __unicode__(self):
return self.user.username
class Meta:
ordering = ['user__username']
| {
"content_hash": "0bf6a13de74e45fe215698644d5c6c09",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 30.328358208955223,
"alnum_prop": 0.6776574803149606,
"repo_name": "F483/bikesurf.org",
"id": "9d320155a56f4cb14f27c67b5b535b0a393fbbcd",
"size": "2178",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/account/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1950764"
},
{
"name": "HTML",
"bytes": "6079063"
},
{
"name": "JavaScript",
"bytes": "284942"
},
{
"name": "Makefile",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "215658"
},
{
"name": "Ruby",
"bytes": "4418"
},
{
"name": "Shell",
"bytes": "553"
}
],
"symlink_target": ""
} |
from concurrent.futures import ThreadPoolExecutor
import grpc
from qrl.core import logger
from qrl.core.qrlnode import QRLNode
from qrl.generated.qrl_pb2_grpc import add_P2PNodeServicer_to_server, add_PublicAPIServicer_to_server
from qrl.generated.qrlbase_pb2_grpc import add_BaseServicer_to_server
from qrl.services.APIService import APIService
from qrl.services.BaseService import BaseService
from qrl.services.P2PService import P2PService
def start_services(node: QRLNode):
server = grpc.server(ThreadPoolExecutor(max_workers=1))
add_BaseServicer_to_server(BaseService(node), server)
add_P2PNodeServicer_to_server(P2PService(node), server)
add_PublicAPIServicer_to_server(APIService(node), server)
server.add_insecure_port("[::]:9009")
server.start()
logger.debug("grpc node - started !")
return server
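# Usage sketch (not part of the original module): the returned grpc server
# runs in the background, so callers keep the reference and stop it
# explicitly when shutting down, e.g.
#     server = start_services(node)
#     ...
#     server.stop(0)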
| {
"content_hash": "c7e17d7650c41f62ce3e4ddcc53023c8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 101,
"avg_line_length": 33.8,
"alnum_prop": 0.7846153846153846,
"repo_name": "elliottdehn/QRL",
"id": "9beca33290bc0f8c1cae4097d61ab20b647ccde3",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qrl/services/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2276"
},
{
"name": "HTML",
"bytes": "20501"
},
{
"name": "JavaScript",
"bytes": "22142"
},
{
"name": "Python",
"bytes": "431741"
},
{
"name": "Shell",
"bytes": "1096"
}
],
"symlink_target": ""
} |
import logging
import os
import django
try:
import honeypot
except ImportError:
honeypot = None
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'iamasecretkeydonttellanyone'
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'site_news'
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'tests', 'templates'),
]
FIXTURE_DIRS = (os.path.join(BASE_DIR, 'tests', 'fixtures'),)
ROOT_URLCONF = 'tests.urls'
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
PASSWORD_HASHERS = {
'django.contrib.auth.hashers.MD5PasswordHasher',
}
if django.VERSION[:2] < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
logging.getLogger('site_news').addHandler(logging.NullHandler())
SOUTH_TESTS_MIGRATE = False
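# Usage sketch (not part of the original file): point Django at this module
# when running the suite, e.g.
#     django-admin.py test --settings=tests.settings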
| {
"content_hash": "767c8c24e72876a8853e79b988b51951",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 64,
"avg_line_length": 22.633333333333333,
"alnum_prop": 0.7106038291605302,
"repo_name": "maxicecilia/django_site_news",
"id": "641d9dc1b302c8fa5e9c10d885bba376a6b4993f",
"size": "1358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1140"
},
{
"name": "Python",
"bytes": "22501"
}
],
"symlink_target": ""
} |
import collections
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
try:
return set(self) == set(other)
except TypeError:
return False
if __name__ == '__main__':
s = OrderedSet('abracadaba')
t = OrderedSet('simsalabim')
print(s | t)
print(s & t)
print(s - t)
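    # Expected output of the demo above (for reference; not in the original
    # file, and assuming the stock collections MutableSet operator semantics):
    #     OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
    #     OrderedSet(['a', 'b'])
    #     OrderedSet(['r', 'c', 'd'])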
| {
"content_hash": "2d0775ec74c6eb73805e726c2380b943",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 26.535211267605632,
"alnum_prop": 0.48089171974522293,
"repo_name": "hanw/p4-hlir",
"id": "b34e7c46e140966dcbe7b332b8fc7683de8c3cf2",
"size": "2559",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "p4_hlir/util/OrderedSet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "361669"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Evcoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
| {
"content_hash": "73bc72fd5a095bd8f3fad1b0877c2ba3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7085889570552147,
"repo_name": "evcoin/evcoin",
"id": "742abedae6832cb74cb68b4ae4bdba65086702fa",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2522853"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14696"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69714"
},
{
"name": "Shell",
"bytes": "9702"
},
{
"name": "TypeScript",
"bytes": "5236293"
}
],
"symlink_target": ""
} |
"""The Laplace distribution class."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import special_math
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.stats import quantiles
__all__ = [
'Laplace',
]
class Laplace(distribution.AutoCompositeTensorDistribution):
"""The Laplace distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z
Z = 2 sigma
```
where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant.
  Note that the Laplace distribution can be thought of as two exponential
distributions spliced together 'back-to-back.'
The Laplace distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Laplace(loc=0, scale=1)
Y = loc + scale * X
```
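  #### Examples
  A minimal usage sketch (not part of the original docstring), assuming the
  conventional `tfp.distributions` alias:
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  dist = tfd.Laplace(loc=0., scale=3.)
  dist.prob(1.)      # density at x = 1
  dist.sample(5)     # five reparameterized samples
  ```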
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name='Laplace'):
"""Construct Laplace distribution with parameters `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g., `loc / scale` is a valid operation).
Args:
loc: Floating point tensor which characterizes the location (center)
of the distribution.
scale: Positive floating point tensor which characterizes the spread of
the distribution.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value '`NaN`' to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` are of different dtype.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale], tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
self._scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
dtype_util.assert_same_float_dtype([self._loc, self._scale])
super(Laplace, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
shape = ps.concat([[n], self._batch_shape_tensor(loc=loc, scale=scale)], 0)
# Uniform variates must be sampled from the open-interval `(-1, 1)` rather
# than `[-1, 1)`. In the case of `(0, 1)` we'd use
# `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny` because it is the
# smallest, positive, 'normal' number. However, the concept of subnormality
# exists only at zero; here we need the smallest usable number larger than
# -1, i.e., `-1 + eps/2`.
dt = dtype_util.as_numpy_dtype(self.dtype)
uniform_samples = samplers.uniform(
shape=shape,
minval=np.nextafter(dt(-1.), dt(1.)),
maxval=1.,
dtype=self.dtype,
seed=seed)
return (loc - scale * tf.sign(uniform_samples) *
tf.math.log1p(-tf.abs(uniform_samples)))
def _log_prob(self, x):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
z = (x - loc) / scale
return -tf.abs(z) - np.log(2.) - tf.math.log(scale)
def _log_cdf(self, x):
return special_math.log_cdf_laplace(self._z(x))
def _log_survival_function(self, x):
return special_math.log_cdf_laplace(-self._z(x))
def _cdf(self, x):
z = self._z(x)
return 0.5 - 0.5 * tf.sign(z) * tf.math.expm1(-tf.abs(z))
def _entropy(self):
scale = tf.convert_to_tensor(self.scale)
return tf.broadcast_to(np.log(2.) + 1 + tf.math.log(scale),
self._batch_shape_tensor(scale=scale))
def _mean(self):
loc = tf.convert_to_tensor(self.loc)
return tf.broadcast_to(loc, self._batch_shape_tensor(loc=loc))
def _stddev(self):
scale = tf.convert_to_tensor(self.scale)
return tf.broadcast_to(np.sqrt(2.) * scale,
self._batch_shape_tensor(scale=scale))
def _median(self):
return self._mean()
def _mode(self):
return self._mean()
def _quantile(self, p):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
return tf.where(p > 0.5,
loc - scale * (
tf.constant(np.log(2), dtype=p.dtype) +
tf.math.log1p(-p)),
loc + scale * tf.math.log(2 * p))
def _z(self, x):
return (x - self.loc) / self.scale
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
@classmethod
def _maximum_likelihood_parameters(cls, value):
median = quantiles.percentile(value, 50., axis=0, interpolation='linear')
return {'loc': median,
'scale': tf.reduce_mean(tf.abs(value - median), axis=0)}
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self._scale):
assertions.append(assert_util.assert_positive(
self._scale, message='Argument `scale` must be positive.'))
return assertions
@kullback_leibler.RegisterKL(Laplace, Laplace)
def _kl_laplace_laplace(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Laplace.
Args:
a: instance of a Laplace distribution object.
b: instance of a Laplace distribution object.
name: Python `str` name to use for created operations.
Default value: `None` (i.e., `'kl_laplace_laplace'`).
Returns:
kl_div: Batchwise KL(a || b)
"""
with tf.name_scope(name or 'kl_laplace_laplace'):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 38
distance = tf.abs(a.loc - b.loc)
a_scale = tf.convert_to_tensor(a.scale)
b_scale = tf.convert_to_tensor(b.scale)
delta_log_scale = tf.math.log(a_scale) - tf.math.log(b_scale)
return (-delta_log_scale +
distance / b_scale - 1. +
tf.exp(-distance / a_scale + delta_log_scale))
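# For reference (not in the original file): the expression returned above is
# the closed form
#     KL(a || b) = log(s_b / s_a) + |m_a - m_b| / s_b
#                  + (s_a / s_b) * exp(-|m_a - m_b| / s_a) - 1
# with m = loc and s = scale.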
| {
"content_hash": "482d6cb5a99fedebeefe99d3eaa194f3",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 81,
"avg_line_length": 36.04255319148936,
"alnum_prop": 0.6606847697756789,
"repo_name": "tensorflow/probability",
"id": "8a8c0ee20da485cec8c3d81993a79b46358a308a",
"size": "9148",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/distributions/laplace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
from enum import Enum
class JudgeResultStatus(Enum):
ACCEPT = 1
WRONG_ANSWER = 2
    # 2 would silently make this an alias of WRONG_ANSWER under Enum's
    # duplicate-value rules, so give presentation errors their own value.
    PRESENTATION_ERROR = 3
| {
"content_hash": "21e75ca201fdb9f2488eaac9f58b8482",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 30,
"avg_line_length": 16.857142857142858,
"alnum_prop": 0.6864406779661016,
"repo_name": "AJudge-team/AJudge",
"id": "359a1a007183b2ebfc1e9398acf482ba55e381e3",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34874"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
import os
import sys
import threading
import traceback
import queue
from pathlib import Path
from datetime import datetime
import requests
import yaml
BASE_URL = os.getenv("UPSTREAM_URL", "https://api.github.com/repos/")
WORKING_DIR = os.getenv("TUNASYNC_WORKING_DIR")
WORKERS = int(os.getenv("WORKERS", "8"))
FAST_SKIP = bool(os.getenv("FAST_SKIP", ""))
def get_repos():
try:
with open('/repos.yaml') as f:
content = f.read()
except FileNotFoundError:
content = os.getenv("REPOS", None)
if content is None:
raise Exception("Loading /repos.yaml file and reading REPOS env both failed")
repos = yaml.safe_load(content)
if isinstance(repos, list):
return repos
else:
repos = repos['repos']
if not isinstance(repos, list):
raise Exception("Can not inspect repo list from the given file/env")
return repos
REPOS = get_repos()
# connect and read timeout value
TIMEOUT_OPTION = (7, 10)
total_size = 0
def sizeof_fmt(num, suffix='iB'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.2f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.2f%s%s" % (num, 'Y', suffix)
# wrap around requests.get to use token if available
def github_get(*args, **kwargs):
headers = kwargs['headers'] if 'headers' in kwargs else {}
if 'GITHUB_TOKEN' in os.environ:
headers['Authorization'] = 'token {}'.format(
os.environ['GITHUB_TOKEN'])
kwargs['headers'] = headers
return requests.get(*args, **kwargs)
def do_download(remote_url: str, dst_file: Path, remote_ts: float):
# NOTE the stream=True parameter below
with github_get(remote_url, stream=True) as r:
r.raise_for_status()
with open(dst_file, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024**2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
# f.flush()
os.utime(dst_file, (remote_ts, remote_ts))
def downloading_worker(q):
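    # Workers pull (url, dst_file, working_dir, updated) tuples off the queue
    # and download them; a None item is the shutdown sentinel that main()
    # enqueues once per worker after all tasks are done.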
while True:
item = q.get()
if item is None:
break
url, dst_file, working_dir, updated = item
print("downloading", url, "to",
dst_file.relative_to(working_dir), flush=True)
try:
do_download(url, dst_file, updated)
except Exception:
print("Failed to download", url, flush=True)
if dst_file.is_file():
dst_file.unlink()
q.task_done()
def create_workers(n):
task_queue = queue.Queue()
for i in range(n):
t = threading.Thread(target=downloading_worker, args=(task_queue, ))
t.start()
return task_queue
def ensure_safe_name(filename):
filename = filename.replace('\0', ' ')
if filename == '.':
return ' .'
elif filename == '..':
return '. .'
else:
return filename.replace('/', '\\').replace('\\', '_')
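# For example, ensure_safe_name('v1.0/linux\\x86') returns 'v1.0_linux_x86',
# so asset names cannot escape the release directory.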
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--base-url", default=BASE_URL)
parser.add_argument("--working-dir", default=WORKING_DIR)
parser.add_argument("--workers", default=WORKERS, type=int,
help='number of concurrent downloading jobs')
parser.add_argument("--fast-skip", action='store_true', default=FAST_SKIP,
help='do not verify size and timestamp of existing files')
args = parser.parse_args()
if args.working_dir is None:
raise Exception("Working Directory is None")
working_dir = Path(args.working_dir)
task_queue = create_workers(args.workers)
remote_filelist = []
cleaning = False
def download(release, release_dir, tarball=False):
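        # Queue downloads for a single release: optionally the source tarball
        # plus every uploaded asset. Each target path is also recorded in
        # remote_filelist so that stale local files can be removed afterwards.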
global total_size
if tarball:
url = release['tarball_url']
updated = datetime.strptime(
release['published_at'], '%Y-%m-%dT%H:%M:%SZ').timestamp()
dst_file = release_dir / 'repo-snapshot.tar.gz'
remote_filelist.append(dst_file.relative_to(working_dir))
if dst_file.is_file():
print("skipping", dst_file.relative_to(working_dir), flush=True)
else:
dst_file.parent.mkdir(parents=True, exist_ok=True)
task_queue.put((url, dst_file, working_dir, updated))
for asset in release['assets']:
url = asset['browser_download_url']
updated = datetime.strptime(
asset['updated_at'], '%Y-%m-%dT%H:%M:%SZ').timestamp()
dst_file = release_dir / ensure_safe_name(asset['name'])
remote_filelist.append(dst_file.relative_to(working_dir))
total_size += asset['size']
if dst_file.is_file():
if args.fast_skip:
print("fast skipping", dst_file.relative_to(
working_dir), flush=True)
continue
else:
stat = dst_file.stat()
local_filesize = stat.st_size
local_mtime = stat.st_mtime
# print(f"{local_filesize} vs {asset['size']}")
# print(f"{local_mtime} vs {updated}")
                    if local_mtime > updated or (
                            asset['size'] == local_filesize and
                            local_mtime == updated):
print("skipping", dst_file.relative_to(
working_dir), flush=True)
continue
else:
dst_file.parent.mkdir(parents=True, exist_ok=True)
task_queue.put((url, dst_file, working_dir, updated))
def link_latest(name, repo_dir):
try:
os.unlink(repo_dir / "LatestRelease")
except OSError:
pass
try:
os.symlink(name, repo_dir / "LatestRelease")
except OSError:
pass
for cfg in REPOS:
flat = False # build a folder for each release
versions = 1 # keep only one release
tarball = False # do not download the tarball
prerelease = False # filter out pre-releases
if isinstance(cfg, str):
repo = cfg
else:
repo = cfg["repo"]
if "versions" in cfg:
versions = cfg["versions"]
if "flat" in cfg:
flat = cfg["flat"]
if "tarball" in cfg:
tarball = cfg["tarball"]
if "pre_release" in cfg:
prerelease = cfg["pre_release"]
repo_dir = working_dir / Path(repo)
print(f"syncing {repo} to {repo_dir}")
try:
r = github_get(f"{args.base_url}{repo}/releases")
r.raise_for_status()
releases = r.json()
        except Exception:
traceback.print_exc()
break
n_downloaded = 0
for release in releases:
if not release['draft'] and (prerelease or not release['prerelease']):
name = ensure_safe_name(release['name'] or release['tag_name'])
if len(name) == 0:
print("Error: Unnamed release")
continue
download(release, (repo_dir if flat else repo_dir / name), tarball)
if n_downloaded == 0 and not flat:
# create a symbolic link to the latest release folder
link_latest(name, repo_dir)
n_downloaded += 1
if versions > 0 and n_downloaded >= versions:
break
if n_downloaded == 0:
print(f"Error: No release version found for {repo}")
continue
else:
cleaning = True
# block until all tasks are done
task_queue.join()
# stop workers
for i in range(args.workers):
task_queue.put(None)
if cleaning:
local_filelist = []
for local_file in working_dir.glob('**/*'):
if local_file.is_file():
local_filelist.append(local_file.relative_to(working_dir))
for old_file in set(local_filelist) - set(remote_filelist):
print("deleting", old_file, flush=True)
old_file = working_dir / old_file
old_file.unlink()
for local_dir in working_dir.glob('*/*/*'):
if local_dir.is_dir():
try:
# remove empty dirs only
local_dir.rmdir()
                except OSError:
pass
print("Total size is", sizeof_fmt(total_size, suffix=""))
if __name__ == "__main__":
main()
# vim: ts=4 sw=4 sts=4 expandtab
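# Usage sketch (not part of the original script): one way this mirror job
# could be configured, based on the environment variables and repos.yaml
# handling above. The repository names and paths below are placeholders.
#
#   export TUNASYNC_WORKING_DIR=/data/github-release
#   export GITHUB_TOKEN=...              # optional; raises the API rate limit
#   export REPOS='
#   repos:
#     - owner/simple-repo                # defaults: 1 release, assets only
#     - repo: owner/another-repo
#       versions: 3                      # keep the three newest releases
#       tarball: true                    # also mirror the source tarball
#       pre_release: true                # include pre-releases
#   '
#   python3 github-release.py --workers 4 --fast-skip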
| {
"content_hash": "2f153319bae2d348cf337007c60ae32c",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 89,
"avg_line_length": 32.57620817843866,
"alnum_prop": 0.5404541823576401,
"repo_name": "ustclug/ustcmirror-images",
"id": "c8270b1a7cf239a6352cb9c4029ae42900f118b2",
"size": "8763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github-release/tunasync/github-release.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "317"
},
{
"name": "Dockerfile",
"bytes": "7933"
},
{
"name": "Haskell",
"bytes": "16015"
},
{
"name": "Julia",
"bytes": "473"
},
{
"name": "Perl",
"bytes": "32407"
},
{
"name": "Python",
"bytes": "62548"
},
{
"name": "Rust",
"bytes": "1555"
},
{
"name": "Shell",
"bytes": "62850"
}
],
"symlink_target": ""
} |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the PTransform and descendants."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import re
import sys
import typing
import unittest
from builtins import map
from builtins import range
from builtins import zip
from functools import reduce
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import hamcrest as hc
from nose.plugins.attrib import attr
import apache_beam as beam
import apache_beam.pvalue as pvalue
import apache_beam.transforms.combiners as combine
import apache_beam.typehints as typehints
from apache_beam.io.iobase import Read
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import WindowInto
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.window import TimestampedValue
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.typehints.typehints_test import TypeHintTestCase
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
class PTransformTest(unittest.TestCase):
# Enable nose tests running in parallel
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def assertStartswith(self, msg, prefix):
self.assertTrue(
msg.startswith(prefix), '"%s" does not start with "%s"' % (msg, prefix))
def test_str(self):
self.assertEqual(
'<PTransform(PTransform) label=[PTransform]>', str(PTransform()))
pa = TestPipeline()
res = pa | 'ALabel' >> beam.Impulse()
self.assertEqual('AppliedPTransform(ALabel, Impulse)', str(res.producer))
pc = TestPipeline()
res = pc | beam.Impulse()
inputs_tr = res.producer.transform
inputs_tr.inputs = ('ci', )
self.assertEqual(
"<Impulse(PTransform) label=[Impulse] inputs=('ci',)>", str(inputs_tr))
pd = TestPipeline()
res = pd | beam.Impulse()
side_tr = res.producer.transform
side_tr.side_inputs = (4, )
self.assertEqual(
'<Impulse(PTransform) label=[Impulse] side_inputs=(4,)>', str(side_tr))
inputs_tr.side_inputs = ('cs', )
self.assertEqual(
"""<Impulse(PTransform) label=[Impulse] """
"""inputs=('ci',) side_inputs=('cs',)>""",
str(inputs_tr))
def test_do_with_do_fn(self):
class AddNDoFn(beam.DoFn):
def process(self, element, addon):
return [element + addon]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.ParDo(AddNDoFn(), 10)
assert_that(result, equal_to([11, 12, 13]))
def test_do_with_unconstructed_do_fn(self):
class MyDoFn(beam.DoFn):
def process(self):
pass
with self.assertRaises(ValueError):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
pcoll | 'Do' >> beam.ParDo(MyDoFn) # Note the lack of ()'s
def test_do_with_callable(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.FlatMap(lambda x, addon: [x + addon], 10)
assert_that(result, equal_to([11, 12, 13]))
def test_do_with_side_input_as_arg(self):
with TestPipeline() as pipeline:
side = pipeline | 'Side' >> beam.Create([10])
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.FlatMap(
lambda x, addon: [x + addon], pvalue.AsSingleton(side))
assert_that(result, equal_to([11, 12, 13]))
def test_do_with_side_input_as_keyword_arg(self):
with TestPipeline() as pipeline:
side = pipeline | 'Side' >> beam.Create([10])
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.FlatMap(
lambda x, addon: [x + addon], addon=pvalue.AsSingleton(side))
assert_that(result, equal_to([11, 12, 13]))
def test_do_with_do_fn_returning_string_raises_warning(self):
with self.assertRaises(typehints.TypeCheckError) as cm:
with TestPipeline() as pipeline:
pipeline._options.view_as(TypeOptions).runtime_type_check = True
pcoll = pipeline | 'Start' >> beam.Create(['2', '9', '3'])
pcoll | 'Do' >> beam.FlatMap(lambda x: x + '1')
# Since the DoFn directly returns a string we should get an
    # error warning us when the pipeline runs.
expected_error_prefix = (
'Returning a str from a ParDo or FlatMap '
'is discouraged.')
self.assertStartswith(cm.exception.args[0], expected_error_prefix)
def test_do_with_do_fn_returning_dict_raises_warning(self):
with self.assertRaises(typehints.TypeCheckError) as cm:
with TestPipeline() as pipeline:
pipeline._options.view_as(TypeOptions).runtime_type_check = True
pcoll = pipeline | 'Start' >> beam.Create(['2', '9', '3'])
pcoll | 'Do' >> beam.FlatMap(lambda x: {x: '1'})
# Since the DoFn directly returns a dict we should get an error warning
    # us when the pipeline runs.
expected_error_prefix = (
'Returning a dict from a ParDo or FlatMap '
'is discouraged.')
self.assertStartswith(cm.exception.args[0], expected_error_prefix)
def test_do_with_multiple_outputs_maintains_unique_name(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
r1 = pcoll | 'A' >> beam.FlatMap(lambda x: [x + 1]).with_outputs(main='m')
r2 = pcoll | 'B' >> beam.FlatMap(lambda x: [x + 2]).with_outputs(main='m')
assert_that(r1.m, equal_to([2, 3, 4]), label='r1')
assert_that(r2.m, equal_to([3, 4, 5]), label='r2')
@attr('ValidatesRunner')
def test_impulse(self):
with TestPipeline() as pipeline:
result = pipeline | beam.Impulse() | beam.Map(lambda _: 0)
assert_that(result, equal_to([0]))
# TODO(BEAM-3544): Disable this test in streaming temporarily.
# Remove sickbay-streaming tag after it's resolved.
@attr('ValidatesRunner', 'sickbay-streaming')
def test_read_metrics(self):
from apache_beam.io.utils import CountingSource
class CounterDoFn(beam.DoFn):
def __init__(self):
        # This counter is incremented but never asserted on; the test below
        # queries the 'recordsRead' counter defined in utils.CountingSource.
self.received_records = Metrics.counter(
self.__class__, 'receivedRecords')
def process(self, element):
self.received_records.inc()
pipeline = TestPipeline()
(pipeline | Read(CountingSource(100)) | beam.ParDo(CounterDoFn()))
res = pipeline.run()
res.wait_until_finish()
# This counter is defined in utils.CountingSource.
metric_results = res.metrics().query(
MetricsFilter().with_name('recordsRead'))
outputs_counter = metric_results['counters'][0]
self.assertStartswith(outputs_counter.key.step, 'Read')
self.assertEqual(outputs_counter.key.metric.name, 'recordsRead')
self.assertEqual(outputs_counter.committed, 100)
self.assertEqual(outputs_counter.attempted, 100)
@attr('ValidatesRunner')
def test_par_do_with_multiple_outputs_and_using_yield(self):
class SomeDoFn(beam.DoFn):
"""A custom DoFn using yield."""
def process(self, element):
yield element
if element % 2 == 0:
yield pvalue.TaggedOutput('even', element)
else:
yield pvalue.TaggedOutput('odd', element)
with TestPipeline() as pipeline:
nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
results = nums | 'ClassifyNumbers' >> beam.ParDo(SomeDoFn()).with_outputs(
'odd', 'even', main='main')
assert_that(results.main, equal_to([1, 2, 3, 4]))
assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
assert_that(results.even, equal_to([2, 4]), label='assert:even')
@attr('ValidatesRunner')
def test_par_do_with_multiple_outputs_and_using_return(self):
def some_fn(v):
if v % 2 == 0:
return [v, pvalue.TaggedOutput('even', v)]
return [v, pvalue.TaggedOutput('odd', v)]
with TestPipeline() as pipeline:
nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
results = nums | 'ClassifyNumbers' >> beam.FlatMap(some_fn).with_outputs(
'odd', 'even', main='main')
assert_that(results.main, equal_to([1, 2, 3, 4]))
assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
assert_that(results.even, equal_to([2, 4]), label='assert:even')
@attr('ValidatesRunner')
def test_undeclared_outputs(self):
with TestPipeline() as pipeline:
nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
results = nums | 'ClassifyNumbers' >> beam.FlatMap(
lambda x: [
x,
pvalue.TaggedOutput('even' if x % 2 == 0 else 'odd', x),
pvalue.TaggedOutput('extra', x)
]).with_outputs()
assert_that(results[None], equal_to([1, 2, 3, 4]))
assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
assert_that(results.even, equal_to([2, 4]), label='assert:even')
@attr('ValidatesRunner')
def test_multiple_empty_outputs(self):
with TestPipeline() as pipeline:
nums = pipeline | 'Some Numbers' >> beam.Create([1, 3, 5])
results = nums | 'ClassifyNumbers' >> beam.FlatMap(
lambda x:
[x, pvalue.TaggedOutput('even'
if x % 2 == 0 else 'odd', x)]).with_outputs()
assert_that(results[None], equal_to([1, 3, 5]))
assert_that(results.odd, equal_to([1, 3, 5]), label='assert:odd')
assert_that(results.even, equal_to([]), label='assert:even')
def test_do_requires_do_fn_returning_iterable(self):
# This function is incorrect because it returns an object that isn't an
# iterable.
def incorrect_par_do_fn(x):
return x + 5
with self.assertRaises(typehints.TypeCheckError) as cm:
with TestPipeline() as pipeline:
pipeline._options.view_as(TypeOptions).runtime_type_check = True
pcoll = pipeline | 'Start' >> beam.Create([2, 9, 3])
pcoll | 'Do' >> beam.FlatMap(incorrect_par_do_fn)
    # It's a requirement that all user-defined functions passed to a ParDo
    # return an iterable.
expected_error_prefix = 'FlatMap and ParDo must return an iterable.'
self.assertStartswith(cm.exception.args[0], expected_error_prefix)
def test_do_fn_with_finish(self):
class MyDoFn(beam.DoFn):
def process(self, element):
pass
def finish_bundle(self):
yield WindowedValue('finish', -1, [window.GlobalWindow()])
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.ParDo(MyDoFn())
# May have many bundles, but each has a start and finish.
def matcher():
def match(actual):
equal_to(['finish'])(list(set(actual)))
equal_to([1])([actual.count('finish')])
return match
assert_that(result, matcher())
def test_do_fn_with_windowing_in_finish_bundle(self):
windowfn = window.FixedWindows(2)
class MyDoFn(beam.DoFn):
def process(self, element):
yield TimestampedValue('process' + str(element), 5)
def finish_bundle(self):
yield WindowedValue('finish', 1, [windowfn])
pipeline = TestPipeline()
result = (
pipeline
| 'Start' >> beam.Create([1])
| beam.ParDo(MyDoFn())
| WindowInto(windowfn)
| 'create tuple' >> beam.Map(
lambda v,
t=beam.DoFn.TimestampParam,
w=beam.DoFn.WindowParam: (v, t, w.start, w.end)))
expected_process = [('process1', Timestamp(5), Timestamp(4), Timestamp(6))]
expected_finish = [('finish', Timestamp(1), Timestamp(0), Timestamp(2))]
assert_that(result, equal_to(expected_process + expected_finish))
pipeline.run()
def test_do_fn_with_start(self):
class MyDoFn(beam.DoFn):
def __init__(self):
self.state = 'init'
def start_bundle(self):
self.state = 'started'
def process(self, element):
if self.state == 'started':
yield 'started'
self.state = 'process'
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
result = pcoll | 'Do' >> beam.ParDo(MyDoFn())
# May have many bundles, but each has a start and finish.
def matcher():
def match(actual):
equal_to(['started'])(list(set(actual)))
equal_to([1])([actual.count('started')])
return match
assert_that(result, matcher())
def test_do_fn_with_start_error(self):
class MyDoFn(beam.DoFn):
def start_bundle(self):
return [1]
def process(self, element):
pass
with self.assertRaises(RuntimeError):
with TestPipeline() as p:
p | 'Start' >> beam.Create([1, 2, 3]) | 'Do' >> beam.ParDo(MyDoFn())
def test_filter(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3, 4])
result = pcoll | 'Filter' >> beam.Filter(lambda x: x % 2 == 0)
assert_that(result, equal_to([2, 4]))
class _MeanCombineFn(beam.CombineFn):
def create_accumulator(self):
return (0, 0)
def add_input(self, sum_count, element):
(sum_, count) = sum_count
return sum_ + element, count + 1
def merge_accumulators(self, accumulators):
sums, counts = zip(*accumulators)
return sum(sums), sum(counts)
def extract_output(self, sum_count):
(sum_, count) = sum_count
if not count:
return float('nan')
return sum_ / float(count)
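  # Illustrative walkthrough (not part of the original tests) of the CombineFn
  # contract implemented by _MeanCombineFn above, traced by hand:
  #
  #   acc = create_accumulator()                # (0, 0)
  #   acc = add_input(acc, 1)                   # (1, 1)
  #   acc = add_input(acc, 2)                   # (3, 2)
  #   acc = merge_accumulators([acc, (6, 1)])   # (9, 3)
  #   extract_output((9, 3))                    # 3.0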
def test_combine_with_combine_fn(self):
vals = [1, 2, 3, 4, 5, 6, 7]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(vals)
result = pcoll | 'Mean' >> beam.CombineGlobally(self._MeanCombineFn())
assert_that(result, equal_to([sum(vals) // len(vals)]))
def test_combine_with_callable(self):
vals = [1, 2, 3, 4, 5, 6, 7]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(vals)
result = pcoll | beam.CombineGlobally(sum)
assert_that(result, equal_to([sum(vals)]))
def test_combine_with_side_input_as_arg(self):
values = [1, 2, 3, 4, 5, 6, 7]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(values)
divisor = pipeline | 'Divisor' >> beam.Create([2])
result = pcoll | 'Max' >> beam.CombineGlobally(
# Multiples of divisor only.
lambda vals,
d: max(v for v in vals if v % d == 0),
pvalue.AsSingleton(divisor)).without_defaults()
filt_vals = [v for v in values if v % 2 == 0]
assert_that(result, equal_to([max(filt_vals)]))
def test_combine_per_key_with_combine_fn(self):
vals_1 = [1, 2, 3, 4, 5, 6, 7]
vals_2 = [2, 4, 6, 8, 10, 12, 14]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(
([('a', x) for x in vals_1] + [('b', x) for x in vals_2]))
result = pcoll | 'Mean' >> beam.CombinePerKey(self._MeanCombineFn())
assert_that(
result,
equal_to([('a', sum(vals_1) // len(vals_1)),
('b', sum(vals_2) // len(vals_2))]))
def test_combine_per_key_with_callable(self):
vals_1 = [1, 2, 3, 4, 5, 6, 7]
vals_2 = [2, 4, 6, 8, 10, 12, 14]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(
([('a', x) for x in vals_1] + [('b', x) for x in vals_2]))
result = pcoll | beam.CombinePerKey(sum)
assert_that(result, equal_to([('a', sum(vals_1)), ('b', sum(vals_2))]))
def test_combine_per_key_with_side_input_as_arg(self):
vals_1 = [1, 2, 3, 4, 5, 6, 7]
vals_2 = [2, 4, 6, 8, 10, 12, 14]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(
([('a', x) for x in vals_1] + [('b', x) for x in vals_2]))
divisor = pipeline | 'Divisor' >> beam.Create([2])
result = pcoll | beam.CombinePerKey(
lambda vals,
d: max(v for v in vals if v % d == 0),
pvalue.AsSingleton(divisor)) # Multiples of divisor only.
m_1 = max(v for v in vals_1 if v % 2 == 0)
m_2 = max(v for v in vals_2 if v % 2 == 0)
assert_that(result, equal_to([('a', m_1), ('b', m_2)]))
def test_group_by_key(self):
pipeline = TestPipeline()
pcoll = pipeline | 'start' >> beam.Create([(1, 1), (2, 1), (3, 1), (1, 2),
(2, 2), (1, 3)])
result = pcoll | 'Group' >> beam.GroupByKey() | _SortLists
assert_that(result, equal_to([(1, [1, 2, 3]), (2, [1, 2]), (3, [1])]))
pipeline.run()
def test_group_by_key_reiteration(self):
class MyDoFn(beam.DoFn):
def process(self, gbk_result):
key, value_list = gbk_result
sum_val = 0
# Iterate the GBK result for multiple times.
for _ in range(0, 17):
sum_val += sum(value_list)
return [(key, sum_val)]
with TestPipeline() as pipeline:
pcoll = pipeline | 'start' >> beam.Create([(1, 1), (1, 2), (1, 3),
(1, 4)])
result = (
pcoll | 'Group' >> beam.GroupByKey()
| 'Reiteration-Sum' >> beam.ParDo(MyDoFn()))
assert_that(result, equal_to([(1, 170)]))
def test_partition_with_partition_fn(self):
class SomePartitionFn(beam.PartitionFn):
def partition_for(self, element, num_partitions, offset):
return (element % 3) + offset
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
# Attempt nominal partition operation.
partitions = pcoll | 'Part 1' >> beam.Partition(SomePartitionFn(), 4, 1)
assert_that(partitions[0], equal_to([]))
assert_that(partitions[1], equal_to([0, 3, 6]), label='p1')
assert_that(partitions[2], equal_to([1, 4, 7]), label='p2')
assert_that(partitions[3], equal_to([2, 5, 8]), label='p3')
# Check that a bad partition label will yield an error. For the
# DirectRunner, this error manifests as an exception.
with self.assertRaises(ValueError):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
partitions = pcoll | beam.Partition(SomePartitionFn(), 4, 10000)
def test_partition_with_callable(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
partitions = (
pcoll |
'part' >> beam.Partition(lambda e, n, offset: (e % 3) + offset, 4, 1))
assert_that(partitions[0], equal_to([]))
assert_that(partitions[1], equal_to([0, 3, 6]), label='p1')
assert_that(partitions[2], equal_to([1, 4, 7]), label='p2')
assert_that(partitions[3], equal_to([2, 5, 8]), label='p3')
def test_partition_followed_by_flatten_and_groupbykey(self):
"""Regression test for an issue with how partitions are handled."""
pipeline = TestPipeline()
contents = [('aa', 1), ('bb', 2), ('aa', 2)]
created = pipeline | 'A' >> beam.Create(contents)
partitioned = created | 'B' >> beam.Partition(lambda x, n: len(x) % n, 3)
flattened = partitioned | 'C' >> beam.Flatten()
grouped = flattened | 'D' >> beam.GroupByKey() | _SortLists
assert_that(grouped, equal_to([('aa', [1, 2]), ('bb', [2])]))
pipeline.run()
@attr('ValidatesRunner')
def test_flatten_pcollections(self):
with TestPipeline() as pipeline:
pcoll_1 = pipeline | 'Start 1' >> beam.Create([0, 1, 2, 3])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([4, 5, 6, 7])
result = (pcoll_1, pcoll_2) | 'Flatten' >> beam.Flatten()
assert_that(result, equal_to([0, 1, 2, 3, 4, 5, 6, 7]))
def test_flatten_no_pcollections(self):
with TestPipeline() as pipeline:
with self.assertRaises(ValueError):
() | 'PipelineArgMissing' >> beam.Flatten()
result = () | 'Empty' >> beam.Flatten(pipeline=pipeline)
assert_that(result, equal_to([]))
@attr('ValidatesRunner')
def test_flatten_one_single_pcollection(self):
with TestPipeline() as pipeline:
input = [0, 1, 2, 3]
pcoll = pipeline | 'Input' >> beam.Create(input)
result = (pcoll, ) | 'Single Flatten' >> beam.Flatten()
assert_that(result, equal_to(input))
# TODO(BEAM-9002): Does not work in streaming mode on Dataflow.
@attr('ValidatesRunner', 'sickbay-streaming')
def test_flatten_same_pcollections(self):
with TestPipeline() as pipeline:
pc = pipeline | beam.Create(['a', 'b'])
assert_that((pc, pc, pc) | beam.Flatten(), equal_to(['a', 'b'] * 3))
def test_flatten_pcollections_in_iterable(self):
with TestPipeline() as pipeline:
pcoll_1 = pipeline | 'Start 1' >> beam.Create([0, 1, 2, 3])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([4, 5, 6, 7])
result = [pcoll for pcoll in (pcoll_1, pcoll_2)] | beam.Flatten()
assert_that(result, equal_to([0, 1, 2, 3, 4, 5, 6, 7]))
@attr('ValidatesRunner')
def test_flatten_a_flattened_pcollection(self):
with TestPipeline() as pipeline:
pcoll_1 = pipeline | 'Start 1' >> beam.Create([0, 1, 2, 3])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([4, 5, 6, 7])
pcoll_3 = pipeline | 'Start 3' >> beam.Create([8, 9])
pcoll_12 = (pcoll_1, pcoll_2) | 'Flatten' >> beam.Flatten()
pcoll_123 = (pcoll_12, pcoll_3) | 'Flatten again' >> beam.Flatten()
assert_that(pcoll_123, equal_to([x for x in range(10)]))
def test_flatten_input_type_must_be_iterable(self):
# Inputs to flatten *must* be an iterable.
with self.assertRaises(ValueError):
4 | beam.Flatten()
def test_flatten_input_type_must_be_iterable_of_pcolls(self):
# Inputs to flatten *must* be an iterable of PCollections.
with self.assertRaises(TypeError):
{'l': 'test'} | beam.Flatten()
with self.assertRaises(TypeError):
set([1, 2, 3]) | beam.Flatten()
@attr('ValidatesRunner')
def test_flatten_multiple_pcollections_having_multiple_consumers(self):
with TestPipeline() as pipeline:
input = pipeline | 'Start' >> beam.Create(['AA', 'BBB', 'CC'])
def split_even_odd(element):
tag = 'even_length' if len(element) % 2 == 0 else 'odd_length'
return pvalue.TaggedOutput(tag, element)
even_length, odd_length = (input | beam.Map(split_even_odd)
.with_outputs('even_length', 'odd_length'))
merged = (even_length, odd_length) | 'Flatten' >> beam.Flatten()
assert_that(merged, equal_to(['AA', 'BBB', 'CC']))
assert_that(even_length, equal_to(['AA', 'CC']), label='assert:even')
assert_that(odd_length, equal_to(['BBB']), label='assert:odd')
def test_co_group_by_key_on_list(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create([('a', 1), ('a', 2), ('b', 3),
('c', 4)])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([('a', 5), ('a', 6), ('c', 7),
('c', 8)])
result = (pcoll_1, pcoll_2) | beam.CoGroupByKey() | _SortLists
assert_that(
result,
equal_to([('a', ([1, 2], [5, 6])), ('b', ([3], [])),
('c', ([4], [7, 8]))]))
pipeline.run()
def test_co_group_by_key_on_iterable(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create([('a', 1), ('a', 2), ('b', 3),
('c', 4)])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([('a', 5), ('a', 6), ('c', 7),
('c', 8)])
result = [pc for pc in (pcoll_1, pcoll_2)] | beam.CoGroupByKey()
result |= _SortLists
assert_that(
result,
equal_to([('a', ([1, 2], [5, 6])), ('b', ([3], [])),
('c', ([4], [7, 8]))]))
pipeline.run()
def test_co_group_by_key_on_dict(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create([('a', 1), ('a', 2), ('b', 3),
('c', 4)])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([('a', 5), ('a', 6), ('c', 7),
('c', 8)])
result = {'X': pcoll_1, 'Y': pcoll_2} | beam.CoGroupByKey()
result |= _SortLists
assert_that(
result,
equal_to([('a', {
'X': [1, 2], 'Y': [5, 6]
}), ('b', {
'X': [3], 'Y': []
}), ('c', {
'X': [4], 'Y': [7, 8]
})]))
pipeline.run()
def test_group_by_key_input_must_be_kv_pairs(self):
with self.assertRaises(typehints.TypeCheckError) as e:
with TestPipeline() as pipeline:
pcolls = pipeline | 'A' >> beam.Create([1, 2, 3, 4, 5])
pcolls | 'D' >> beam.GroupByKey()
self.assertStartswith(
e.exception.args[0],
'Input type hint violation at D: expected '
'Tuple[TypeVariable[K], TypeVariable[V]]')
def test_group_by_key_only_input_must_be_kv_pairs(self):
with self.assertRaises(typehints.TypeCheckError) as cm:
with TestPipeline() as pipeline:
pcolls = pipeline | 'A' >> beam.Create(['a', 'b', 'f'])
pcolls | 'D' >> beam.GroupByKey()
expected_error_prefix = (
'Input type hint violation at D: expected '
'Tuple[TypeVariable[K], TypeVariable[V]]')
self.assertStartswith(cm.exception.args[0], expected_error_prefix)
def test_keys_and_values(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([(3, 1), (2, 1), (1, 1), (3, 2),
(2, 2), (3, 3)])
keys = pcoll.apply(beam.Keys('keys'))
vals = pcoll.apply(beam.Values('vals'))
assert_that(keys, equal_to([1, 2, 2, 3, 3, 3]), label='assert:keys')
assert_that(vals, equal_to([1, 1, 1, 2, 2, 3]), label='assert:vals')
def test_kv_swap(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create([(6, 3), (1, 2), (7, 1), (5, 2),
(3, 2)])
result = pcoll.apply(beam.KvSwap(), label='swap')
assert_that(result, equal_to([(1, 7), (2, 1), (2, 3), (2, 5), (3, 6)]))
def test_distinct(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(
[6, 3, 1, 1, 9, 'pleat', 'pleat', 'kazoo', 'navel'])
result = pcoll.apply(beam.Distinct())
assert_that(result, equal_to([1, 3, 6, 9, 'pleat', 'kazoo', 'navel']))
def test_chained_ptransforms(self):
with TestPipeline() as pipeline:
t = (
beam.Map(lambda x: (x, 1))
| beam.GroupByKey()
| beam.Map(lambda x_ones: (x_ones[0], sum(x_ones[1]))))
result = pipeline | 'Start' >> beam.Create(['a', 'a', 'b']) | t
assert_that(result, equal_to([('a', 2), ('b', 1)]))
def test_apply_to_list(self):
self.assertCountEqual([1, 2, 3],
[0, 1, 2] | 'AddOne' >> beam.Map(lambda x: x + 1))
self.assertCountEqual([1],
[0, 1, 2] | 'Odd' >> beam.Filter(lambda x: x % 2))
self.assertCountEqual([1, 2, 100, 3], ([1, 2, 3], [100]) | beam.Flatten())
join_input = ([('k', 'a')], [('k', 'b'), ('k', 'c')])
self.assertCountEqual([('k', (['a'], ['b', 'c']))],
join_input | beam.CoGroupByKey() | _SortLists)
def test_multi_input_ptransform(self):
class DisjointUnion(PTransform):
def expand(self, pcollections):
return (
pcollections
| beam.Flatten()
| beam.Map(lambda x: (x, None))
| beam.GroupByKey()
| beam.Map(lambda kv: kv[0]))
self.assertEqual([1, 2, 3], sorted(([1, 2], [2, 3]) | DisjointUnion()))
def test_apply_to_crazy_pvaluish(self):
class NestedFlatten(PTransform):
"""A PTransform taking and returning nested PValueish.
Takes as input a list of dicts, and returns a dict with the corresponding
values flattened.
"""
def _extract_input_pvalues(self, pvalueish):
pvalueish = list(pvalueish)
return pvalueish, sum([list(p.values()) for p in pvalueish], [])
def expand(self, pcoll_dicts):
keys = reduce(operator.or_, [set(p.keys()) for p in pcoll_dicts])
res = {}
for k in keys:
res[k] = [p[k] for p in pcoll_dicts if k in p] | k >> beam.Flatten()
return res
res = [{
'a': [1, 2, 3]
}, {
'a': [4, 5, 6], 'b': ['x', 'y', 'z']
}, {
'a': [7, 8], 'b': ['x', 'y'], 'c': []
}] | NestedFlatten()
self.assertEqual(3, len(res))
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], sorted(res['a']))
self.assertEqual(['x', 'x', 'y', 'y', 'z'], sorted(res['b']))
self.assertEqual([], sorted(res['c']))
def test_named_tuple(self):
MinMax = collections.namedtuple('MinMax', ['min', 'max'])
class MinMaxTransform(PTransform):
def expand(self, pcoll):
return MinMax(
min=pcoll | beam.CombineGlobally(min).without_defaults(),
max=pcoll | beam.CombineGlobally(max).without_defaults())
res = [1, 2, 4, 8] | MinMaxTransform()
self.assertIsInstance(res, MinMax)
self.assertEqual(res, MinMax(min=[1], max=[8]))
flat = res | beam.Flatten()
self.assertEqual(sorted(flat), [1, 8])
def test_tuple_twice(self):
class Duplicate(PTransform):
def expand(self, pcoll):
return pcoll, pcoll
res1, res2 = [1, 2, 4, 8] | Duplicate()
self.assertEqual(sorted(res1), [1, 2, 4, 8])
self.assertEqual(sorted(res2), [1, 2, 4, 8])
@beam.ptransform_fn
def SamplePTransform(pcoll):
"""Sample transform using the @ptransform_fn decorator."""
map_transform = 'ToPairs' >> beam.Map(lambda v: (v, None))
combine_transform = 'Group' >> beam.CombinePerKey(lambda vs: None)
keys_transform = 'Distinct' >> beam.Keys()
return pcoll | map_transform | combine_transform | keys_transform
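# Note (not part of the original tests): @ptransform_fn wraps the function
# above in a PTransform, so it can be applied with the pipe operator and given
# a label, e.g. `pcoll | '*Sample*' >> SamplePTransform()` as done in
# PTransformLabelsTest.test_apply_ptransform_using_decorator below, which is
# equivalent to chaining the Map/CombinePerKey/Keys transforms directly.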
class PTransformLabelsTest(unittest.TestCase):
class CustomTransform(beam.PTransform):
pardo = None
def expand(self, pcoll):
self.pardo = '*Do*' >> beam.FlatMap(lambda x: [x + 1])
return pcoll | self.pardo
def test_chained_ptransforms(self):
"""Tests that chaining gets proper nesting."""
with TestPipeline() as pipeline:
map1 = 'Map1' >> beam.Map(lambda x: (x, 1))
gbk = 'Gbk' >> beam.GroupByKey()
map2 = 'Map2' >> beam.Map(lambda x_ones2: (x_ones2[0], sum(x_ones2[1])))
t = (map1 | gbk | map2)
result = pipeline | 'Start' >> beam.Create(['a', 'a', 'b']) | t
self.assertTrue('Map1|Gbk|Map2/Map1' in pipeline.applied_labels)
self.assertTrue('Map1|Gbk|Map2/Gbk' in pipeline.applied_labels)
self.assertTrue('Map1|Gbk|Map2/Map2' in pipeline.applied_labels)
assert_that(result, equal_to([('a', 2), ('b', 1)]))
def test_apply_custom_transform_without_label(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
custom = PTransformLabelsTest.CustomTransform()
result = pipeline.apply(custom, pcoll)
self.assertTrue('CustomTransform' in pipeline.applied_labels)
self.assertTrue('CustomTransform/*Do*' in pipeline.applied_labels)
assert_that(result, equal_to([2, 3, 4]))
def test_apply_custom_transform_with_label(self):
with TestPipeline() as pipeline:
pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
custom = PTransformLabelsTest.CustomTransform('*Custom*')
result = pipeline.apply(custom, pcoll)
self.assertTrue('*Custom*' in pipeline.applied_labels)
self.assertTrue('*Custom*/*Do*' in pipeline.applied_labels)
assert_that(result, equal_to([2, 3, 4]))
def test_combine_without_label(self):
vals = [1, 2, 3, 4, 5, 6, 7]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(vals)
combine = beam.CombineGlobally(sum)
result = pcoll | combine
self.assertTrue('CombineGlobally(sum)' in pipeline.applied_labels)
assert_that(result, equal_to([sum(vals)]))
def test_apply_ptransform_using_decorator(self):
pipeline = TestPipeline()
pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
_ = pcoll | '*Sample*' >> SamplePTransform()
self.assertTrue('*Sample*' in pipeline.applied_labels)
self.assertTrue('*Sample*/ToPairs' in pipeline.applied_labels)
self.assertTrue('*Sample*/Group' in pipeline.applied_labels)
self.assertTrue('*Sample*/Distinct' in pipeline.applied_labels)
def test_combine_with_label(self):
vals = [1, 2, 3, 4, 5, 6, 7]
with TestPipeline() as pipeline:
pcoll = pipeline | 'Start' >> beam.Create(vals)
combine = '*Sum*' >> beam.CombineGlobally(sum)
result = pcoll | combine
self.assertTrue('*Sum*' in pipeline.applied_labels)
assert_that(result, equal_to([sum(vals)]))
def check_label(self, ptransform, expected_label):
pipeline = TestPipeline()
pipeline | 'Start' >> beam.Create([('a', 1)]) | ptransform
actual_label = sorted(
label for label in pipeline.applied_labels
if not label.startswith('Start'))[0]
self.assertEqual(expected_label, re.sub(r'\d{3,}', '#', actual_label))
def test_default_labels(self):
self.check_label(beam.Map(len), r'Map(len)')
self.check_label(
beam.Map(lambda x: x), r'Map(<lambda at ptransform_test.py:#>)')
self.check_label(beam.FlatMap(list), r'FlatMap(list)')
self.check_label(beam.Filter(sum), r'Filter(sum)')
self.check_label(beam.CombineGlobally(sum), r'CombineGlobally(sum)')
self.check_label(beam.CombinePerKey(sum), r'CombinePerKey(sum)')
class MyDoFn(beam.DoFn):
def process(self, unused_element):
pass
self.check_label(beam.ParDo(MyDoFn()), r'ParDo(MyDoFn)')
def test_label_propogation(self):
self.check_label('TestMap' >> beam.Map(len), r'TestMap')
self.check_label('TestLambda' >> beam.Map(lambda x: x), r'TestLambda')
self.check_label('TestFlatMap' >> beam.FlatMap(list), r'TestFlatMap')
self.check_label('TestFilter' >> beam.Filter(sum), r'TestFilter')
self.check_label('TestCG' >> beam.CombineGlobally(sum), r'TestCG')
self.check_label('TestCPK' >> beam.CombinePerKey(sum), r'TestCPK')
class MyDoFn(beam.DoFn):
def process(self, unused_element):
pass
self.check_label('TestParDo' >> beam.ParDo(MyDoFn()), r'TestParDo')
class PTransformTestDisplayData(unittest.TestCase):
def test_map_named_function(self):
tr = beam.Map(len)
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'len', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
def test_map_anonymous_function(self):
tr = beam.Map(lambda x: x)
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'<lambda>', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
def test_flatmap_named_function(self):
tr = beam.FlatMap(list)
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'list', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
def test_flatmap_anonymous_function(self):
tr = beam.FlatMap(lambda x: [x])
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'<lambda>', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
def test_filter_named_function(self):
tr = beam.Filter(sum)
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'sum', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
def test_filter_anonymous_function(self):
tr = beam.Filter(lambda x: x // 30)
dd = DisplayData.create_from(tr)
nspace = 'apache_beam.transforms.core.CallableWrapperDoFn'
expected_item = DisplayDataItem(
'<lambda>', key='fn', label='Transform Function', namespace=nspace)
hc.assert_that(dd.items, hc.has_item(expected_item))
class PTransformTypeCheckTestCase(TypeHintTestCase):
def assertStartswith(self, msg, prefix):
self.assertTrue(
msg.startswith(prefix), '"%s" does not start with "%s"' % (msg, prefix))
def setUp(self):
self.p = TestPipeline()
def test_do_fn_pipeline_pipeline_type_check_satisfied(self):
@with_input_types(int, int)
@with_output_types(int)
class AddWithFive(beam.DoFn):
def process(self, element, five):
return [element + five]
d = (
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Add' >> beam.ParDo(AddWithFive(), 5))
assert_that(d, equal_to([6, 7, 8]))
self.p.run()
def test_do_fn_pipeline_pipeline_type_check_violated(self):
@with_input_types(str, str)
@with_output_types(str)
class ToUpperCaseWithPrefix(beam.DoFn):
def process(self, element, prefix):
return [prefix + element.upper()]
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Upper' >> beam.ParDo(ToUpperCaseWithPrefix(), 'hello'))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Upper': "
"requires {} but got {} for element".format(str, int))
def test_do_fn_pipeline_runtime_type_check_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_input_types(int, int)
@with_output_types(int)
class AddWithNum(beam.DoFn):
def process(self, element, num):
return [element + num]
d = (
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Add' >> beam.ParDo(AddWithNum(), 5))
assert_that(d, equal_to([6, 7, 8]))
self.p.run()
def test_do_fn_pipeline_runtime_type_check_violated(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_input_types(int, int)
@with_output_types(int)
class AddWithNum(beam.DoFn):
def process(self, element, num):
return [element + num]
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'T' >> beam.Create(['1', '2', '3']).with_output_types(str)
| 'Add' >> beam.ParDo(AddWithNum(), 5))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Add': "
"requires {} but got {} for element".format(int, str))
def test_pardo_does_not_type_check_using_type_hint_decorators(self):
@with_input_types(a=int)
@with_output_types(typing.List[str])
def int_to_str(a):
return [str(a)]
# The function above is expecting an int for its only parameter. However, it
# will receive a str instead, which should result in a raised exception.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'S' >> beam.Create(['b', 'a', 'r']).with_output_types(str)
| 'ToStr' >> beam.FlatMap(int_to_str))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'ToStr': "
"requires {} but got {} for a".format(int, str))
def test_pardo_properly_type_checks_using_type_hint_decorators(self):
@with_input_types(a=str)
@with_output_types(typing.List[str])
def to_all_upper_case(a):
return [a.upper()]
    # If this type-checks then no error should be raised.
d = (
self.p
| 'T' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'Case' >> beam.FlatMap(to_all_upper_case))
assert_that(d, equal_to(['T', 'E', 'S', 'T']))
self.p.run()
    # The output type should have been recognized as 'str' rather than
    # List[str], since FlatMap flattens the returned list.
self.assertEqual(str, d.element_type)
def test_pardo_does_not_type_check_using_type_hint_methods(self):
# The first ParDo outputs pcoll's of type int, however the second ParDo is
# expecting pcoll's of type str instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'S' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| (
'Score' >> beam.FlatMap(lambda x: [1] if x == 't' else [2]).
with_input_types(str).with_output_types(int))
| (
'Upper' >> beam.FlatMap(lambda x: [x.upper()]).with_input_types(
str).with_output_types(str)))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Upper': "
"requires {} but got {} for x".format(str, int))
def test_pardo_properly_type_checks_using_type_hint_methods(self):
# Pipeline should be created successfully without an error
d = (
self.p
| 'S' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'Dup' >> beam.FlatMap(lambda x: [x + x]).with_input_types(
str).with_output_types(str)
| 'Upper' >> beam.FlatMap(lambda x: [x.upper()]).with_input_types(
str).with_output_types(str))
assert_that(d, equal_to(['TT', 'EE', 'SS', 'TT']))
self.p.run()
def test_map_does_not_type_check_using_type_hints_methods(self):
    # The transform before 'Map' has indicated that it outputs PCollections of
    # ints, while 'Map' is expecting PCollections of str.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
| 'Upper' >> beam.Map(lambda x: x.upper()).with_input_types(
str).with_output_types(str))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Upper': "
"requires {} but got {} for x".format(str, int))
def test_map_properly_type_checks_using_type_hints_methods(self):
# No error should be raised if this type-checks properly.
d = (
self.p
| 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
| 'ToStr' >>
beam.Map(lambda x: str(x)).with_input_types(int).with_output_types(str))
assert_that(d, equal_to(['1', '2', '3', '4']))
self.p.run()
def test_map_does_not_type_check_using_type_hints_decorator(self):
@with_input_types(s=str)
@with_output_types(str)
def upper(s):
return s.upper()
# Hinted function above expects a str at pipeline construction.
# However, 'Map' should detect that Create has hinted an int instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
| 'Upper' >> beam.Map(upper))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Upper': "
"requires {} but got {} for s".format(str, int))
def test_map_properly_type_checks_using_type_hints_decorator(self):
@with_input_types(a=bool)
@with_output_types(int)
def bool_to_int(a):
return int(a)
    # If this type-checks then no error should be raised.
d = (
self.p
| 'Bools' >> beam.Create([True, False, True]).with_output_types(bool)
| 'ToInts' >> beam.Map(bool_to_int))
assert_that(d, equal_to([1, 0, 1]))
self.p.run()
def test_filter_does_not_type_check_using_type_hints_method(self):
# Filter is expecting an int but instead looks to the 'left' and sees a str
# incoming.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'Strs' >> beam.Create(['1', '2', '3', '4', '5'
]).with_output_types(str)
| 'Lower' >> beam.Map(lambda x: x.lower()).with_input_types(
str).with_output_types(str)
| 'Below 3' >> beam.Filter(lambda x: x < 3).with_input_types(int))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Below 3': "
"requires {} but got {} for x".format(int, str))
def test_filter_type_checks_using_type_hints_method(self):
# No error should be raised if this type-checks properly.
d = (
self.p
| beam.Create(['1', '2', '3', '4', '5']).with_output_types(str)
| 'ToInt' >>
beam.Map(lambda x: int(x)).with_input_types(str).with_output_types(int)
| 'Below 3' >> beam.Filter(lambda x: x < 3).with_input_types(int))
assert_that(d, equal_to([1, 2]))
self.p.run()
def test_filter_does_not_type_check_using_type_hints_decorator(self):
@with_input_types(a=float)
def more_than_half(a):
return a > 0.50
# Func above was hinted to only take a float, yet an int will be passed.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'Ints' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
| 'Half' >> beam.Filter(more_than_half))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'Half': "
"requires {} but got {} for a".format(float, int))
def test_filter_type_checks_using_type_hints_decorator(self):
@with_input_types(b=int)
def half(b):
import random
return bool(random.choice([0, 1]))
# Filter should deduce that it returns the same type that it takes.
(
self.p
| 'Str' >> beam.Create(range(5)).with_output_types(int)
| 'Half' >> beam.Filter(half)
| 'ToBool' >> beam.Map(lambda x: bool(x)).with_input_types(
int).with_output_types(bool))
def test_group_by_key_only_output_type_deduction(self):
d = (
self.p
| 'Str' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| (
'Pair' >> beam.Map(lambda x: (x, ord(x))).with_output_types(
typing.Tuple[str, str]))
| beam.GroupByKey())
# Output type should correctly be deduced.
# GBK-only should deduce that Tuple[A, B] is turned into
# Tuple[A, Iterable[B]].
self.assertCompatible(
typing.Tuple[str, typing.Iterable[str]], d.element_type)
def test_group_by_key_output_type_deduction(self):
d = (
self.p
| 'Str' >> beam.Create(range(20)).with_output_types(int)
| (
'PairNegative' >> beam.Map(lambda x: (x % 5, -x)).with_output_types(
typing.Tuple[int, int]))
| beam.GroupByKey())
# Output type should correctly be deduced.
# GBK should deduce that Tuple[A, B] is turned into Tuple[A, Iterable[B]].
self.assertCompatible(
typing.Tuple[int, typing.Iterable[int]], d.element_type)
def test_group_by_key_only_does_not_type_check(self):
# GBK will be passed raw int's here instead of some form of Tuple[A, B].
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([1, 2, 3]).with_output_types(int)
| 'F' >> beam.GroupByKey())
self.assertStartswith(
e.exception.args[0],
"Input type hint violation at F: "
"expected Tuple[TypeVariable[K], TypeVariable[V]], "
"got {}".format(int))
def test_group_by_does_not_type_check(self):
    # Create is returning elements of type Iterable[int], rather than the
    # Tuple[K, V] key-value pairs that GroupByKey requires.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| (beam.Create([[1], [2]]).with_output_types(typing.Iterable[int]))
| 'T' >> beam.GroupByKey())
self.assertStartswith(
e.exception.args[0],
"Input type hint violation at T: "
"expected Tuple[TypeVariable[K], TypeVariable[V]], "
"got Iterable[int]")
def test_pipeline_checking_pardo_insufficient_type_information(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
# Type checking is enabled, but 'Create' doesn't pass on any relevant type
# information to the ParDo.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'Nums' >> beam.Create(range(5))
| 'ModDup' >> beam.FlatMap(lambda x: (x % 2, x)))
self.assertEqual(
'Pipeline type checking is enabled, however no output '
'type-hint was found for the PTransform Create(Nums)',
e.exception.args[0])
def test_pipeline_checking_gbk_insufficient_type_information(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
# Type checking is enabled, but 'Map' doesn't pass on any relevant type
# information to GBK-only.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'Nums' >> beam.Create(range(5)).with_output_types(int)
| 'ModDup' >> beam.Map(lambda x: (x % 2, x))
| beam.GroupByKey())
self.assertEqual(
'Pipeline type checking is enabled, however no output '
'type-hint was found for the PTransform '
'ParDo(ModDup)',
e.exception.args[0])
def test_disable_pipeline_type_check(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
    # The pipeline below would normally raise a type-check error; however,
    # pipeline type checking was disabled above.
(
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Lower' >> beam.Map(lambda x: x.lower()).with_input_types(
str).with_output_types(str))
def test_run_time_type_checking_enabled_type_violation(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(str)
@with_input_types(x=int)
def int_to_string(x):
return str(x)
# Function above has been type-hinted to only accept an int. But in the
# pipeline execution it'll be passed a string due to the output of Create.
(
self.p
| 'T' >> beam.Create(['some_string'])
| 'ToStr' >> beam.Map(int_to_string))
with self.assertRaises(typehints.TypeCheckError) as e:
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(ToStr): "
"Type-hint for argument: 'x' violated. "
"Expected an instance of {}, "
"instead found some_string, an instance of {}.".format(int, str))
def test_run_time_type_checking_enabled_types_satisfied(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(typing.Tuple[int, str])
@with_input_types(x=str)
def group_with_upper_ord(x):
return (ord(x.upper()) % 5, x)
# Pipeline checking is off, but the above function should satisfy types at
# run-time.
result = (
self.p
| 'T' >> beam.Create(['t', 'e', 's', 't', 'i', 'n', 'g'
]).with_output_types(str)
| 'GenKeys' >> beam.Map(group_with_upper_ord)
| 'O' >> beam.GroupByKey()
| _SortLists)
assert_that(
result,
equal_to([(1, ['g']), (3, ['i', 'n', 's']), (4, ['e', 't', 't'])]))
self.p.run()
def test_pipeline_checking_satisfied_but_run_time_types_violate(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(typing.Tuple[bool, int])
@with_input_types(a=int)
def is_even_as_key(a):
# Simulate a programming error, should be: return (a % 2 == 0, a)
# However this returns Tuple[int, int]
return (a % 2, a)
(
self.p
| 'Nums' >> beam.Create(range(5)).with_output_types(int)
| 'IsEven' >> beam.Map(is_even_as_key)
| 'Parity' >> beam.GroupByKey())
    # Although all the types appear to be correct when checked at pipeline
    # construction, runtime type-checking should detect that 'is_even_as_key'
    # is returning Tuple[int, int] instead of Tuple[bool, int].
with self.assertRaises(typehints.TypeCheckError) as e:
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(IsEven): "
"Tuple[bool, int] hint type-constraint violated. "
"The type of element #0 in the passed tuple is incorrect. "
"Expected an instance of type bool, "
"instead received an instance of type int.")
def test_pipeline_checking_satisfied_run_time_checking_satisfied(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
@with_output_types(typing.Tuple[bool, int])
@with_input_types(a=int)
def is_even_as_key(a):
# The programming error in the above test-case has now been fixed.
# Everything should properly type-check.
return (a % 2 == 0, a)
result = (
self.p
| 'Nums' >> beam.Create(range(5)).with_output_types(int)
| 'IsEven' >> beam.Map(is_even_as_key)
| 'Parity' >> beam.GroupByKey()
| _SortLists)
assert_that(result, equal_to([(False, [1, 3]), (True, [0, 2, 4])]))
self.p.run()
def test_pipeline_runtime_checking_violation_simple_type_input(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
    # The type hint applied via the 'with_input_types()' method indicates the
    # ParDo should receive an instance of type 'str'; however, an 'int' will
    # be passed instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([1, 1, 1])
| (
'ToInt' >> beam.FlatMap(lambda x: [int(x)]).with_input_types(
str).with_output_types(int)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(ToInt): "
"Type-hint for argument: 'x' violated. "
"Expected an instance of {}, "
"instead found 1, an instance of {}.".format(str, int))
def test_pipeline_runtime_checking_violation_composite_type_input(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([(1, 3.0), (2, 4.9), (3, 9.5)])
| (
'Add' >>
beam.FlatMap(lambda x_y: [x_y[0] + x_y[1]]).with_input_types(
typing.Tuple[int, int]).with_output_types(int)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add): "
"Type-hint for argument: 'x_y' violated: "
"Tuple[int, int] hint type-constraint violated. "
"The type of element #1 in the passed tuple is incorrect. "
"Expected an instance of type int, instead received an instance "
"of type float.")
def test_pipeline_runtime_checking_violation_simple_type_output(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
    # The type hint applied via the 'with_output_types()' method indicates the
    # ParDo should output an instance of type 'int', however a 'float' will be
    # generated instead.
print(
"HINTS",
(
'ToInt' >> beam.FlatMap(lambda x: [float(x)]).with_input_types(
int).with_output_types(int)).get_type_hints())
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([1, 1, 1])
| (
'ToInt' >> beam.FlatMap(lambda x: [float(x)]).with_input_types(
int).with_output_types(int)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(ToInt): "
"According to type-hint expected output should be "
"of type {}. Instead, received '1.0', "
"an instance of type {}.".format(int, float))
def test_pipeline_runtime_checking_violation_composite_type_output(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
# The type-hint applied via the 'with_output_types()' method indicates that the
# ParDo should return an instance of type Tuple[float, int]. However, a plain
# 'float' will be generated instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([(1, 3.0), (2, 4.9), (3, 9.5)])
| (
'Swap' >>
beam.FlatMap(lambda x_y1: [x_y1[0] + x_y1[1]]).with_input_types(
typing.Tuple[int, float]).with_output_types(
typing.Tuple[float, int])))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(Swap): Tuple type constraint violated. "
"Valid object instance must be of type 'tuple'. Instead, "
"an instance of 'float' was received.")
def test_pipeline_runtime_checking_violation_with_side_inputs_decorator(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(int)
@with_input_types(a=int, b=int)
def add(a, b):
return a + b
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p | beam.Create([1, 2, 3, 4]) | 'Add 1' >> beam.Map(add, 1.0))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add 1): "
"Type-hint for argument: 'b' violated. "
"Expected an instance of {}, "
"instead found 1.0, an instance of {}.".format(int, float))
def test_pipeline_runtime_checking_violation_with_side_inputs_via_method(self): # pylint: disable=line-too-long
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([1, 2, 3, 4])
| (
'Add 1' >> beam.Map(lambda x, one: x + one, 1.0).with_input_types(
int, int).with_output_types(float)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add 1): "
"Type-hint for argument: 'one' violated. "
"Expected an instance of {}, "
"instead found 1.0, an instance of {}.".format(int, float))
def test_combine_properly_pipeline_type_checks_using_decorator(self):
@with_output_types(int)
@with_input_types(ints=typing.Iterable[int])
def sum_ints(ints):
return sum(ints)
d = (
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Sum' >> beam.CombineGlobally(sum_ints))
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([6]))
self.p.run()
def test_combine_func_type_hint_does_not_take_iterable_using_decorator(self):
@with_output_types(int)
@with_input_types(a=int)
def bad_combine(a):
5 + a
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'M' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Add' >> beam.CombineGlobally(bad_combine))
self.assertEqual(
"All functions for a Combine PTransform must accept a "
"single argument compatible with: Iterable[Any]. "
"Instead a function with input type: {} was received.".format(int),
e.exception.args[0])
def test_combine_pipeline_type_propagation_using_decorators(self):
@with_output_types(int)
@with_input_types(ints=typing.Iterable[int])
def sum_ints(ints):
return sum(ints)
@with_output_types(typing.List[int])
@with_input_types(n=int)
def range_from_zero(n):
return list(range(n + 1))
d = (
self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Sum' >> beam.CombineGlobally(sum_ints)
| 'Range' >> beam.ParDo(range_from_zero))
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([0, 1, 2, 3, 4, 5, 6]))
self.p.run()
def test_combine_runtime_type_check_satisfied_using_decorators(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
@with_output_types(int)
@with_input_types(ints=typing.Iterable[int])
def iter_mul(ints):
return reduce(operator.mul, ints, 1)
d = (
self.p
| 'K' >> beam.Create([5, 5, 5, 5]).with_output_types(int)
| 'Mul' >> beam.CombineGlobally(iter_mul))
assert_that(d, equal_to([625]))
self.p.run()
def test_combine_runtime_type_check_violation_using_decorators(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
# Combine fn is returning the incorrect type
@with_output_types(int)
@with_input_types(ints=typing.Iterable[int])
def iter_mul(ints):
return str(reduce(operator.mul, ints, 1))
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'K' >> beam.Create([5, 5, 5, 5]).with_output_types(int)
| 'Mul' >> beam.CombineGlobally(iter_mul))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"Mul/CombinePerKey: "
"Type-hint for return type violated. "
"Expected an instance of {}, instead found".format(int))
def test_combine_pipeline_type_check_using_methods(self):
d = (
self.p
| beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| (
'concat' >> beam.CombineGlobally(lambda s: ''.join(s)).
with_input_types(str).with_output_types(str)))
def matcher(expected):
def match(actual):
equal_to(expected)(list(actual[0]))
return match
assert_that(d, matcher('estt'))
self.p.run()
def test_combine_runtime_type_check_using_methods(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(range(5)).with_output_types(int)
| (
'Sum' >> beam.CombineGlobally(lambda s: sum(s)).with_input_types(
int).with_output_types(int)))
assert_that(d, equal_to([10]))
self.p.run()
def test_combine_pipeline_type_check_violation_using_methods(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create(range(3)).with_output_types(int)
| (
'SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s))).
with_input_types(str).with_output_types(str)))
self.assertStartswith(
e.exception.args[0],
"Input type hint violation at SortJoin: "
"expected {}, got {}".format(str, int))
def test_combine_runtime_type_check_violation_using_methods(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([0]).with_output_types(int)
| (
'SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s))).
with_input_types(str).with_output_types(str)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(SortJoin/KeyWithVoid): "
"Type-hint for argument: 'v' violated. "
"Expected an instance of {}, "
"instead found 0, an instance of {}.".format(str, int))
def test_combine_insufficient_type_hint_information(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'E' >> beam.Create(range(3)).with_output_types(int)
| 'SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s)))
| 'F' >> beam.Map(lambda x: x + 1))
self.assertStartswith(
e.exception.args[0],
'Pipeline type checking is enabled, '
'however no output type-hint was found for the PTransform '
'ParDo('
'SortJoin/CombinePerKey/')
def test_mean_globally_pipeline_checking_satisfied(self):
d = (
self.p
| 'C' >> beam.Create(range(5)).with_output_types(int)
| 'Mean' >> combine.Mean.Globally())
self.assertEqual(float, d.element_type)
assert_that(d, equal_to([2.0]))
self.p.run()
def test_mean_globally_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'C' >> beam.Create(['test']).with_output_types(str)
| 'Mean' >> combine.Mean.Globally())
if sys.version_info[0] >= 3:
expected_msg = \
"Type hint violation for 'CombinePerKey': " \
"requires Tuple[TypeVariable[K], Union[float, int]] " \
"but got Tuple[None, str] for element"
else:
expected_msg = \
"Type hint violation for 'CombinePerKey': " \
"requires Tuple[TypeVariable[K], Union[float, int, long]] " \
"but got Tuple[None, str] for element"
self.assertStartswith(e.exception.args[0], expected_msg)
def test_mean_globally_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| 'C' >> beam.Create(range(5)).with_output_types(int)
| 'Mean' >> combine.Mean.Globally())
self.assertEqual(float, d.element_type)
assert_that(d, equal_to([2.0]))
self.p.run()
def test_mean_globally_runtime_checking_violated(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'C' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'Mean' >> combine.Mean.Globally())
self.p.run()
self.assertEqual(
"Runtime type violation detected for transform input "
"when executing ParDoFlatMap(Combine): Tuple[Any, "
"Iterable[Union[int, float]]] hint type-constraint "
"violated. The type of element #1 in the passed tuple "
"is incorrect. Iterable[Union[int, float]] hint "
"type-constraint violated. The type of element #0 in "
"the passed Iterable is incorrect: Union[int, float] "
"type-constraint violated. Expected an instance of one "
"of: ('int', 'float'), received str instead.",
e.exception.args[0])
def test_mean_per_key_pipeline_checking_satisfied(self):
d = (
self.p
| beam.Create(range(5)).with_output_types(int)
| (
'EvenGroup' >> beam.Map(lambda x: (not x % 2, x)).with_output_types(
typing.Tuple[bool, int]))
| 'EvenMean' >> combine.Mean.PerKey())
self.assertCompatible(typing.Tuple[bool, float], d.element_type)
assert_that(d, equal_to([(False, 2.0), (True, 2.0)]))
self.p.run()
def test_mean_per_key_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create(map(str, range(5))).with_output_types(str)
| (
'UpperPair' >> beam.Map(lambda x:
(x.upper(), x)).with_output_types(
typing.Tuple[str, str]))
| 'EvenMean' >> combine.Mean.PerKey())
self.p.run()
if sys.version_info[0] >= 3:
expected_msg = \
"Type hint violation for 'CombinePerKey(MeanCombineFn)': " \
"requires Tuple[TypeVariable[K], Union[float, int]] " \
"but got Tuple[str, str] for element"
else:
expected_msg = \
"Type hint violation for 'CombinePerKey(MeanCombineFn)': " \
"requires Tuple[TypeVariable[K], Union[float, int, long]] " \
"but got Tuple[str, str] for element"
self.assertStartswith(e.exception.args[0], expected_msg)
def test_mean_per_key_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(range(5)).with_output_types(int)
| (
'OddGroup' >> beam.Map(lambda x:
(bool(x % 2), x)).with_output_types(
typing.Tuple[bool, int]))
| 'OddMean' >> combine.Mean.PerKey())
self.assertCompatible(typing.Tuple[bool, float], d.element_type)
assert_that(d, equal_to([(False, 2.0), (True, 2.0)]))
self.p.run()
def test_mean_per_key_runtime_checking_violated(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create(range(5)).with_output_types(int)
| (
'OddGroup' >> beam.Map(lambda x:
(x, str(bool(x % 2)))).with_output_types(
typing.Tuple[int, str]))
| 'OddMean' >> combine.Mean.PerKey())
self.p.run()
if sys.version_info[0] >= 3:
expected_msg = \
"Runtime type violation detected within " \
"OddMean/CombinePerKey(MeanCombineFn): " \
"Type-hint for argument: 'element' violated: " \
"Union[float, int] type-constraint violated. " \
"Expected an instance of one of: ('float', 'int'), " \
"received str instead"
else:
expected_msg = \
"Runtime type violation detected within " \
"OddMean/CombinePerKey(MeanCombineFn): " \
"Type-hint for argument: 'element' violated: " \
"Union[float, int, long] type-constraint violated. " \
"Expected an instance of one of: ('float', 'int', 'long'), " \
"received str instead"
self.assertStartswith(e.exception.args[0], expected_msg)
def test_count_globally_pipeline_type_checking_satisfied(self):
d = (
self.p
| 'P' >> beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.Globally())
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([5]))
self.p.run()
def test_count_globally_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| 'P' >> beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.Globally())
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([5]))
self.p.run()
def test_count_perkey_pipeline_type_checking_satisfied(self):
d = (
self.p
| beam.Create(range(5)).with_output_types(int)
| (
'EvenGroup' >> beam.Map(lambda x: (not x % 2, x)).with_output_types(
typing.Tuple[bool, int]))
| 'CountInt' >> combine.Count.PerKey())
self.assertCompatible(typing.Tuple[bool, int], d.element_type)
assert_that(d, equal_to([(False, 2), (True, 3)]))
self.p.run()
def test_count_perkey_pipeline_type_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.PerKey())
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'CombinePerKey(CountCombineFn)': "
"requires Tuple[TypeVariable[K], Any] "
"but got {} for element".format(int))
def test_count_perkey_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'DupKey' >> beam.Map(lambda x: (x, x)).with_output_types(
typing.Tuple[str, str])
| 'CountDups' >> combine.Count.PerKey())
self.assertCompatible(typing.Tuple[str, int], d.element_type)
assert_that(d, equal_to([('e', 1), ('s', 1), ('t', 2)]))
self.p.run()
def test_count_perelement_pipeline_type_checking_satisfied(self):
d = (
self.p
| beam.Create([1, 1, 2, 3]).with_output_types(int)
| 'CountElems' >> combine.Count.PerElement())
self.assertCompatible(typing.Tuple[int, int], d.element_type)
assert_that(d, equal_to([(1, 2), (2, 1), (3, 1)]))
self.p.run()
def test_count_perelement_pipeline_type_checking_violated(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| 'f' >> beam.Create([1, 1, 2, 3])
| 'CountElems' >> combine.Count.PerElement())
self.assertEqual(
'Pipeline type checking is enabled, however no output '
'type-hint was found for the PTransform '
'Create(f)',
e.exception.args[0])
def test_count_perelement_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create([True, True, False, True, True]).with_output_types(bool)
| 'CountElems' >> combine.Count.PerElement())
self.assertCompatible(typing.Tuple[bool, int], d.element_type)
assert_that(d, equal_to([(False, 1), (True, 4)]))
self.p.run()
def test_top_of_pipeline_checking_satisfied(self):
d = (
self.p
| beam.Create(range(5, 11)).with_output_types(int)
| 'Top 3' >> combine.Top.Of(3))
self.assertCompatible(typing.Iterable[int], d.element_type)
assert_that(d, equal_to([[10, 9, 8]]))
self.p.run()
def test_top_of_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(list('testing')).with_output_types(str)
| 'AsciiTop' >> combine.Top.Of(3))
self.assertCompatible(typing.Iterable[str], d.element_type)
assert_that(d, equal_to([['t', 't', 's']]))
self.p.run()
def test_per_key_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create(range(100)).with_output_types(int)
| 'Num + 1' >> beam.Map(lambda x: x + 1).with_output_types(int)
| 'TopMod' >> combine.Top.PerKey(1))
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'CombinePerKey(TopCombineFn)': "
"requires Tuple[TypeVariable[K], TypeVariable[T]] "
"but got {} for element".format(int))
def test_per_key_pipeline_checking_satisfied(self):
d = (
self.p
| beam.Create(range(100)).with_output_types(int)
| (
'GroupMod 3' >> beam.Map(lambda x: (x % 3, x)).with_output_types(
typing.Tuple[int, int]))
| 'TopMod' >> combine.Top.PerKey(1))
self.assertCompatible(
typing.Tuple[int, typing.Iterable[int]], d.element_type)
assert_that(d, equal_to([(0, [99]), (1, [97]), (2, [98])]))
self.p.run()
def test_per_key_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(range(21))
| (
'GroupMod 3' >> beam.Map(lambda x: (x % 3, x)).with_output_types(
typing.Tuple[int, int]))
| 'TopMod' >> combine.Top.PerKey(1))
self.assertCompatible(
typing.Tuple[int, typing.Iterable[int]], d.element_type)
assert_that(d, equal_to([(0, [18]), (1, [19]), (2, [20])]))
self.p.run()
def test_sample_globally_pipeline_satisfied(self):
d = (
self.p
| beam.Create([2, 2, 3, 3]).with_output_types(int)
| 'Sample' >> combine.Sample.FixedSizeGlobally(3))
self.assertCompatible(typing.Iterable[int], d.element_type)
def matcher(expected_len):
def match(actual):
equal_to([expected_len])([len(actual[0])])
return match
assert_that(d, matcher(3))
self.p.run()
def test_sample_globally_runtime_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create([2, 2, 3, 3]).with_output_types(int)
| 'Sample' >> combine.Sample.FixedSizeGlobally(2))
self.assertCompatible(typing.Iterable[int], d.element_type)
def matcher(expected_len):
def match(actual):
equal_to([expected_len])([len(actual[0])])
return match
assert_that(d, matcher(2))
self.p.run()
def test_sample_per_key_pipeline_satisfied(self):
d = (
self.p
| (
beam.Create([(1, 2), (1, 2), (2, 3),
(2, 3)]).with_output_types(typing.Tuple[int, int]))
| 'Sample' >> combine.Sample.FixedSizePerKey(2))
self.assertCompatible(
typing.Tuple[int, typing.Iterable[int]], d.element_type)
def matcher(expected_len):
def match(actual):
for _, sample in actual:
equal_to([expected_len])([len(sample)])
return match
assert_that(d, matcher(2))
self.p.run()
def test_sample_per_key_runtime_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| (
beam.Create([(1, 2), (1, 2), (2, 3),
(2, 3)]).with_output_types(typing.Tuple[int, int]))
| 'Sample' >> combine.Sample.FixedSizePerKey(1))
self.assertCompatible(
typing.Tuple[int, typing.Iterable[int]], d.element_type)
def matcher(expected_len):
def match(actual):
for _, sample in actual:
equal_to([expected_len])([len(sample)])
return match
assert_that(d, matcher(1))
self.p.run()
def test_to_list_pipeline_check_satisfied(self):
d = (
self.p
| beam.Create((1, 2, 3, 4)).with_output_types(int)
| combine.ToList())
self.assertCompatible(typing.List[int], d.element_type)
def matcher(expected):
def match(actual):
equal_to(expected)(actual[0])
return match
assert_that(d, matcher([1, 2, 3, 4]))
self.p.run()
def test_to_list_runtime_check_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| beam.Create(list('test')).with_output_types(str)
| combine.ToList())
self.assertCompatible(typing.List[str], d.element_type)
def matcher(expected):
def match(actual):
equal_to(expected)(actual[0])
return match
assert_that(d, matcher(['e', 's', 't', 't']))
self.p.run()
def test_to_dict_pipeline_check_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(
self.p
| beam.Create([1, 2, 3, 4]).with_output_types(int)
| combine.ToDict())
self.assertStartswith(
e.exception.args[0],
"Type hint violation for 'CombinePerKey': "
"requires "
"Tuple[TypeVariable[K], Tuple[TypeVariable[K], TypeVariable[V]]] "
"but got Tuple[None, int] for element")
def test_to_dict_pipeline_check_satisfied(self):
d = (
self.p
| beam.Create([(1, 2),
(3, 4)]).with_output_types(typing.Tuple[int, int])
| combine.ToDict())
self.assertCompatible(typing.Dict[int, int], d.element_type)
assert_that(d, equal_to([{1: 2, 3: 4}]))
self.p.run()
def test_to_dict_runtime_check_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (
self.p
| (
beam.Create([('1', 2),
('3', 4)]).with_output_types(typing.Tuple[str, int]))
| combine.ToDict())
self.assertCompatible(typing.Dict[str, int], d.element_type)
assert_that(d, equal_to([{'1': 2, '3': 4}]))
self.p.run()
def test_runtime_type_check_python_type_error(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(TypeError) as e:
(
self.p
| beam.Create([1, 2, 3]).with_output_types(int)
| 'Len' >> beam.Map(lambda x: len(x)).with_output_types(int))
self.p.run()
# Our special type-checking related TypeError shouldn't have been raised.
# Instead the above pipeline should have triggered a regular Python runtime
# TypeError.
self.assertEqual(
"object of type 'int' has no len() [while running 'Len']",
e.exception.args[0])
self.assertFalse(isinstance(e, typehints.TypeCheckError))
def test_pardo_type_inference(self):
self.assertEqual(int, beam.Filter(lambda x: False).infer_output_type(int))
self.assertEqual(
typehints.Tuple[str, int],
beam.Map(lambda x: (x, 1)).infer_output_type(str))
def test_gbk_type_inference(self):
self.assertEqual(
typehints.Tuple[str, typehints.Iterable[int]],
beam.GroupByKey().infer_output_type(typehints.KV[str, int]))
def test_pipeline_inference(self):
created = self.p | beam.Create(['a', 'b', 'c'])
mapped = created | 'pair with 1' >> beam.Map(lambda x: (x, 1))
grouped = mapped | beam.GroupByKey()
self.assertEqual(str, created.element_type)
self.assertEqual(typehints.KV[str, int], mapped.element_type)
self.assertEqual(
typehints.KV[str, typehints.Iterable[int]], grouped.element_type)
def test_inferred_bad_kv_type(self):
with self.assertRaises(typehints.TypeCheckError) as e:
_ = (
self.p
| beam.Create(['a', 'b', 'c'])
| 'Ungroupable' >> beam.Map(lambda x: (x, 0, 1.0))
| beam.GroupByKey())
self.assertStartswith(
e.exception.args[0],
'Input type hint violation at GroupByKey: '
'expected Tuple[TypeVariable[K], TypeVariable[V]], '
'got Tuple[str, int, float]')
def test_type_inference_command_line_flag_toggle(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
x = self.p | 'C1' >> beam.Create([1, 2, 3, 4])
self.assertIsNone(x.element_type)
self.p._options.view_as(TypeOptions).pipeline_type_check = True
x = self.p | 'C2' >> beam.Create([1, 2, 3, 4])
self.assertEqual(int, x.element_type)
def test_eager_execution(self):
doubled = [1, 2, 3, 4] | beam.Map(lambda x: 2 * x)
self.assertEqual([2, 4, 6, 8], doubled)
def test_eager_execution_tagged_outputs(self):
result = [1, 2, 3, 4] | beam.Map(
lambda x: pvalue.TaggedOutput('bar', 2 * x)).with_outputs('bar')
self.assertEqual([2, 4, 6, 8], result.bar)
with self.assertRaises(KeyError,
msg='Tag \'foo\' is not a defined output tag'):
result.foo
class TestPTransformFn(TypeHintTestCase):
def test_type_checking_fail(self):
@beam.ptransform_fn
def MyTransform(pcoll):
return pcoll | beam.ParDo(lambda x: [x]).with_output_types(str)
p = TestPipeline()
with self.assertRaisesRegex(beam.typehints.TypeCheckError,
r'expected.*int.*got.*str'):
_ = (p | beam.Create([1, 2]) | MyTransform().with_output_types(int))
def test_type_checking_success(self):
@beam.ptransform_fn
def MyTransform(pcoll):
return pcoll | beam.ParDo(lambda x: [x]).with_output_types(int)
with TestPipeline() as p:
_ = (p | beam.Create([1, 2]) | MyTransform().with_output_types(int))
def test_type_hints_arg(self):
# Tests passing type hints via the magic 'type_hints' argument name.
@beam.ptransform_fn
def MyTransform(pcoll, type_hints, test_arg):
self.assertEqual(test_arg, 'test')
return (
pcoll
| beam.ParDo(lambda x: [x]).with_output_types(
type_hints.output_types[0][0]))
with TestPipeline() as p:
_ = (p | beam.Create([1, 2]) | MyTransform('test').with_output_types(int))
def _sort_lists(result):
if isinstance(result, list):
return sorted(result)
elif isinstance(result, tuple):
return tuple(_sort_lists(e) for e in result)
elif isinstance(result, dict):
return {k: _sort_lists(v) for k, v in result.items()}
else:
return result
_SortLists = beam.Map(_sort_lists)
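# Worked example (illustrative values): _sort_lists, wrapped by _SortLists
# above, recursively sorts any nested lists so grouped output can be compared
# deterministically with assert_that:
#   _sort_lists(('a', [3, 1, 2]))  -> ('a', [1, 2, 3])
#   _sort_lists({'k': [2, 1]})     -> {'k': [1, 2]}
#   _sort_lists('unchanged')       -> 'unchanged'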
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "63560df2555bd502523e3abe5ab89e05",
"timestamp": "",
"source": "github",
"line_count": 2340,
"max_line_length": 114,
"avg_line_length": 37.2982905982906,
"alnum_prop": 0.6035312449872821,
"repo_name": "iemejia/incubator-beam",
"id": "1cdd273fb3c45147c4d9c212841651eb52979e3e",
"size": "87278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/transforms/ptransform_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
)
class AdobeTVIE(InfoExtractor):
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
'thumbnail': r're:https?://.*\.jpg$',
'upload_date': '20110914',
'duration': 60,
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player = self._parse_json(
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
video_id)
title = player.get('title') or self._search_regex(
r'data-title="([^"]+)"', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('datepublished', webpage, 'upload date'))
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration')
or self._search_regex(r'Runtime:\s*(\d{2}:\d{2}:\d{2})', webpage, 'duration'))
view_count = str_to_int(self._search_regex(
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
webpage, 'view count'))
formats = [{
'url': source['src'],
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
'tbr': source.get('bitrate'),
} for source in player['sources']]
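# Illustration (hypothetical source entry): a player source such as
#   {'src': 'http://example.invalid/video-720.mp4', 'bitrate': 2000}
# would, under that assumption, map to
#   {'url': 'http://example.invalid/video-720.mp4', 'format_id': '720', 'tbr': 2000}
# since format_id falls back to the token between the last '-' and the extension.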
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
| {
"content_hash": "779804aabb88a0c728b28bb262143cbc",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 146,
"avg_line_length": 35.34285714285714,
"alnum_prop": 0.5440582053354891,
"repo_name": "Electroscholars/P.E.E.R.S",
"id": "28e07f8b04ed89fe7c79f445f3454adfb04d0561",
"size": "2474",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "MainWindowArrowTest/youtube_dl/extractor/adobetv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "123"
},
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "2081027"
}
],
"symlink_target": ""
} |
import sys,os
from operator import itemgetter
from xml.etree.ElementTree import Element, SubElement, tostring
def write_formatted_xml (tweets, file_name):
output = file("tmp.xml",'w')
output.write(tostring(create_dataset_xml(tweets)))
output.close()
os.system('xmllint --format ' + output.name + ' > ' + file_name)
os.remove(output.name)
def create_dataset_xml (tweets):
dataset = Element('dataset')
for (tweetid,userid,username,label,target,content) in tweets:
attributes = {"tweetid":tweetid, "username":username, "label":label, "target":target}
itemEl = SubElement(dataset, "item", attributes)
contentEl = SubElement(itemEl, "content")
print content
contentEl.text = unicode(content)
return dataset
# To be included, at least two thirds of the votes must be for the majority label
vote_threshold = 2/3.0
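# Worked example (hypothetical ratings): for ['1', '1', '2'] the counts are
# {'1': 2, '2': 1}, bestLabel is '1' ("negative") with countOfBest 2, and
# 2 / 3.0 >= vote_threshold, so the tweet is kept; for ['1', '2', '3'] no label
# reaches the threshold and the tweet is skipped.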
annotations = [x for x in file(sys.argv[1]) if x[0].isdigit()]
filtered_tweets = []
for line in annotations:
annotated_tweet = line.strip().split("\t")
# Get the main information
tweetid, time, content, name, nickname = annotated_tweet[:5]
# Some nicknames are missing, in which case, use the name as nickname
if nickname == "":
nickname = name
# Get the ratings
ratings = annotated_tweet[5:]
counts = {}
[counts.__setitem__(label,1+counts.get(label,0)) for label in ratings]
countItems = counts.items()
countItems.sort(key=itemgetter(1),reverse=True)
bestLabel, countOfBest = countItems[0]
# Python doesn't have a switch statement -- ARGH!!
label = "mixed"
if bestLabel == "1":
label = "negative"
elif bestLabel == "2":
label = "positive"
elif bestLabel == "4":
label = "neutral" # "4" is "other", which we take as "neutral"
# "3" is mixed, which we aren't handling here
if label != "mixed" and countOfBest/float(len(ratings)) >= vote_threshold:
filtered_tweets.append((time, tweetid, content, name, nickname, label))
filtered_tweets.sort()
tweets = []
userid = "000000" # we don't have userid's
for (time,tweetid,content,name,nickname,label) in filtered_tweets:
# Attempt to identify the target as Obama, McCain, both, or general
contentLower = content.lower()
indexOfObama = contentLower.find("obama")
indexOfMcCain = contentLower.find("mccain")
target = "general"
if indexOfObama > -1:
target = "both" if indexOfMcCain > -1 else "obama"
elif indexOfMcCain > -1:
target = "mccain"
tweets.append((tweetid,userid,name,label,target,content))
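# The time-sorted tweets are then split into roughly equal thirds below:
# train, dev, and test.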
firstThirdIndex = int(round(len(tweets)/3.0))
secondThirdIndex = 2*firstThirdIndex
write_formatted_xml(tweets[:firstThirdIndex], "train.xml")
write_formatted_xml(tweets[firstThirdIndex:secondThirdIndex], "dev.xml")
write_formatted_xml(tweets[secondThirdIndex:], "test.xml")
| {
"content_hash": "7dc1ef25e8ceb48edc7e0b5341b93160",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 93,
"avg_line_length": 33.870588235294115,
"alnum_prop": 0.6710663424800278,
"repo_name": "utcompling/Scalabha",
"id": "a721dc0396626933d26698aae992d9b92e7bfc23",
"size": "4484",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data/classify/debate08/orig/create_debate_data_splits.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1645"
},
{
"name": "Python",
"bytes": "4484"
},
{
"name": "R",
"bytes": "1595"
},
{
"name": "Scala",
"bytes": "517415"
},
{
"name": "Shell",
"bytes": "1760"
}
],
"symlink_target": ""
} |
"""All minimum dependencies for scikit-learn."""
from collections import defaultdict
import platform
import argparse
# scipy and cython should be in sync with pyproject.toml
# NumPy version should match oldest-supported-numpy for the minimum supported
# Python version.
# see: https://github.com/scipy/oldest-supported-numpy/blob/main/setup.cfg
if platform.python_implementation() == "PyPy":
NUMPY_MIN_VERSION = "1.19.2"
else:
NUMPY_MIN_VERSION = "1.17.3"
SCIPY_MIN_VERSION = "1.3.2"
JOBLIB_MIN_VERSION = "1.0.0"
THREADPOOLCTL_MIN_VERSION = "2.0.0"
PYTEST_MIN_VERSION = "5.0.1"
CYTHON_MIN_VERSION = "0.29.24"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma-separated tags).
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"matplotlib": ("3.1.2", "benchmark, docs, examples, tests"),
"scikit-image": ("0.16.2", "docs, examples, tests"),
"pandas": ("1.0.5", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"flake8": ("3.8.2", "tests"),
"black": ("22.3.0", "tests"),
"mypy": ("0.961", "tests"),
"pyamg": ("4.0.0", "tests"),
"sphinx": ("4.0.1", "docs"),
"sphinx-gallery": ("0.7.0", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.3.0", "docs"),
"sphinxext-opengraph": ("0.4.2", "docs"),
"plotly": ("5.9.0", "docs, examples"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("1.1.1", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
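# For example, the "numpy": (NUMPY_MIN_VERSION, "build, install") entry above
# contributes "numpy>=1.17.3" (or ">=1.19.2" on PyPy) to both
# tag_to_packages["build"] and tag_to_packages["install"].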
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
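# Example invocation (path taken from this module's location):
#   python sklearn/_min_dependencies.py cython   # prints "0.29.24"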
| {
"content_hash": "6f16b9dc3e387a8cce86222dc26bd26e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 36.763888888888886,
"alnum_prop": 0.6392142047601058,
"repo_name": "manhhomienbienthuy/scikit-learn",
"id": "64e7f9c8cf395b654989104912b3e11ffd5d5863",
"size": "2647",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sklearn/_min_dependencies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667693"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10404540"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import traceback
from quixote.qwip import QWIP
from quixote.publish import Publisher
from vilya import views as controllers
from vilya.config import DEVELOP_MODE
from vilya.libs.gzipper import make_gzip_middleware
from vilya.libs.permdir import get_tmpdir
from vilya.libs.auth.check_auth import check_auth
from vilya.libs.import_obj import import_obj_set
from vilya.libs.template import st
from vilya.models.user import User
from vilya.views.util import is_mobile_device
PERFORMANCE_METRIC_MARKER = '<!-- _performtips_ -->'
def show_performance_metric(request, output):
idx = output.find(PERFORMANCE_METRIC_MARKER)
if idx > 0:
pt = int((time.time() - request.start_time) * 1000)
cls = pt > 250 and 'red' or pt > 100 and 'orange' or 'green'
block = '<li class="hidden-phone"><span style="color:%s"> %d ms </span></li>' % (cls, pt)
output = (output[:idx] + block + output[idx + len(PERFORMANCE_METRIC_MARKER):])
return output
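# Rough illustration (hypothetical page and timing): if the rendered HTML
# contains '<ul>...<!-- _performtips_ --></ul>' and the request took ~120 ms,
# the marker is replaced by a badge like
#   '<li class="hidden-phone"><span style="color:orange"> 120 ms </span></li>'
# (above 100 ms but not above 250 ms, hence 'orange').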
class CODEPublisher(Publisher):
def __init__(self, *args, **kwargs):
Publisher.__init__(self, *args, **kwargs)
self.configure(DISPLAY_EXCEPTIONS='plain',
SECURE_ERRORS=0,
UPLOAD_DIR=get_tmpdir() + '/upload/')
def start_request(self, request):
Publisher.start_request(self, request)
os.environ['SQLSTORE_SOURCE'] = request.get_url()
resp = request.response
resp.set_content_type('text/html; charset=utf-8')
resp.set_header('Pragma', 'no-cache')
resp.set_header('Cache-Control', 'must-revalidate, no-cache, private')
# FIXME: quixote with origin?
resp.set_header('Access-Control-Allow-Origin', '*')
request.enable_ajax = False
request.browser = request.guess_browser_version()
request.method = request.get_method()
request.url = request.get_path()
request.is_mobile = is_mobile_device(request)
request.start_time = time.time()
request.user = User.check_session(request)
import_obj_set("request", request)
def try_publish(self, request, path):
output = Publisher.try_publish(self, request, path)
output = show_performance_metric(request, output)
return output
def finish_failed_request(self, request):
if DEVELOP_MODE:
exc_type, exc_value, tb = sys.exc_info()
raise exc_type, exc_value, tb
else:
return Publisher.finish_failed_request(self, request)
def _generate_cgitb_error(self, request, original_response,
exc_type, exc_value, tb):
traceback.print_exc()
return st('/errors/500.html', **locals())
def create_publisher():
return CODEPublisher(controllers)
app = make_gzip_middleware(QWIP(create_publisher()))
| {
"content_hash": "5f8bcd7f6050b94f42252db7c22ec26f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 97,
"avg_line_length": 34.91463414634146,
"alnum_prop": 0.6475724764233322,
"repo_name": "xantage/code",
"id": "d1197d4f00f0a69352c56b8e754cc51050ae148c",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "28515"
},
{
"name": "HTML",
"bytes": "16879"
},
{
"name": "JavaScript",
"bytes": "990370"
},
{
"name": "Makefile",
"bytes": "568"
},
{
"name": "Python",
"bytes": "199565"
},
{
"name": "Shell",
"bytes": "6546"
}
],
"symlink_target": ""
} |
import csv, os
import SonicScrewdriver as utils
import random
selecteddates = dict()
selected = list()
selectedmeta = dict()
def get_meta():
meta = dict()
meta['aubirth'] = input("Author's year of birth? ")
meta['augender'] = input("Author's gender? ")
meta['national origin'] = input("Author's nationality? ")
meta['firstpub'] = input('Date of first publication? ')
return meta
reviews = '/Users/tunder/Dropbox/ted/reception/reviewed/lists/ReviewedTitles1820-1839.csv'
with open(reviews, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
htid = utils.clean_pairtree(row['HTid'])
pubdate = int(row['date'][0:4])
firstpub = int(row['firstpub'])
yrrev = int(row['yrrev'])
if pubdate > yrrev + 5:
date = yrrev
print(str(pubdate) + " => " + str(yrrev))
else:
date = pubdate
jgenre = row['Jgenre']
if jgenre == 'poe':
selecteddates[htid] = date
selected.append(htid)
selectedmeta[htid] = row
bydate = dict()
authors = dict()
titles = dict()
datesbyhtid = dict()
with open('/Users/tunder/work/genre/metadata/poemeta.csv', encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
htid = row['htid']
authors[htid] = row['author']
titles[htid] = row['title']
date = utils.date_row(row)
datesbyhtid[htid] = date
if htid in selected:
continue
if date in bydate:
bydate[date].append(htid)
else:
bydate[date] = [htid]
controlset = set()
controlmeta = dict()
usedfromselected = list()
skip = int(input('Skip how many? '))
for theid in selected[skip:]:
date = selecteddates[theid]
usedfromselected.append(theid)
print(theid)
print(date)
print(authors[theid])
print(titles[theid])
found = False
while not found:
candidates = bydate[date]
choice = random.sample(candidates, 1)[0]
print(choice)
print(authors[choice])
print(titles[choice])
acceptable = input("ACCEPT? (y/n): ")
if acceptable == "y":
controlset.add(choice)
found = True
controlmeta[choice] = get_meta()
controlmeta[choice]['author'] = authors[choice]
controlmeta[choice]['title'] = titles[choice]
if acceptable == 'quit':
break
if acceptable == 'quit':
break
ficmetadata = list()
user = input("Write metadata for reviewed set? (y/n) ")
if user == 'y':
for htid in usedfromselected:
date = str(selecteddates[htid])
meta = selectedmeta[htid]
author = meta['author']
title = meta['title']
birth = meta['aubirth']
gender = meta['augender']
nation = meta['national origin']
firstpub = meta['firstpub']
if nation.startswith('British'):
nation = 'uk'
else:
nation = 'us'
outline = htid + '\t' + 'elite' + '\t' + date + '\t' + author + '\t' + title + '\t' + birth + '\t' + gender + '\t' + nation + '\t' + firstpub + '\n'
ficmetadata.append(outline)
for htid in controlset:
if htid not in datesbyhtid:
print(htid)
continue
date = str(datesbyhtid[htid])
meta = controlmeta[htid]
author = meta['author']
title = meta['title']
birth = meta['aubirth']
gender = meta['augender']
nation = meta['national origin']
firstpub = meta['firstpub']
outline = htid + '\t' + 'vulgar' + '\t' + date + '\t' + author + '\t' + title + '\t' + birth + '\t' + gender + '\t' + nation + '\t' + firstpub + '\n'
ficmetadata.append(outline)
metapath = '/Users/tunder/Dropbox/GenreProject/metadata/richpoemeta1839.tsv'
if os.path.isfile(metapath):
fileexists = True
else:
fileexists = False
with open(metapath, mode = 'a', encoding = 'utf-8') as f:
if not fileexists:
f.write('\t'.join(['docid', 'recept', 'date', 'author', 'title', 'birth', 'gender', 'nationality', 'firstpub', 'othername', 'notes', 'canon']) + '\n')
for line in ficmetadata:
f.write(line)
| {
"content_hash": "1644c904687050aebdb12a43cdb79ba5",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 158,
"avg_line_length": 28.732876712328768,
"alnum_prop": 0.5744934445768772,
"repo_name": "tedunderwood/GenreProject",
"id": "92c613a8b46e4db3c8a0c7e309ab75ae67ae4c18",
"size": "4530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reception/select_poetry_corpus3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187389"
},
{
"name": "Python",
"bytes": "645172"
},
{
"name": "R",
"bytes": "34870"
}
],
"symlink_target": ""
} |
from guardian.models import GroupObjectPermission
from rest_framework import serializers as ser
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import (
BaseAPISerializer, JSONAPISerializer, JSONAPIRelationshipSerializer,
VersionedDateTimeField, HideIfDisabled, IDField,
Link, LinksField, ListDictField, TypeField, RelationshipField,
WaterbutlerLink, ShowIfCurrentUser
)
from api.base.utils import absolute_reverse, get_user_auth
from api.files.serializers import QuickFilesSerializer
from osf.exceptions import ValidationValueError, ValidationError
from osf.models import OSFUser, QuickFilesNode
from website import util as website_utils
class QuickFilesRelationshipField(RelationshipField):
def to_representation(self, value):
relationship_links = super(QuickFilesRelationshipField, self).to_representation(value)
quickfiles_guid = value.nodes_created.filter(type=QuickFilesNode._typedmodels_type).values_list('guids___id', flat=True).get()
upload_url = website_utils.waterbutler_api_url_for(quickfiles_guid, 'osfstorage')
relationship_links['links']['upload'] = {
'href': upload_url,
'meta': {}
}
relationship_links['links']['download'] = {
'href': '{}?zip='.format(upload_url),
'meta': {}
}
return relationship_links
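# Sketch of the links added above (hypothetical quickfiles guid 'abc12' and
# WaterButler host): the relationship representation gains roughly
#   'upload':   {'href': '<waterbutler>/v1/resources/abc12/providers/osfstorage/', 'meta': {}}
#   'download': {'href': '<waterbutler>/v1/resources/abc12/providers/osfstorage/?zip=', 'meta': {}}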
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'full_name',
'given_name',
'middle_names',
'family_name',
'id'
])
non_anonymized_fields = ['type']
id = IDField(source='_id', read_only=True)
type = TypeField()
full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface', max_length=186)
given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
date_registered = HideIfDisabled(VersionedDateTimeField(read_only=True))
active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))
timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC'"))
locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g. 'en_US'"))
social = ListDictField(required=False)
can_view_reviews = ShowIfCurrentUser(ser.SerializerMethodField(help_text='Whether the current user has the `view_submissions` permission to ANY reviews provider.'))
links = HideIfDisabled(LinksField(
{
'html': 'absolute_url',
'profile_image': 'profile_image_url',
}
))
nodes = HideIfDisabled(RelationshipField(
related_view='users:user-nodes',
related_view_kwargs={'user_id': '<_id>'},
related_meta={'projects_in_common': 'get_projects_in_common'},
))
quickfiles = HideIfDisabled(QuickFilesRelationshipField(
related_view='users:user-quickfiles',
related_view_kwargs={'user_id': '<_id>'},
))
registrations = HideIfDisabled(RelationshipField(
related_view='users:user-registrations',
related_view_kwargs={'user_id': '<_id>'},
))
institutions = HideIfDisabled(RelationshipField(
related_view='users:user-institutions',
related_view_kwargs={'user_id': '<_id>'},
self_view='users:user-institutions-relationship',
self_view_kwargs={'user_id': '<_id>'},
))
preprints = HideIfDisabled(RelationshipField(
related_view='users:user-preprints',
related_view_kwargs={'user_id': '<_id>'},
))
class Meta:
type_ = 'users'
def get_projects_in_common(self, obj):
user = get_user_auth(self.context['request']).user
if obj == user:
return user.contributor_to.count()
return obj.n_projects_in_common(user)
def absolute_url(self, obj):
if obj is not None:
return obj.absolute_url
return None
def get_absolute_url(self, obj):
return absolute_reverse('users:user-detail', kwargs={
'user_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_can_view_reviews(self, obj):
group_qs = GroupObjectPermission.objects.filter(group__user=obj, permission__codename='view_submissions')
return group_qs.exists() or obj.userobjectpermission_set.filter(permission__codename='view_submissions')
def profile_image_url(self, user):
size = self.context['request'].query_params.get('profile_image_size')
return user.profile_image_url(size=size)
def update(self, instance, validated_data):
assert isinstance(instance, OSFUser), 'instance must be a User'
for attr, value in validated_data.items():
if 'social' == attr:
for key, val in value.items():
# currently only profileWebsites is a list; the other social keys each have a single value
if key == 'profileWebsites':
instance.social[key] = val
else:
if len(val) > 1:
raise InvalidModelValueError(
detail='{} only accepts a list with a single value'.format(key)
)
instance.social[key] = val[0]
else:
setattr(instance, attr, value)
try:
instance.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except ValidationError as e:
raise InvalidModelValueError(e)
return instance
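# Illustrative 'social' payload (hypothetical values):
#   {'social': {'profileWebsites': ['https://example.org'], 'twitter': ['example']}}
# keeps the profileWebsites list as-is and unwraps 'twitter' to its single
# element; a non-profileWebsites key with more than one value raises
# InvalidModelValueError.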
class UserAddonSettingsSerializer(JSONAPISerializer):
"""
Serializer for a user's addon settings; id is the addon's short name and is read-only.
"""
id = ser.CharField(source='config.short_name', read_only=True)
user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
links = LinksField({
'self': 'get_absolute_url',
'accounts': 'account_links'
})
class Meta:
type_ = 'user_addons'
def get_absolute_url(self, obj):
return absolute_reverse(
'users:user-addon-detail',
kwargs={
'provider': obj.config.short_name,
'user_id': self.context['request'].parser_context['kwargs']['user_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
def account_links(self, obj):
# TODO: [OSF-4933] remove this after refactoring Figshare
if hasattr(obj, 'external_accounts'):
return {
account._id: {
'account': absolute_reverse('users:user-external_account-detail', kwargs={
'user_id': obj.owner._id,
'provider': obj.config.short_name,
'account_id': account._id,
'version': self.context['request'].parser_context['kwargs']['version']
}),
'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)]
}
for account in obj.external_accounts.all()
}
return {}
class UserDetailSerializer(UserSerializer):
"""
Overrides UserSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class UserQuickFilesSerializer(QuickFilesSerializer):
links = LinksField({
'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
'upload': WaterbutlerLink(),
'delete': WaterbutlerLink(),
'move': WaterbutlerLink(),
'download': WaterbutlerLink(must_be_file=True),
})
class ReadEmailUserDetailSerializer(UserDetailSerializer):
email = ser.CharField(source='username', read_only=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
id = ser.CharField(required=False, allow_null=True, source='_id')
class Meta:
type_ = 'institutions'
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=RelatedInstitution())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return absolute_reverse('users:user-institutions-relationship', kwargs={
'user_id': obj['self']._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_related_url(self, obj):
return absolute_reverse('users:user-institutions', kwargs={
'user_id': obj['self']._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class Meta:
type_ = 'institutions'
| {
"content_hash": "ab472fedc879ef4cadbb32c94b81ba08",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 168,
"avg_line_length": 38.710743801652896,
"alnum_prop": 0.6263877028181042,
"repo_name": "leb2dg/osf.io",
"id": "108e04dda45c846fd1307a1527513c5cce74fba4",
"size": "9368",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/users/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158060"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1621074"
},
{
"name": "Mako",
"bytes": "669660"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5400700"
}
],
"symlink_target": ""
} |
DATABASES = {
'default': {
'ENGINE': 'sqlite3',
'NAME': 'haystack_tests.db',
}
}
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'haystack',
'core',
]
ROOT_URLCONF = 'core.urls'
HAYSTACK_SITECONF = 'core.search_sites'
HAYSTACK_SEARCH_ENGINE = 'dummy'
| {
"content_hash": "1e94a87c89350dd7047dc246f579d27e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 39,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.6083743842364532,
"repo_name": "calvinchengx/django-haystack",
"id": "3283e11392190058c5aa238ddd62c3b171645c53",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "480175"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
import tempfile
from django.contrib.gis import gdal
from django.contrib.gis.geos import HAS_GEOS
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from ..utils import no_oracle, oracle, postgis, skipUnlessGISLookup, spatialite
if HAS_GEOS:
from django.contrib.gis.db.models import Extent, MakeLine, Union
from django.contrib.gis.geos import (fromstr, GEOSGeometry,
Point, LineString, LinearRing, Polygon, GeometryCollection)
from .models import Country, City, PennsylvaniaCity, State, Track, NonConcreteModel, Feature, MinusOneSRID
def postgis_bug_version():
spatial_version = getattr(connection.ops, "spatial_version", (0, 0, 0))
return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnlessDBFeature("gis_enabled")
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
# Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took; note that no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
# Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(
#   SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157)
# )
# FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
# Used ogr.py in gdal 1.4.1 for this transform
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)'
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
if spatialite and connection.ops.spatial_version < (3, 0, 0):
# SpatiaLite < 3 does not support missing SRID values.
return
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = six.StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
tmp.write(result)
tmp.seek(0)
call_command('loaddata', tmp.name, verbosity=0)
self.assertListEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("gis_enabled")
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
if not (spatialite and connection.ops.spatial_version < (3, 0, 0)):
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
self.skipTest("PostGIS 2.0/2.0.1 left and right lookups are known to be buggy.")
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
@skipUnlessGISLookup("strictly_above", "strictly_below")
def test_strictly_above_below_lookups(self):
dallas = City.objects.get(name='Dallas')
self.assertQuerysetEqual(
City.objects.filter(point__strictly_above=dallas.point).order_by('name'),
['Chicago', 'Lawrence', 'Oklahoma City', 'Pueblo', 'Victoria'],
lambda b: b.name
)
self.assertQuerysetEqual(
City.objects.filter(point__strictly_below=dallas.point).order_by('name'),
['Houston', 'Wellington'],
lambda b: b.name
)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertEqual(nmi.poly, None)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
        # Not passing in a geometry as the first param should
        # raise a ValueError when initializing the GeoQuerySet.
self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
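        # (For reference, the PostGIS/SpatiaLite masks above are DE-9IM
        # intersection-matrix patterns, read row-major as
        # [II, IB, IE, BI, BB, BE, EI, EB, EE] for relate(field_geom, given_geom):
        # 'T' requires a non-empty intersection, 'F' an empty one, and '*' is
        # "don't care".  The Oracle strings are SDO_RELATE mask keywords.)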
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnlessDBFeature("gis_enabled")
class GeoQuerySetTest(TestCase):
fixtures = ['initial']
    # Please keep the tests in alphabetical order of the GeoQuerySet method names.
@skipUnlessDBFeature("has_centroid_method")
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertTrue(s.poly.centroid.equals_exact(s.centroid, tol))
@skipUnlessDBFeature(
"has_difference_method", "has_intersection_method",
"has_sym_difference_method", "has_union_method")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
# XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
# XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_envelope_method")
def test_envelope(self):
"Testing the `envelope` GeoQuerySet method."
countries = Country.objects.all().envelope()
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("supports_extent_aggr")
@ignore_warnings(category=RemovedInDjango110Warning)
def test_extent(self):
"""
Testing the (deprecated) `extent` GeoQuerySet method and the Extent
aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent1 = qs.extent()
extent2 = qs.aggregate(Extent('point'))['point__extent']
for extent in (extent1, extent2):
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).extent())
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
@skipUnlessDBFeature("has_force_rhr_method")
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@skipUnlessDBFeature("has_geohash_method")
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_gml_method")
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
# Should throw a TypeError when trying to obtain GML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn('<gml:pos srsDimension="2">', City.objects.gml(version=3).get(name='Pueblo').gml)
@skipUnlessDBFeature("has_kml_method")
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_make_line(self):
"""
Testing the (deprecated) `make_line` GeoQuerySet method and the MakeLine
aggregate.
"""
if not connection.features.supports_make_line_aggr:
# Only PostGIS has support for the MakeLine aggregate. For other
# backends, test that NotImplementedError is raised
self.assertRaises(
NotImplementedError,
City.objects.all().aggregate, MakeLine('point')
)
return
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
# We check for equality with a tolerance of 10e-5 which is a lower bound
# of the precisions of ref_line coordinates
line1 = City.objects.make_line()
line2 = City.objects.aggregate(MakeLine('point'))['point__makeline']
for line in (line1, line2):
self.assertTrue(ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line))
@skipUnlessDBFeature("has_num_geom_method")
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0
# will return None.
if postgis and connection.ops.spatial_version < (2, 0, 0):
self.assertIsNone(c.num_geom)
else:
self.assertEqual(1, c.num_geom)
@skipUnlessDBFeature("supports_num_points_poly")
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points():
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_point_on_surface_method")
def test_point_on_surface(self):
"Testing the `point_on_surface` GeoQuerySet method."
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
for c in Country.objects.point_on_surface():
if spatialite:
# XXX This seems to be a WKT-translation-related precision issue?
tol = 0.00001
else:
tol = 0.000000001
self.assertTrue(ref[c.name].equals_exact(c.point_on_surface, tol))
@skipUnlessDBFeature("has_reverse_method")
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
@skipUnlessDBFeature("has_scale_method")
def test_scale(self):
"Testing the `scale` GeoQuerySet method."
xfac, yfac = 2, 3
tol = 5 # XXX The low precision tolerance is for SpatiaLite
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
for p1, p2 in zip(c.mpoly, c.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
@skipUnlessDBFeature("has_snap_to_grid_method")
def test_snap_to_grid(self):
"Testing GeoQuerySet.snap_to_grid()."
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid,
tol
)
)
@skipUnlessDBFeature("has_svg_method")
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
        # Even though the output is relative, there is only one point, so it is
        # practically the same except for the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_transform_method")
def test_transform(self):
"Testing the transform() GeoQuerySet method."
# Pre-transformed points for Houston and Pueblo.
htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points. Oracle does not have the 3084 SRID.
if not oracle:
h = City.objects.transform(htown.srid).get(name='Houston')
self.assertEqual(3084, h.point.srid)
self.assertAlmostEqual(htown.x, h.point.x, prec)
self.assertAlmostEqual(htown.y, h.point.y, prec)
p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
for p in [p1, p2]:
self.assertEqual(2774, p.point.srid)
self.assertAlmostEqual(ptown.x, p.point.x, prec)
self.assertAlmostEqual(ptown.y, p.point.y, prec)
@skipUnlessDBFeature("has_translate_method")
def test_translate(self):
"Testing the `translate` GeoQuerySet method."
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# XXX The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# TODO: Oracle can be made to pass if
# union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
# but this seems unexpected and should be investigated to determine the cause.
@skipUnlessDBFeature("has_unionagg_method")
@no_oracle
@ignore_warnings(category=RemovedInDjango110Warning)
def test_unionagg(self):
"""
Testing the (deprecated) `unionagg` (aggregate union) GeoQuerySet method
and the Union aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
qs = City.objects.filter(point__within=tx)
self.assertRaises(TypeError, qs.unionagg, 'name')
self.assertRaises(ValueError, qs.aggregate, Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.unionagg(field_name='point')
u2 = qs.order_by('name').unionagg()
u3 = qs.aggregate(Union('point'))['point__union']
u4 = qs.order_by('name').aggregate(Union('point'))['point__union']
tol = 0.00001
self.assertTrue(union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
self.assertTrue(union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
self.assertTrue(union1.equals_exact(u3, tol) or union2.equals_exact(u3, tol))
self.assertTrue(union1.equals_exact(u4, tol) or union2.equals_exact(u4, tol))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.unionagg(field_name='point'))
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_within_subquery(self):
"""
Test that using a queryset inside a geo lookup is working (using a subquery)
(#14483).
"""
tex_cities = City.objects.filter(
point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
expected = ['Dallas', 'Houston']
if not connection.features.supports_real_shape_operations:
expected.append('Oklahoma City')
self.assertEqual(
list(tex_cities.values_list('name', flat=True)),
expected
)
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
| {
"content_hash": "825f52ada2df08a8ec9293840c97d5d5",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 117,
"avg_line_length": 45.63567567567568,
"alnum_prop": 0.6126548693530429,
"repo_name": "liangazhou/django-rdp",
"id": "3472ed094c3df0173c2daf2b0f40750a4f2d0ce6",
"size": "42213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/Django-1.8.6/tests/gis_tests/geoapp/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22310"
},
{
"name": "CSS",
"bytes": "5463444"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "Groff",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "439341404"
},
{
"name": "JavaScript",
"bytes": "19561573"
},
{
"name": "PHP",
"bytes": "94083"
},
{
"name": "Perl",
"bytes": "9844"
},
{
"name": "Python",
"bytes": "8069"
},
{
"name": "Shell",
"bytes": "11480"
},
{
"name": "XSLT",
"bytes": "224454"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v12.services.types import asset_set_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AssetSetServiceTransport(abc.ABC):
"""Abstract transport class for AssetSetService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_asset_sets: gapic_v1.method.wrap_method(
self.mutate_asset_sets,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_asset_sets(
self,
) -> Callable[
[asset_set_service.MutateAssetSetsRequest],
Union[
asset_set_service.MutateAssetSetsResponse,
Awaitable[asset_set_service.MutateAssetSetsResponse],
],
]:
raise NotImplementedError()
__all__ = ("AssetSetServiceTransport",)
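# Minimal sketch (not part of the upstream module) of what a concrete subclass
# of AssetSetServiceTransport has to provide; the class name and return value
# below are placeholders for illustration only, kept as a comment so the
# module's public surface is unchanged.
#
#     class InProcessAssetSetServiceTransport(AssetSetServiceTransport):
#         def __init__(self, **kwargs):
#             super().__init__(**kwargs)
#             self._prep_wrapped_messages(DEFAULT_CLIENT_INFO)
#
#         @property
#         def mutate_asset_sets(self):
#             # Any callable accepting a MutateAssetSetsRequest works here.
#             return lambda request, **kw: asset_set_service.MutateAssetSetsResponse()
#
#         def close(self):
#             pass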
| {
"content_hash": "660dc9e1c3f202ea0080b4fa2dff3694",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 101,
"avg_line_length": 38.20863309352518,
"alnum_prop": 0.6207870457540953,
"repo_name": "googleads/google-ads-python",
"id": "9f9d87ffc8ce7d932a27f748ee954c1f06ee138c",
"size": "5911",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/asset_set_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
class CharDistributionAnalysis(object):
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._char_to_freq_order = None
self._table_size = None # Size of above table
        # This is a constant value specific to each language; it is used in
        # calculating confidence. See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
self.typical_distribution_ratio = None
self._done = None
self._total_chars = None
self._freq_chars = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._done = False
self._total_chars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._freq_chars = 0
def feed(self, char, char_len):
"""feed a character with known length"""
if char_len == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(char)
else:
order = -1
if order >= 0:
self._total_chars += 1
# order is valid
if order < self._table_size:
if 512 > self._char_to_freq_order[order]:
self._freq_chars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
return self.SURE_NO
if self._total_chars != self._freq_chars:
r = (self._freq_chars / ((self._total_chars - self._freq_chars)
* self.typical_distribution_ratio))
if r < self.SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return self.SURE_YES
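    # Worked example for get_confidence above (hypothetical counts): with 2000
    # characters analysed, 1500 of them among the 512 most frequent orders and
    # a typical distribution ratio of 0.75, r = 1500 / ((2000 - 1500) * 0.75)
    # = 4.0, which exceeds SURE_YES and is therefore reported as 0.99.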
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._total_chars > self.ENOUGH_DATA_THRESHOLD
def get_order(self, byte_str):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCTWDistributionAnalysis, self).__init__()
self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
self._table_size = EUCTW_TABLE_SIZE
self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(byte_str[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(byte_str[1]) - 0xA1
else:
return -1
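# Worked example for EUCTWDistributionAnalysis.get_order above (illustrative
# bytes): the sequence 0xC5 0xA3 maps to 94 * (0xC5 - 0xC4) + (0xA3 - 0xA1) = 96.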
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCKRDistributionAnalysis, self).__init__()
self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
self._table_size = EUCKR_TABLE_SIZE
self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(byte_str[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(byte_str[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(GB2312DistributionAnalysis, self).__init__()
self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
self._table_size = GB2312_TABLE_SIZE
self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(byte_str[0]), wrap_ord(byte_str[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(Big5DistributionAnalysis, self).__init__()
self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
self._table_size = BIG5_TABLE_SIZE
self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(byte_str[0]), wrap_ord(byte_str[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
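# Worked example for Big5DistributionAnalysis.get_order above (illustrative
# bytes): 0xA5 0x50 -> 157 * (0xA5 - 0xA4) + (0x50 - 0x40) = 173, while
# 0xA5 0xA2 -> 157 * (0xA5 - 0xA4) + (0xA2 - 0xA1) + 63 = 221.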
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(SJISDistributionAnalysis, self).__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(byte_str[0]), wrap_ord(byte_str[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
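# Worked example for SJISDistributionAnalysis.get_order above (illustrative
# bytes): 0x82 0x60 -> 188 * (0x82 - 0x81) + (0x60 - 0x40) = 220; a trail byte
# above 0x7F (e.g. 0x82 0x80) is rejected with -1.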
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCJPDistributionAnalysis, self).__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(byte_str[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(byte_str[1]) - 0xa1
else:
return -1
| {
"content_hash": "6f8f8ec27b6c68509109e3ac9b238947",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 85,
"avg_line_length": 40.47826086956522,
"alnum_prop": 0.6069936746628476,
"repo_name": "webmedic/booker",
"id": "d8e9e149cd707dddb3f74b078f8020e875f1d463",
"size": "9560",
"binary": false,
"copies": "51",
"ref": "refs/heads/master",
"path": "src/chardet/chardistribution.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5995"
},
{
"name": "CSS",
"bytes": "17900"
},
{
"name": "Python",
"bytes": "4133180"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class undefined_subtlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs that are not defined in the model or not recognised by
system.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__state")
_yang_name = "undefined-subtlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
YANG Description: A reference to a subTLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: A reference to a subTLV
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
YANG Description: State parameters of the undefined sub-TLV.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the undefined sub-TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
type = __builtin__.property(_get_type)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("type", type), ("state", state)])
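# Usage sketch for the generated binding above (hypothetical; the long YANG
# paths are omitted): instances are normally reached through their parent
# bindings rather than built directly, e.g. roughly
#
#     subtlv = undefined_subtlv()
#     subtlv.type   # read-only property wrapping the leafref key
#     subtlv.state  # read-only property wrapping the nested `state` container
#
# Only getters are exposed because both nodes are `config: false`; the
# corresponding `_set_*` methods exist but are intended for backends loading
# operational data.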
from . import state
class undefined_subtlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Sub-TLVs that are not defined in the model or not recognised by
system.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__state")
_yang_name = "undefined-subtlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
YANG Description: A reference to a subTLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/type (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: A reference to a subTLV
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
YANG Description: State parameters of the undefined sub-TLV.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the undefined sub-TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
type = __builtin__.property(_get_type)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("type", type), ("state", state)])
| {
"content_hash": "e9fb1e3f82769c1f4fe44fa73c83b3e6",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 375,
"avg_line_length": 39.45381526104418,
"alnum_prop": 0.5765472312703583,
"repo_name": "napalm-automation/napalm-yang",
"id": "1245fa5d1972cb66af3be855f9ec95191883ae50",
"size": "19672",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
"""api_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^api/', include('interview.urls')),
]
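# Example (comments only), following the docstring's "Function views" recipe
# with a hypothetical my_app:
#   from my_app import views
#   urlpatterns += [url(r'^$', views.home, name='home')]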
| {
"content_hash": "8ba15f24b5db2aedfca3df786c778ccb",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 37.55,
"alnum_prop": 0.6937416777629827,
"repo_name": "t103z/interview",
"id": "635be2a32c667b4a8feacd2a688879255d0bfc15",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api_server/api_server/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103187"
},
{
"name": "HTML",
"bytes": "6006"
},
{
"name": "JavaScript",
"bytes": "2652572"
},
{
"name": "Python",
"bytes": "204798"
},
{
"name": "Shell",
"bytes": "212"
},
{
"name": "TeX",
"bytes": "6177"
}
],
"symlink_target": ""
} |
"""The first hint for this problem is the title of the webpage: 'peak hell'.
When pronounced, it sounds very similar to 'pickle', which is the builtin
python object serialization package. When viewing the source code of the
webpage, there is a 'peakhell' tag that links to a pickle file. We'll download
the file (prompting the user if they are okay with deserializing the file) then
view its contents."""
import pickle
import requests
import webbrowser
from bs4 import BeautifulSoup
webpage = "http://www.pythonchallenge.com/pc/def/peak.html"
r = requests.get(webpage)
soup = BeautifulSoup(r.content, "html.parser")
peakhell = soup.find("peakhell")["src"]
split_page = webpage.split("peak.html")
pickle_file = f"{split_page[0]}{peakhell}"
r = requests.get(pickle_file)
with open(peakhell, "wb") as fp:
fp.write(r.content)
# The module docstring promises a confirmation prompt before deserializing,
# since unpickling untrusted data can execute arbitrary code.
if input(f"Deserialize {peakhell!r}? [y/N] ").strip().lower() != "y":
    raise SystemExit("Aborted.")
# Print out each line to the console.
msg = pickle.load(open(peakhell, "rb"))
line = ""
for lst in msg:
for tup in lst:
line += tup[0] * tup[1]
print(line)
line = ""
print("opening new webpage...")
split_page = webpage.split("peak.html")
new_page = f"{split_page[0]}channel.html"
webbrowser.open(new_page)
| {
"content_hash": "5469123157d0e4a8937d2f62564df161",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 31.405405405405407,
"alnum_prop": 0.7185886402753873,
"repo_name": "cjonsmith/python-challenge",
"id": "79f89b45a66430a01d6b259abf350bb78951ef16",
"size": "1162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem_05.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9930"
}
],
"symlink_target": ""
} |
from types import StringTypes
import dateutil.parser
import datetime
import time
#from vt_manager.communication.sfa.util.sfalogging import logger
import logging
logger = logging.getLogger(__name__)  # stdlib stand-in for the sfalogging logger used below
DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ"
def utcparse(input):
""" Translate a string into a time using dateutil.parser.parse but make sure it's in UTC time and strip
the timezone, so that it's compatible with normal datetime.datetime objects.
For safety this can also handle inputs that are either timestamps, or datetimes
"""
# prepare the input for the checks below by
# casting strings ('1327098335') to ints
if isinstance(input, StringTypes):
try:
input = int(input)
except ValueError:
pass
if isinstance (input, datetime.datetime):
logger.warn ("argument to utcparse already a datetime - doing nothing")
return input
elif isinstance (input, StringTypes):
t = dateutil.parser.parse(input)
        if t.utcoffset() is not None:
            # convert to UTC, then strip the timezone (see docstring above)
            t = t.replace(tzinfo=None) - t.utcoffset()
return t
elif isinstance (input, (int,float,long)):
return datetime.datetime.fromtimestamp(input)
else:
logger.error("Unexpected type in utcparse [%s]"%type(input))
def datetime_to_string(input):
return datetime.datetime.strftime(input, DATEFORMAT)
def datetime_to_utc(input):
return time.gmtime(datetime_to_epoch(input))
def datetime_to_epoch(input):
return int(time.mktime(input.timetuple()))
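# Note: datetime_to_epoch relies on time.mktime, which interprets the naive
# datetime's timetuple as local time; pair it with utcparse output accordingly.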
| {
"content_hash": "0d60ab80dcb38181aa5becc264f72585",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 107,
"avg_line_length": 33.18181818181818,
"alnum_prop": 0.6917808219178082,
"repo_name": "ict-felix/stack",
"id": "5e852ae7d05aa875888fd93cd3e152e094a79de1",
"size": "2686",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vt_manager/src/python/vt_manager/communication/sfa/util/sfatime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
} |
"""Run layout tests."""
import errno
import logging
import optparse
import os
import signal
import sys
from layout_package import json_results_generator
from layout_package import printing
from layout_package import test_runner
from layout_package import test_runner2
from webkitpy.common.system import user
from webkitpy.thirdparty import simplejson
import port
_log = logging.getLogger(__name__)
def run(port, options, args, regular_output=sys.stderr,
buildbot_output=sys.stdout):
"""Run the tests.
Args:
port: Port object for port-specific behavior
options: a dictionary of command line options
args: a list of sub directories or files to test
regular_output: a stream-like object that we can send logging/debug
output to
buildbot_output: a stream-like object that we can write all output that
is intended to be parsed by the buildbot to
Returns:
the number of unexpected results that occurred, or -1 if there is an
error.
"""
warnings = _set_up_derived_options(port, options)
printer = printing.Printer(port, options, regular_output, buildbot_output,
int(options.child_processes), options.experimental_fully_parallel)
for w in warnings:
_log.warning(w)
if options.help_printing:
printer.help_printing()
printer.cleanup()
return 0
last_unexpected_results = _gather_unexpected_results(port)
if options.print_last_failures:
printer.write("\n".join(last_unexpected_results) + "\n")
printer.cleanup()
return 0
# We wrap any parts of the run that are slow or likely to raise exceptions
# in a try/finally to ensure that we clean up the logging configuration.
num_unexpected_results = -1
try:
runner = test_runner2.TestRunner2(port, options, printer)
runner._print_config()
printer.print_update("Collecting tests ...")
try:
runner.collect_tests(args, last_unexpected_results)
except IOError, e:
if e.errno == errno.ENOENT:
return -1
raise
if options.lint_test_files:
return runner.lint()
printer.print_update("Parsing expectations ...")
runner.parse_expectations()
printer.print_update("Checking build ...")
if not port.check_build(runner.needs_http()):
_log.error("Build check failed")
return -1
result_summary = runner.set_up_run()
if result_summary:
num_unexpected_results = runner.run(result_summary)
runner.clean_up_run()
_log.debug("Testing completed, Exit status: %d" %
num_unexpected_results)
finally:
printer.cleanup()
return num_unexpected_results
def _set_up_derived_options(port_obj, options):
"""Sets the options values that depend on other options values."""
# We return a list of warnings to print after the printer is initialized.
warnings = []
if options.worker_model is None:
options.worker_model = port_obj.default_worker_model()
if options.worker_model == 'inline':
if options.child_processes and int(options.child_processes) > 1:
warnings.append("--worker-model=inline overrides --child-processes")
options.child_processes = "1"
if not options.child_processes:
options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
str(port_obj.default_child_processes()))
if not options.configuration:
options.configuration = port_obj.default_configuration()
if options.pixel_tests is None:
options.pixel_tests = True
if not options.use_apache:
options.use_apache = sys.platform in ('darwin', 'linux2')
if not options.time_out_ms:
if options.configuration == "Debug":
options.time_out_ms = str(2 * test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS)
else:
options.time_out_ms = str(test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS)
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
if options.additional_platform_directory:
normalized_platform_directories = []
for path in options.additional_platform_directory:
if not port_obj._filesystem.isabs(path):
warnings.append("--additional-platform-directory=%s is ignored since it is not absolute" % path)
continue
normalized_platform_directories.append(port_obj._filesystem.normpath(path))
options.additional_platform_directory = normalized_platform_directories
return warnings
def _gather_unexpected_results(port):
"""Returns the unexpected results from the previous run, if any."""
filesystem = port._filesystem
results_directory = port.results_directory()
options = port._options
last_unexpected_results = []
if options.print_last_failures or options.retest_last_failures:
unexpected_results_filename = filesystem.join(results_directory, "unexpected_results.json")
if filesystem.exists(unexpected_results_filename):
results = json_results_generator.load_json(filesystem, unexpected_results_filename)
last_unexpected_results = results['tests'].keys()
return last_unexpected_results
def _compat_shim_callback(option, opt_str, value, parser):
print "Ignoring unsupported option: %s" % opt_str
def _compat_shim_option(option_name, **kwargs):
return optparse.make_option(option_name, action="callback",
callback=_compat_shim_callback,
help="Ignored, for old-run-webkit-tests compat only.", **kwargs)
def parse_args(args=None):
"""Provides a default set of command line args.
Returns a tuple of options, args from optparse"""
# FIXME: All of these options should be stored closer to the code which
# FIXME: actually uses them. configuration_options should move
# FIXME: to WebKitPort and be shared across all scripts.
configuration_options = [
optparse.make_option("-t", "--target", dest="configuration",
help="(DEPRECATED)"),
# FIXME: --help should display which configuration is default.
optparse.make_option('--debug', action='store_const', const='Debug',
dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const',
const='Release', dest="configuration",
help='Set the configuration to Release'),
# old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
]
print_options = printing.print_options()
# FIXME: These options should move onto the ChromiumPort.
chromium_options = [
optparse.make_option("--chromium", action="store_true", default=False,
help="use the Chromium port"),
optparse.make_option("--startup-dialog", action="store_true",
default=False, help="create a dialog on DumpRenderTree startup"),
optparse.make_option("--gp-fault-error-box", action="store_true",
default=False, help="enable Windows GP fault error box"),
optparse.make_option("--js-flags",
type="string", help="JavaScript flags to pass to tests"),
optparse.make_option("--stress-opt", action="store_true",
default=False,
help="Enable additional stress test to JavaScript optimization"),
optparse.make_option("--stress-deopt", action="store_true",
default=False,
help="Enable additional stress test to JavaScript optimization"),
optparse.make_option("--nocheck-sys-deps", action="store_true",
default=False,
help="Don't check the system dependencies (themes)"),
optparse.make_option("--accelerated-compositing",
action="store_true",
help="Use hardware-accelerated compositing for rendering"),
optparse.make_option("--no-accelerated-compositing",
action="store_false",
dest="accelerated_compositing",
help="Don't use hardware-accelerated compositing for rendering"),
optparse.make_option("--accelerated-2d-canvas",
action="store_true",
help="Use hardware-accelerated 2D Canvas calls"),
optparse.make_option("--no-accelerated-2d-canvas",
action="store_false",
dest="accelerated_2d_canvas",
help="Don't use hardware-accelerated 2D Canvas calls"),
optparse.make_option("--enable-hardware-gpu",
action="store_true",
default=False,
help="Run graphics tests on real GPU hardware vs software"),
]
# Missing Mac-specific old-run-webkit-tests options:
# FIXME: Need: -g, --guard for guard malloc support on Mac.
# FIXME: Need: -l --leaks Enable leaks checking.
# FIXME: Need: --sample-on-timeout Run sample on timeout
old_run_webkit_tests_compat = [
# NRWT doesn't generate results by default anyway.
_compat_shim_option("--no-new-test-results"),
# NRWT doesn't sample on timeout yet anyway.
_compat_shim_option("--no-sample-on-timeout"),
# FIXME: NRWT needs to support remote links eventually.
_compat_shim_option("--use-remote-links-to-tests"),
]
results_options = [
# NEED for bots: --use-remote-links-to-tests Link to test files
# within the SVN repository in the results.
optparse.make_option("-p", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--tolerance",
help="Ignore image differences less than this percentage (some "
"ports may ignore this option)", type="float"),
optparse.make_option("--results-directory", help="Location of test results"),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--new-baseline", action="store_true",
default=False, help="Save all generated results as new baselines "
"into the platform directory, overwriting whatever's "
"already there."),
optparse.make_option("--reset-results", action="store_true",
default=False, help="Reset any existing baselines to the "
"generated results"),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--additional-platform-directory", action="append",
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"are done"),
# FIXME: We should have a helper function to do this sort of
        # deprecated mapping and automatically log, etc.
optparse.make_option("--noshow-results", action="store_false",
dest="show_results",
help="Deprecated, same as --no-show-results."),
optparse.make_option("--no-launch-safari", action="store_false",
dest="show_results",
help="old-run-webkit-tests compat, same as --noshow-results."),
# old-run-webkit-tests:
# --[no-]launch-safari Launch (or do not launch) Safari to display
# test results (default: launch)
optparse.make_option("--full-results-html", action="store_true",
default=False,
help="Show all failures in results.html, rather than only "
"regressions"),
optparse.make_option("--clobber-old-results", action="store_true",
default=False, help="Clobbers test results from previous runs."),
optparse.make_option("--platform",
help="Override the platform for expected results"),
optparse.make_option("--no-record-results", action="store_false",
default=True, dest="record_results",
help="Don't record the results."),
# old-run-webkit-tests also has HTTP toggle options:
# --[no-]http Run (or do not run) http tests
# (default: run)
]
test_options = [
optparse.make_option("--build", dest="build",
action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date "
"(default)."),
optparse.make_option("--no-build", dest="build",
action="store_false", help="Don't check to see if the "
"DumpRenderTree build is up-to-date."),
optparse.make_option("-n", "--dry-run", action="store_true",
default=False,
help="Do everything but actually run the tests or upload results."),
# old-run-webkit-tests has --valgrind instead of wrapper.
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"DumpRenderTree; option is split on whitespace before "
"running. (Example: --wrapper='valgrind --smc-check=all')"),
# old-run-webkit-tests:
# -i|--ignore-tests Comma-separated list of directories
# or tests to ignore
optparse.make_option("--test-list", action="append",
help="read list of tests to run from file", metavar="FILE"),
# old-run-webkit-tests uses --skipped==[default|ignore|only]
# instead of --force:
optparse.make_option("--force", action="store_true", default=False,
help="Run all tests, even those marked SKIP in the test list"),
optparse.make_option("--use-apache", action="store_true",
default=False, help="Whether to use apache instead of lighttpd."),
optparse.make_option("--time-out-ms",
help="Set the timeout for each test"),
# old-run-webkit-tests calls --randomize-order --random:
optparse.make_option("--randomize-order", action="store_true",
default=False, help=("Run tests in random order (useful "
"for tracking down corruption)")),
optparse.make_option("--run-chunk",
help=("Run a specified chunk (n:l), the nth of len l, "
"of the layout tests")),
optparse.make_option("--run-part", help=("Run a specified part (n:m), "
"the nth of m parts, of the layout tests")),
# old-run-webkit-tests calls --batch-size: --nthly n
# Restart DumpRenderTree every n tests (default: 1000)
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
"DumpRenderTree is relaunched."), type="int", default=0),
# old-run-webkit-tests calls --run-singly: -1|--singly
# Isolate each test case run (implies --nthly 1 --verbose)
optparse.make_option("--run-singly", action="store_true",
default=False, help="run a separate DumpRenderTree for each test"),
optparse.make_option("--child-processes",
help="Number of DumpRenderTrees to run in parallel."),
# FIXME: Display default number of child processes that will run.
optparse.make_option("--worker-model", action="store",
default=None, help=("controls worker model. Valid values are "
"'inline', 'threads', and 'processes'.")),
optparse.make_option("--experimental-fully-parallel",
action="store_true", default=False,
help="run all tests in parallel"),
optparse.make_option("--exit-after-n-failures", type="int", default=500,
help="Exit after the first N failures instead of running all "
"tests"),
optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
default=20, help="Exit after the first N crashes instead of "
"running all tests"),
# FIXME: consider: --iterations n
# Number of times to run the set of tests (e.g. ABCABCABC)
optparse.make_option("--print-last-failures", action="store_true",
default=False, help="Print the tests in the last run that "
"had unexpected failures (or passes) and then exit."),
optparse.make_option("--retest-last-failures", action="store_true",
default=False, help="re-test the tests in the last run that "
"had unexpected failures (or passes)."),
optparse.make_option("--retry-failures", action="store_true",
default=True,
help="Re-try any tests that produce unexpected results (default)"),
optparse.make_option("--no-retry-failures", action="store_false",
dest="retry_failures",
help="Don't re-try any tests that produce unexpected results."),
]
misc_options = [
optparse.make_option("--lint-test-files", action="store_true",
default=False, help=("Makes sure the test files parse for all "
"configurations. Does not run any tests.")),
]
# FIXME: Move these into json_results_generator.py
results_json_options = [
optparse.make_option("--master-name", help="The name of the buildbot master."),
optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME",
help=("The name of the builder shown on the waterfall running "
"this script e.g. WebKit.")),
optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
help=("The name of the builder used in its path, e.g. "
"webkit-rel.")),
optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
help=("The build number of the builder running this script.")),
optparse.make_option("--test-results-server", default="",
help=("If specified, upload results json files to this appengine "
"server.")),
]
option_list = (configuration_options + print_options +
chromium_options + results_options + test_options +
misc_options + results_json_options +
old_run_webkit_tests_compat)
option_parser = optparse.OptionParser(option_list=option_list)
return option_parser.parse_args(args)
def main():
options, args = parse_args()
port_obj = port.get(options.platform, options)
return run(port_obj, options, args)
if '__main__' == __name__:
try:
sys.exit(main())
except KeyboardInterrupt:
# this mirrors what the shell normally does
sys.exit(signal.SIGINT + 128)
| {
"content_hash": "ff80caff6b427e96d4de61dc52a9fbf9",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 112,
"avg_line_length": 46.763723150358,
"alnum_prop": 0.619118097376748,
"repo_name": "Xperia-Nicki/android_platform_sony_nicki",
"id": "e814008a1606c62241824e1c120138d7070604e4",
"size": "21230",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "external/webkit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "212775"
},
{
"name": "Awk",
"bytes": "19252"
},
{
"name": "C",
"bytes": "68667466"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "54670920"
},
{
"name": "CLIPS",
"bytes": "12224"
},
{
"name": "CSS",
"bytes": "283405"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Java",
"bytes": "4882"
},
{
"name": "JavaScript",
"bytes": "19597804"
},
{
"name": "Objective-C",
"bytes": "5849156"
},
{
"name": "PHP",
"bytes": "17224"
},
{
"name": "Pascal",
"bytes": "42411"
},
{
"name": "Perl",
"bytes": "1632149"
},
{
"name": "Prolog",
"bytes": "214621"
},
{
"name": "Python",
"bytes": "3493321"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Ruby",
"bytes": "78743"
},
{
"name": "Scilab",
"bytes": "554"
},
{
"name": "Shell",
"bytes": "265637"
},
{
"name": "TypeScript",
"bytes": "45459"
},
{
"name": "XSLT",
"bytes": "11219"
}
],
"symlink_target": ""
} |
import os, sys
sys.path.append(os.getcwd())
import time
import tflib as lib
import tflib.save_images
import tflib.mnist
import tflib.cifar10
import tflib.plot
import tflib.inception_score
import numpy as np
import torch
import torchvision
from torch import nn
from torch import autograd
from torch import optim
# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
DATA_DIR = 'cifar-10-batches-py/'
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_cifar.py!')
MODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp
DIM = 128 # This overfits substantially; you're probably better off with 64
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 200000 # How many generator iterations to train for
OUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
preprocess = nn.Sequential(
nn.Linear(128, 4 * 4 * 4 * DIM),
nn.BatchNorm2d(4 * 4 * 4 * DIM),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(4 * DIM, 2 * DIM, 2, stride=2),
nn.BatchNorm2d(2 * DIM),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(2 * DIM, DIM, 2, stride=2),
nn.BatchNorm2d(DIM),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(DIM, 3, 2, stride=2)
self.preprocess = preprocess
self.block1 = block1
self.block2 = block2
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * DIM, 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.deconv_out(output)
output = self.tanh(output)
return output.view(-1, 3, 32, 32)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
main = nn.Sequential(
nn.Conv2d(3, DIM, 3, 2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(DIM, 2 * DIM, 3, 2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(2 * DIM, 4 * DIM, 3, 2, padding=1),
nn.LeakyReLU(),
)
self.main = main
self.linear = nn.Linear(4*4*4*DIM, 1)
def forward(self, input):
output = self.main(input)
output = output.view(-1, 4*4*4*DIM)
output = self.linear(output)
return output
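# Note: the critic ends in a plain linear layer with no sigmoid; in WGAN/WGAN-GP
# it outputs an unbounded scalar score rather than a probability.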
netG = Generator()
netD = Discriminator()
print netG
print netD
use_cuda = torch.cuda.is_available()
if use_cuda:
gpu = 0
if use_cuda:
netD = netD.cuda(gpu)
netG = netG.cuda(gpu)
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
one = one.cuda(gpu)
mone = mone.cuda(gpu)
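# one/mone act as gradient seeds for backward(): in the training loop,
# D_real.backward(mone) and D_fake.backward(one) together minimize
# D(fake) - D(real), while G.backward(mone) maximizes D(G(z)) for the generator.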
optimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))
optimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
def calc_gradient_penalty(netD, real_data, fake_data):
# print "real_data: ", real_data.size(), fake_data.size()
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()/BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32)
alpha = alpha.cuda(gpu) if use_cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if use_cuda:
interpolates = interpolates.cuda(gpu)
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
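# calc_gradient_penalty above computes the WGAN-GP term
#   LAMBDA * (||grad_xhat D(xhat)||_2 - 1)^2, with xhat = alpha*real + (1-alpha)*fake,
# averaged over the batch, pushing the critic's gradient norm towards 1.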
# For generating samples
def generate_image(frame, netG):
fixed_noise_128 = torch.randn(128, 128)
if use_cuda:
fixed_noise_128 = fixed_noise_128.cuda(gpu)
noisev = autograd.Variable(fixed_noise_128, volatile=True)
samples = netG(noisev)
samples = samples.view(-1, 3, 32, 32)
samples = samples.mul(0.5).add(0.5)
samples = samples.cpu().data.numpy()
lib.save_images.save_images(samples, './tmp/cifar10/samples_{}.jpg'.format(frame))
# For calculating inception score
def get_inception_score(G, ):
all_samples = []
for i in xrange(10):
samples_100 = torch.randn(100, 128)
if use_cuda:
samples_100 = samples_100.cuda(gpu)
samples_100 = autograd.Variable(samples_100, volatile=True)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)
return lib.inception_score.get_inception_score(list(all_samples))
# Dataset iterator
train_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR)
def inf_train_gen():
while True:
for images, target in train_gen():
# yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
yield images
gen = inf_train_gen()
preprocess = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
for iteration in xrange(ITERS):
start_time = time.time()
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for i in xrange(CRITIC_ITERS):
_data = gen.next()
netD.zero_grad()
# train with real
_data = _data.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
real_data = torch.stack([preprocess(item) for item in _data])
if use_cuda:
real_data = real_data.cuda(gpu)
real_data_v = autograd.Variable(real_data)
# import torchvision
# filename = os.path.join("test_train_data", str(iteration) + str(i) + ".jpg")
# torchvision.utils.save_image(real_data, filename)
D_real = netD(real_data_v)
D_real = D_real.mean()
D_real.backward(mone)
# train with fake
noise = torch.randn(BATCH_SIZE, 128)
if use_cuda:
noise = noise.cuda(gpu)
noisev = autograd.Variable(noise, volatile=True) # totally freeze netG
fake = autograd.Variable(netG(noisev).data)
inputv = fake
D_fake = netD(inputv)
D_fake = D_fake.mean()
D_fake.backward(one)
# train with gradient penalty
gradient_penalty = calc_gradient_penalty(netD, real_data_v.data, fake.data)
gradient_penalty.backward()
# print "gradien_penalty: ", gradient_penalty
D_cost = D_fake - D_real + gradient_penalty
Wasserstein_D = D_real - D_fake
optimizerD.step()
############################
# (2) Update G network
###########################
for p in netD.parameters():
p.requires_grad = False # to avoid computation
netG.zero_grad()
noise = torch.randn(BATCH_SIZE, 128)
if use_cuda:
noise = noise.cuda(gpu)
noisev = autograd.Variable(noise)
fake = netG(noisev)
G = netD(fake)
G = G.mean()
G.backward(mone)
G_cost = -G
optimizerG.step()
# Write logs and save samples
lib.plot.plot('./tmp/cifar10/train disc cost', D_cost.cpu().data.numpy())
lib.plot.plot('./tmp/cifar10/time', time.time() - start_time)
lib.plot.plot('./tmp/cifar10/train gen cost', G_cost.cpu().data.numpy())
lib.plot.plot('./tmp/cifar10/wasserstein distance', Wasserstein_D.cpu().data.numpy())
    # Calculate inception score every 1K iters (currently disabled by the "if False" guard)
if False and iteration % 1000 == 999:
inception_score = get_inception_score(netG)
lib.plot.plot('./tmp/cifar10/inception score', inception_score[0])
# Calculate dev loss and generate samples every 100 iters
if iteration % 100 == 99:
dev_disc_costs = []
for images, _ in dev_gen():
images = images.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
imgs = torch.stack([preprocess(item) for item in images])
# imgs = preprocess(images)
if use_cuda:
imgs = imgs.cuda(gpu)
imgs_v = autograd.Variable(imgs, volatile=True)
D = netD(imgs_v)
_dev_disc_cost = -D.mean().cpu().data.numpy()
dev_disc_costs.append(_dev_disc_cost)
lib.plot.plot('./tmp/cifar10/dev disc cost', np.mean(dev_disc_costs))
generate_image(iteration, netG)
# Save logs every 100 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
| {
"content_hash": "99b0b2b07b87728c61b6d8850e923409",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 118,
"avg_line_length": 34.010791366906474,
"alnum_prop": 0.6009518773135907,
"repo_name": "caogang/wgan-gp",
"id": "cfb8f61c031628b85b3b32fdf01f7dca0022e38c",
"size": "9455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gan_cifar10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74656"
}
],
"symlink_target": ""
} |
what a morning
| {
"content_hash": "4f983fcc18f60a3ecbfea080928927a5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 14,
"avg_line_length": 15,
"alnum_prop": 0.8,
"repo_name": "fanalieri/casimirprogramming",
"id": "9e2aec9fbdfda65810e6dd808a5437821374234b",
"size": "15",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39"
}
],
"symlink_target": ""
} |
import ee
import math
from cmt.mapclient_qt import addToMap
from cmt.util.miscUtilities import safe_get_info
from cmt.modis.simple_modis_algorithms import *
from modis_utilities import *
'''
Contains algorithms and tools for using the built-in Earth Engine classifiers.
'''
#==============================================================
def _create_learning_image(domain, b):
'''Set up features for the classifier to be trained on'''
    outputBands = _get_modis_learning_bands(domain, b) # Get the standard set of MODIS learning bands
    #outputBands = _get_extensive_modis_learning_bands(domain, b) # Get the extensive set of MODIS learning bands
# Try to add a DEM
try:
dem = domain.get_dem().image
outputBands.addBands(dem)
#outputBands = dem
except AttributeError:
pass # Suppress error if there is no DEM data
    # Try to add Skybox RGB info (NIR is handled separately because not all Skybox images have it)
# - Use all the base bands plus a grayscale texture measure
try:
try: # The Skybox data can be in one of two names
skyboxSensor = domain.skybox
except:
skyboxSensor = domain.skybox_nir
rgbBands = skyboxSensor.Red.addBands(skyboxSensor.Green).addBands(skyboxSensor.Blue)
grayBand = rgbBands.select('Red').add(rgbBands.select('Green')).add(rgbBands.select('Blue')).divide(ee.Image(3.0)).uint16()
edges = grayBand.convolve(ee.Kernel.laplacian8(normalize=True)).abs()
texture = edges.convolve(ee.Kernel.square(3, 'pixels')).select(['Red'], ['Texture'])
texture2Raw = grayBand.glcmTexture()
bandList = safe_get_info(texture2Raw)['bands']
bandName = [x['id'] for x in bandList if 'idm' in x['id']]
texture2 = texture2Raw.select(bandName).convolve(ee.Kernel.square(5, 'pixels'))
#skyboxBands = rgbBands.addBands(texture).addBands(texture2)
skyboxBands = rgbBands.addBands(texture2)
outputBands = outputBands.addBands(skyboxBands)
#outputBands = skyboxBands
#addToMap(grayBand, {'min': 0, 'max': 1200}, 'grayBand')
#addToMap(edges, {'min': 0, 'max': 250}, 'edges')
#addToMap(texture, {'min': 0, 'max': 250}, 'texture')
#addToMap(texture2, {'min': 0, 'max': 1}, 'texture2')
except AttributeError:
pass # Suppress error if there is no Skybox data
# Try to add Skybox Near IR band
try:
outputBands = outputBands.addBands(domain.skybox_nir.NIR)
#addToMap(domain.skybox.NIR, {'min': 0, 'max': 1200}, 'Near IR')
except AttributeError:
pass # Suppress error if there is no Skybox NIR data
return outputBands
def _get_modis_learning_bands(domain, b):
    '''Set up features for the classifier to be trained on: [b1, b2, b2-b1, b2/b1, NDVI, NDWI]'''
diff = b['b2'].subtract(b['b1'])
ratio = b['b2'].divide(b['b1'])
modisBands = b['b1'].addBands(b['b2']).addBands(diff).addBands(ratio).addBands(b['NDVI']).addBands(b['NDWI'])
return modisBands
def _get_extensive_modis_learning_bands(domain, b):
'''Like _get_modis_learning_bands but adding a lot of simple classifiers'''
#a = get_diff(b).select(['b1'], ['b1'])
a = b['b1'].select(['sur_refl_b01'], ['b1' ])
a = a.addBands(b['b2'].select(['sur_refl_b02'], ['b2' ]))
a = a.addBands(b['b2'].divide(b['b1']).select(['sur_refl_b02'], ['ratio' ]))
a = a.addBands(b['LSWI'].subtract(b['NDVI']).subtract(0.05).select(['sur_refl_b02'], ['LSWIminusNDVI']))
a = a.addBands(b['LSWI'].subtract(b['EVI']).subtract(0.05).select(['sur_refl_b02'], ['LSWIminusEVI' ]))
a = a.addBands(b['EVI'].subtract(0.3).select(['sur_refl_b02'], ['EVI' ]))
a = a.addBands(b['LSWI'].select(['sur_refl_b02'], ['LSWI' ]))
a = a.addBands(b['NDVI'].select(['sur_refl_b02'], ['NDVI' ]))
a = a.addBands(b['NDWI'].select(['sur_refl_b01'], ['NDWI' ]))
a = a.addBands(get_diff(b).select(['b1'], ['diff' ]))
a = a.addBands(get_fai(b).select(['b1'], ['fai' ]))
a = a.addBands(get_dartmouth(b).select(['b1'], ['dartmouth' ]))
a = a.addBands(get_mod_ndwi(b).select(['b1'], ['MNDWI' ]))
return a
def earth_engine_classifier(domain, b, classifier_name, extra_args={}):
'''Apply EE classifier tool using a ground truth image.'''
# Training requires a training image plus either ground truth or training features.
training_domain = None
#if domain.training_domain:
training_domain = domain.training_domain
#elif domain.unflooded_domain:
#training_domain = domain.unflooded_domain
if not training_domain:
raise Exception('Cannot run classifier algorithm without a training domain!')
training_image = _create_learning_image(training_domain, compute_modis_indices(training_domain))
if training_domain.training_features:
args = {
'training_features' : training_domain.training_features,
'training_property' : 'classification',
#'crs' : 'EPSG:32736',
#'crs_transform' : [0.8,0,733605.2,0,-0.8,8117589.2]
"crs": "EPSG:4326", # TODO: What to use here???
"crs_transform": [8.9831528411952135e-05, 0, -180, 0, -8.9831528411952135e-05, 90],
}
elif training_domain.ground_truth:
args = {
'training_image' : training_domain.ground_truth,
'training_band' : "b1",
'training_region' : training_domain.bounds
}
else: # Use the permanent water mask
args = {
'training_image' : get_permanent_water_mask(),
'training_band' : "b1",
'training_region' : training_domain.bounds
}
common_args = {
'image' : training_image,
'subsampling' : 0.2, # TODO: Reduce this on failure?
'max_classification': 2,
'classifier_mode' : 'classification',
'classifier_name' : classifier_name
}
args.update(common_args)
args.update(extra_args)
classifier = ee.apply("TrainClassifier", args) # Call the EE classifier
classified = _create_learning_image(domain, b).classify(classifier).select(['classification'], ['b1'])
# For high resolution Skybox images, apply an additional filter step to clean up speckles.
try:
try: # The Skybox data can be in one of two names
skyboxSensor = domain.skybox
except:
skyboxSensor = domain.skybox_nir
classified = classified.focal_min(13, 'circle', 'meters').focal_max(13, 'circle', 'meters')
except:
pass
return classified;
def cart(domain, b):
'''Classify using CART (Classification And Regression Tree)'''
return earth_engine_classifier(domain, b, 'Cart')
def svm(domain, b):
'''Classify using Pegasos classifier'''
return earth_engine_classifier(domain, b, 'Pegasos')
def random_forests(domain, b):
'''Classify using RifleSerialClassifier (Random Forests)'''
return earth_engine_classifier(domain, b, 'RifleSerialClassifier')
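# Minimal usage sketch (comments only; assumes a CMT domain object whose
# training_domain is configured, as earth_engine_classifier requires):
#   b = compute_modis_indices(domain)
#   water = cart(domain, b)   # or svm(domain, b) / random_forests(domain, b)
#   addToMap(water, {'min': 0, 'max': 1}, 'CART water mask')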
| {
"content_hash": "e600a5f2a01b96ae03b3e0a066aea8c3",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 134,
"avg_line_length": 46.54761904761905,
"alnum_prop": 0.5652173913043478,
"repo_name": "asurunis/CrisisMappingToolkit",
"id": "f11e94c350e974f12562527e1f91e7db00091d8d",
"size": "8731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmt/modis/ee_classifiers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "420180"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
from py4j.protocol import Py4JJavaError
from pyspark.ml import Pipeline
from pyspark.sql import types as t
from mleap.pyspark.feature.string_map import StringMap
from mleap.pyspark.spark_support import SimpleSparkSerializer
from tests.pyspark.lib.assertions import assert_df
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False)])
OUTPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False),
t.StructField('value_col', t.DoubleType(), False)])
class StringMapTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spark = spark_session()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def setUp(self):
self.input = StringMapTest.spark.createDataFrame([['a', 'b']], INPUT_SCHEMA)
def test_map(self):
result = StringMap(
labels={'a': 1.0},
inputCol='key_col',
outputCol='value_col',
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_default_value(self):
result = StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col',
handleInvalid='keep',
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 0.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_custom_default_value(self):
result = StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col',
handleInvalid='keep',
defaultValue=-1.0
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', -1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_map_missing_value_error(self):
with self.assertRaises(Py4JJavaError) as error:
StringMap(
labels={'z': 1.0},
inputCol='key_col',
outputCol='value_col'
).transform(self.input).collect()
self.assertIn('java.util.NoSuchElementException: Missing label: a', str(error.exception))
def test_map_from_dataframe(self):
labels_df = StringMapTest.spark.createDataFrame([['a', 1.0]], 'key_col: string, value_col: double')
result = StringMap.from_dataframe(
labels_df=labels_df,
inputCol='key_col',
outputCol='value_col'
).transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
def test_serialize_to_bundle(self):
string_map = StringMap(
labels={'a': 1.0},
inputCol='key_col',
outputCol='value_col',
)
pipeline = Pipeline(stages=[string_map]).fit(self.input)
serialization_dataset = pipeline.transform(self.input)
jar_file_path = _serialize_to_file(pipeline, serialization_dataset)
deserialized_pipeline = _deserialize_from_file(jar_file_path)
result = deserialized_pipeline.transform(self.input)
expected = StringMapTest.spark.createDataFrame([['a', 'b', 1.0]], OUTPUT_SCHEMA)
assert_df(expected, result)
@staticmethod
def test_validate_handleInvalid_ok():
StringMap(labels={}, handleInvalid='error')
def test_validate_handleInvalid_bad(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol=dict(), outputCol=None, handleInvalid='invalid')
def test_validate_labels_type_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol=set(), outputCol=None)
def test_validate_labels_key_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol={False: 0.0}, outputCol=None)
def test_validate_labels_value_fails(self):
with self.assertRaises(AssertionError):
StringMap(labels=None, inputCol={'valid_key_type': 'invalid_value_type'}, outputCol=None)
def _serialize_to_file(model, df_for_serializing):
jar_file_path = _to_jar_file_path(
os.path.join(tempfile.mkdtemp(), 'test_serialize_to_bundle-pipeline.zip'))
SimpleSparkSerializer().serializeToBundle(model, jar_file_path, df_for_serializing)
return jar_file_path
def _to_jar_file_path(path):
return "jar:file:" + path
def _deserialize_from_file(path):
return SimpleSparkSerializer().deserializeFromBundle(path)
| {
"content_hash": "50a1b49addb916e360f0ba2f19d3de4d",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 107,
"avg_line_length": 36.46268656716418,
"alnum_prop": 0.6318051575931232,
"repo_name": "combust/mleap",
"id": "e372ca50f1bbb91d278a5aa868f3b3246267b836",
"size": "4886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tests/pyspark/feature/string_map_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "28124"
},
{
"name": "Makefile",
"bytes": "1592"
},
{
"name": "Python",
"bytes": "208315"
},
{
"name": "Scala",
"bytes": "1551436"
},
{
"name": "Shell",
"bytes": "1550"
}
],
"symlink_target": ""
} |
u'''\
:mod:`ecoxipy.etree_output` - Building ElementTree Data
=======================================================
:class:`ETreeOutput` creates :mod:`xml.etree.ElementTree` structures.
.. _ecoxipy.etree_output.examples:
Usage Example:
>>> from xml.dom.minidom import getDOMImplementation
>>> etree_output = ETreeOutput()
>>> from ecoxipy import MarkupBuilder
>>> b = MarkupBuilder(etree_output)
>>> xml_doc = b[:'section':True] (
... b.section(
... b.p(b & 'Hello World!'),
... None,
... b(u'<p>äöüß</p>'),
... b.p('<&>'),
... b(
... '<raw/>text', b.br,
... (i for i in range(3)), (i for i in range(3, 6))
... ),
... b | '<This is a comment!>',
... b['pi-target':'<PI content>'],
... b['pi-without-content':],
... attr='\\'"<&>'
... )
... )
>>> from io import BytesIO
>>> bytes_io = BytesIO()
>>> xml_doc.write(bytes_io, 'utf-8', True)
>>> document_string = u"""<?xml version='1.0' encoding='utf-8'?>\\n<section attr="'"<&>"><p>Hello World!</p><p>äöüß</p><p><&></p><raw />text<br />012345<!--<This is a comment!>--><?pi-target <PI content>?><?pi-without-content?></section>"""
>>> bytes_io.getvalue() == document_string.encode('UTF-8')
True
'''
from ecoxipy import Output, _unicode
class ETreeOutput(Output):
'''\
An :class:`Output` implementation which creates
:mod:`xml.etree.ElementTree` structures.
:param element_factory: A :mod:`xml.etree.ElementTree`-compatible
factory. If this is :const:`None` :mod:`xml.etree.ElementTree` is
used.
'''
def __init__(self, element_factory=None):
if element_factory is None:
from xml.etree import ElementTree
element_factory = ElementTree
self._element_factory = element_factory
from collections import deque as _deque
def is_native_type(self, content):
'''\
        Tests if an object is an ``etree`` object by calling :meth:`iselement`
of the element factory.
:returns: :const:`True` for compatible :mod:`xml.etree.ElementTree`
objects, :const:`False` otherwise.
'''
return self._element_factory.iselement(content)
def element(self, name, children, attributes):
'''\
Creates an element.
'''
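        # ElementTree stores character data as element.text (before the first
        # child) and as child.tail (after each child), so runs of text children
        # below are buffered in a deque and flushed into the right slot.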
element = self._element_factory.Element(name, attributes)
if len(children) < 2:
try:
child = children.popleft()
if child.__class__ is _unicode:
element.text = child
else:
element.append(child)
except IndexError:
pass
return element
texts = None
previous = None
def handle_texts():
if texts is None or len(texts) == 0:
return
joined_texts = u''.join(texts)
texts.clear()
if previous is None:
element.text = joined_texts
else:
previous.tail = joined_texts
for child in children:
if child.__class__ is _unicode:
if texts is None:
texts = self._deque()
texts.append(child)
else:
handle_texts()
element.append(child)
previous = child
handle_texts()
return element
def text(self, content):
'''\
Creates an Unicode string.
'''
return content
def comment(self, content):
'''\
Creates a comment element.
'''
return self._element_factory.Comment(content)
def processing_instruction(self, target, content):
'''\
Creates a processing instruction element.
'''
return self._element_factory.ProcessingInstruction(
target, content)
def document(self, doctype_name, doctype_publicid, doctype_systemid,
children, omit_xml_declaration, encoding):
'''\
Creates an :mod:`xml.etree.ElementTree.ElementTree`-compatible object
using the factory.
As :mod:`xml.etree.ElementTree` lacks support for document type
declarations, the ``doctype_*`` parameters are ignored. Element tree
wrappers do not allow specification of the output encoding and of the
XML declaration, so ``encoding`` and ``omit_xml_declaration`` are also
ignored. As element trees only allow one root element, the length of
``children`` must be zero or one, otherwise a :class:`ValueError` is
raised.
'''
if len(children) == 0:
root_element = None
elif len(children) > 1:
raise ValueError('Only one root element is allowed.')
else:
root_element = children.popleft()
return self._element_factory.ElementTree(root_element)
del Output
| {
"content_hash": "23a0f69296b9f92efcd03c94bf7319c7",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 265,
"avg_line_length": 32.86092715231788,
"alnum_prop": 0.5525997581620314,
"repo_name": "IvIePhisto/ECoXiPy",
"id": "36751486d2554849442d7de96243a58bf0061884",
"size": "4994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecoxipy/etree_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214034"
},
{
"name": "Shell",
"bytes": "6074"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, Person, Qualification, Reader, Room, TaggedItem,
Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author.objects.create(name="Jane",
first_book=self.book4)
self.book1.authors.add(self.author1, self.author2, self.author3)
self.book2.authors.add(self.author1)
self.book3.authors.add(self.author3)
self.book4.authors.add(self.author4)
self.reader1 = Reader.objects.create(name="Amy")
self.reader2 = Reader.objects.create(name="Belinda")
self.reader1.books_read.add(self.book1, self.book4)
self.reader2.books_read.add(self.book2, self.book4)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
"""
Test that we can clear the behavior by calling prefetch_related()
"""
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""
Test we can follow a m2m and another m2m
"""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Test that objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[six.text_type(r) for r in b.read_by.all()]
for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
Test we can follow an m2m relation after a relation like ForeignKey
that doesn't have many objects
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"],
["Amy"],
["Amy"],
["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
Test that we can follow a m2m relation after going through
the select_related reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
with self.assertRaises(AttributeError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
with self.assertRaises(ValueError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
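# The three *_object_ids tests below inspect the SQL of the final prefetch
# query: each related object's identifier (name or pk, depending on the
# relation's target field) must appear exactly once, i.e. the ids passed to
# the IN clause are de-duplicated before the query is issued.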
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(self.author1.name), 1)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(self.author1.name), 1)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(str(self.author1.id)), 1, sql)
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list of (object, related_objects) tuples, one
for each object in obj_iter. For every object, the given path is traversed
recursively and the objects found along it are collected into related_objects.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
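# Illustrative sketch of the return shape: for the models used below,
# cls.traverse_qs(Person.objects.all(), [['houses', 'rooms']]) yields nested
# (object, related) tuples, roughly
#   [(person, [(house, [(room, []), ...]), ...]), ...]
# which is what the tests compare between the control lookups and their
# Prefetch() variants.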
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(name='House 1', address="123 Main St", owner=self.person1)
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(name='House 2', address="45 Side St", owner=self.person1)
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.room2_3 = Room.objects.create(name="Kitchen", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(name='House 3', address="6 Downing St", owner=self.person2)
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=self.person2)
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.room4_3 = Room.objects.create(name="Kitchen", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
with self.assertRaises(ValueError):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
with self.assertRaises(AttributeError):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
def setUp(self):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
self.book1, self.book2, self.book3 = book1, book2, book3
self.reader1, self.reader2, self.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
Test that we can traverse a 'content_object' with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
class MultiTableInheritanceTest(TestCase):
def setUp(self):
self.book1 = BookWithYear.objects.create(
title="Poems", published_year=2010)
self.book2 = BookWithYear.objects.create(
title="More poems", published_year=2011)
self.author1 = AuthorWithAge.objects.create(
name='Jane', first_book=self.book1, age=50)
self.author2 = AuthorWithAge.objects.create(
name='Tom', first_book=self.book1, age=49)
self.author3 = AuthorWithAge.objects.create(
name='Robert', first_book=self.book2, age=48)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1')
self.book2.aged_authors.add(self.author2, self.author3)
self.br1 = BookReview.objects.create(
book=self.book1, notes="review book1")
self.br2 = BookReview.objects.create(
book=self.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
def setUp(self):
self.book = Book.objects.create(title="Poems")
self.author1 = Author.objects.create(name='Jane', first_book=self.book)
self.author2 = Author.objects.create(name='Tom', first_book=self.book)
self.author3 = Author.objects.create(name='Robert', first_book=self.book)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1'
)
FavoriteAuthors.objects.create(author=self.author1,
likes_author=self.author2)
FavoriteAuthors.objects.create(author=self.author2,
likes_author=self.author3)
FavoriteAuthors.objects.create(author=self.author3,
likes_author=self.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[six.text_type(i_like) for i_like in author.favorite_authors.all()],
[six.text_type(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([six.text_type(self.author2)], [six.text_type(self.author3)]),
([six.text_type(self.author3)], [six.text_type(self.author1)]),
([six.text_type(self.author1)], [six.text_type(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
def setUp(self):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
in_bulk() correctly prefetches related objects because it does not use
.iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Check that prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane",
first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', force_text(queryset.query))
class Ticket25546Tests(TestCase):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
Before, prefetch queries were for 'addresses', 'first_time_authors', and
'first_time_authors__addresses'. The last query is the duplicate.
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
def test_prefetch(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertListEqual(list(book1.first_time_authors.all()), [self.author11, self.author12])
self.assertListEqual(list(book2.first_time_authors.all()), [self.author21])
self.assertListEqual(list(book1.first_time_authors.all()[0].addresses.all()), [self.author1_address1])
self.assertListEqual(list(book1.first_time_authors.all()[1].addresses.all()), [])
self.assertListEqual(list(book2.first_time_authors.all()[0].addresses.all()), [self.author2_address1])
def test_prefetch_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertListEqual(book1.first_authors, [self.author11, self.author12])
self.assertListEqual(book2.first_authors, [self.author21])
self.assertListEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertListEqual(book1.first_authors[1].happy_place, [])
self.assertListEqual(book2.first_authors[0].happy_place, [self.author2_address1])
| {
"content_hash": "cce2f5c666b11d84ca6d180ab132c1a0",
"timestamp": "",
"source": "github",
"line_count": 1322,
"max_line_length": 119,
"avg_line_length": 43.136913767019664,
"alnum_prop": 0.5816718396549003,
"repo_name": "karyon/django",
"id": "5e64df2c707eee71ef2819ba2e81435da6f6d52b",
"size": "57027",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/prefetch_related/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11449863"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class ThalesSecworld(A10BaseClass):
"""Class Description::
export Thales security world files.
Class thales-secworld supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param remote_file: {"optional": true, "type": "string", "description": "profile name for remote url", "format": "url"}
:param period: {"description": "Specify the period in second", "format": "number", "type": "number", "maximum": 31536000, "minimum": 60, "optional": true}
:param thales_secworld: {"description": "export Thales security world files - in .tgz format that has all files needed by AX", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}
:param use_mgmt_port: {"default": 0, "optional": true, "type": "number", "description": "Use management port as source port", "format": "flag"}
:param overwrite: {"default": 0, "optional": true, "type": "number", "description": "Overwrite existing file", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/export-periodic/thales-secworld/{thales_secworld}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "thales_secworld"]
self.b_key = "thales-secworld"
self.a10_url="/axapi/v3/export-periodic/thales-secworld/{thales_secworld}"
self.DeviceProxy = ""
self.uuid = ""
self.remote_file = ""
self.period = ""
self.thales_secworld = ""
self.use_mgmt_port = ""
self.overwrite = ""
for key, value in kwargs.items():
setattr(self, key, value)
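# Illustrative usage sketch (the device proxy instance and the export file
# name below are assumptions, not part of this module):
#
#   job = ThalesSecworld(thales_secworld='ax_secworld.tgz',
#                        period=3600,
#                        use_mgmt_port=1,
#                        DeviceProxy=device_proxy)
#
# The configured DeviceProxy then drives the CRUD operations inherited from
# A10BaseClass against the a10_url defined above.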
| {
"content_hash": "291fa780a119b0956763abc26ba19f7a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 220,
"avg_line_length": 44.56521739130435,
"alnum_prop": 0.64,
"repo_name": "amwelch/a10sdk-python",
"id": "4d69ffe7897cf1df951133e46cf04c6d1515b3c2",
"size": "2050",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/export/export_periodic_thales_secworld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
} |
"""
fabfile.py
Fabric file for setting up a fully functional staging+production
environment on heroku.
1. Creates a staging and a production app, adds all the necessary settings, and
provisions a list of free addons.
2. Creates a pipeline from staging to production.
3. Creates a git repository and sets up remotes.
Set everything up:
fab heroku_setup
Deploy to Staging:
git push staging master
Promote Staging slug to Production:
heroku pipeline:promote
Deploy directly to Production:
git push production master
"""
from fabric.contrib.console import confirm, prompt
from fabric.api import abort, env, local, settings, task
########## GLOBALS
env.run = 'heroku run python manage.py'
HEROKU_ADDONS = (
'cloudamqp:lemur',
'heroku-postgresql:dev',
'scheduler:standard',
'redistogo:nano',
'memcachier:dev',
'newrelic:stark',
'pgbackups:auto-month',
'sentry:developer',
'mandrill:starter',
'papertrail:choklad'
)
HEROKU_CONFIGS = (
'DJANGO_SETTINGS_MODULE=techpong.settings.prod',
#'AWS_ACCESS_KEY_ID=xxx',
#'AWS_SECRET_ACCESS_KEY=xxx',
#'AWS_STORAGE_BUCKET_NAME=xxx',
)
########## END GLOBALS
########## HELPERS
def cont(cmd, message):
"""Given a command, ``cmd``, and a message, ``message``, allow a user to
either continue or break execution if errors occur while executing ``cmd``.
:param str cmd: The command to execute on the local system.
:param str message: The message to display to the user on failure.
.. note::
``message`` should be phrased in the form of a question, as if ``cmd``'s
execution fails, we'll ask the user to press 'y' or 'n' to continue or
cancel execution, respectively.
Usage::
cont('heroku run ...', "Couldn't complete %s. Continue anyway?" % cmd)
"""
with settings(warn_only=True):
result = local(cmd, capture=True)
if message and result.failed:
print result.stderr
if not confirm(message):
abort('Stopped execution per user request.')
@task
def generate_secret_key(key_length=64):
"""Simple convenience function to randomly generate a 64 character key
you can stick in your settings/environment"""
import string, random
options = string.digits + string.letters + ".,!@#$%^&*()-_+={}"
print ''.join([random.choice(options) for i in range(key_length)])
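# Illustrative invocation (Fabric 1.x task-argument syntax; the task name
# comes from the function above):
#
#   fab generate_secret_key              # 64-character key
#   fab generate_secret_key:key_length=50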
########## END HELPERS
########## HEROKU MANAGEMENT
@task
def heroku_setup():
"""Set up everything you need on heroku. Creates a production app
(remote: production) and an identical staging app (remote: staging) and
does the following:
- Create new Heroku applications.
- Install all ``HEROKU_ADDONS``.
- Set all ``HEROKU_CONFIGS``.
- Initialize New Relic's monitoring add-on.
https://devcenter.heroku.com/articles/multiple-environments
NOTE: the production app gets ENVIRONMENT_TYPE=production and staging gets
ENVIRONMENT_TYPE=staging, so the code can tell which environment it is running
in (for example, so staging can use a non-production db follower).
"""
app_name = prompt('What name should this heroku app use?', default='techpong')
staging_name = '%s-staging' % app_name
staging_remote = 'staging'
production_remote = 'production'
# create the apps on heroku
cont('heroku apps:create %s --remote %s --addons %s' %
(staging_name, staging_remote, ','.join(HEROKU_ADDONS)),
"Failed to create the staging app on heroku. Continue anyway?")
cont('heroku apps:create %s --remote %s --addons %s' %
(app_name, production_remote, ','.join(HEROKU_ADDONS)),
"Failed to create the production app on heroku. Continue anyway?")
# set configs
for config in HEROKU_CONFIGS:
cont('heroku config:set %s --app=%s' % (config, staging_name),
"Failed to set %s on Staging. Continue anyway?" % config)
cont('heroku config:set %s --app=%s' % (config, app_name),
"Failed to set %s on Production. Continue anyway?" % config)
# set debug
cont('heroku config:set DEBUG=True --app=%s' % staging_name,
"Failed to set DEBUG on Staging. Continue anyway?")
cont('heroku config:set DEBUG=False --app=%s' % app_name,
"Failed to set DEBUG on Production. Continue anyway?")
# set environment type
cont('heroku config:set ENVIRONMENT_TYPE=staging --app=%s' % staging_name,
"Failed to set ENVIRONMENT_TYPE on Staging. Continue anyway?")
cont('heroku config:set ENVIRONMENT_TYPE=production --app=%s' % app_name,
"Failed to set ENVIRONMENT_TYPE on Production. Continue anyway?")
## # this is a buildpack that includes npm (the node package manager) which
## # makes it easy to include things like coffeescript or less compilers
## # set buildpack
## cont('heroku config:set BUILDPACK_URL=git://github.com/galuszkak/heroku-buildpack-django.git --app %s' % staging_name,
## "Failed to set BUILDPACK_URL. Continue anyway without npm_requirements?")
## cont('heroku config:set BUILDPACK_URL=git://github.com/galuszkak/heroku-buildpack-django.git --app %s' % app_name,
## "Failed to set BUILDPACK_URL. Continue anyway without npm_requirements?")
# set user-env-compile or versioned static assets won't work!
cont( 'heroku labs:enable user-env-compile --app=%s' % staging_name,
"Failed to set user-env-compile on Staging. This will block versioned static assets. Continue anyway?")
cont( 'heroku labs:enable user-env-compile --app=%s' % app_name,
"Failed to set user-env-compile on Production. This will block versioned static assets. Continue anyway?")
# create a pipeline from staging to production
cont( 'heroku labs:enable pipelines',
"Failed to enable Pipelines. Continue anyway?")
cont( 'heroku plugins:install git://github.com/heroku/heroku-pipeline.git',
"Failed to install pipelines plugin. Continue anyway?")
cont( 'heroku pipeline:add -a %s %s' % (staging_name, app_name),
"Failed to create pipeline from Staging to Production. Continue anyway?")
# start newrelic
cont( ('%(run)s newrelic-admin validate-config - stdout --app=' % env) + staging_name,
"Failed to initialize New Relic on Staging. Continue anyway?")
cont( ('%(run)s newrelic-admin validate-config - stdout --app=' % env) + app_name,
"Failed to initialize New Relic on Production. Continue anyway?")
# set git to default to staging
local('git init')
local('git config heroku.remote staging')
########## END HEROKU MANAGEMENT
| {
"content_hash": "3ec1640cd2cd1f0d5937b3289e3e6131",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 124,
"avg_line_length": 39.12865497076023,
"alnum_prop": 0.668958302196981,
"repo_name": "collingreen/startuppong",
"id": "3a988441d81642a8f5955a2dad2a6d32fa686c3a",
"size": "6691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "20209"
},
{
"name": "CSS",
"bytes": "294265"
},
{
"name": "HTML",
"bytes": "106499"
},
{
"name": "JavaScript",
"bytes": "1270513"
},
{
"name": "Makefile",
"bytes": "205"
},
{
"name": "Python",
"bytes": "229239"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import os
from typing import List
import click
import yaml
from blu.config import Config
from commander import CommanderImpl, MockCommander
from handbrakeCLI.handbrakeCLI_impl import HandbrakeCLI
from identifier.identifier_impl import Identifier
from makeMKV.makeMKV import MakeMKV
from makeMKV.model.drive import Drive
from makeMKV.model.enum.disc_type import DiscType
from makeMKV.model.stream import VideoStream
from makeMKV.model.title import Title
logger = logging.getLogger(__name__)
@click.command()
@click.option('--rip', '-r', multiple=True, is_flag=True, help='Rip media from disc')
@click.option('--config_location', '-C', help='Config file location. Default blu.yml')
@click.option('--log_config', '-L', help='Logging configuration file. Default logging.yml')
@click.option('--re-run', '-R', help='Re-run a previous run using output files', is_flag=True)
def blu(rip: bool, config_location: str, log_config: str, re_run: bool = False):
setup_logging(default_path=log_config)
config: Config = Config()
if config_location:
config.load(config_location)
else:
config.load('./blu.yml')
if rip:
if re_run:
makeMKV: MakeMKV = MakeMKV(MockCommander())
compressor: HandbrakeCLI = HandbrakeCLI(MockCommander())
else:
makeMKV: MakeMKV = MakeMKV(CommanderImpl())
compressor: HandbrakeCLI = HandbrakeCLI(CommanderImpl())
drives: List[Drive] = makeMKV.scan_drives()
identifier: Identifier = Identifier()
for drive in drives:
drive = makeMKV.scan_disc(drive)
identifier.identify(drive.disc)
if drive.disc.is_series():
ripSeries(compressor, config, drive, makeMKV, re_run)
else:
ripMovie(compressor, config, drive, makeMKV, re_run)
def ripMovie(compressor, config, drive, makeMKV, re_run):
value: Title = drive.disc.get_movie_title()
video_stream: VideoStream = value.getVideoStream()
container: str = str(config.cfg['general']['movies']['container'])
# str(config.cfg['general']['movies']['filename_format'] \
file_name: str = ('{title} ({year}) - {source} {resolution}p'
.format(title=drive.disc.get_nice_title(),
year=drive.disc.year,
source=drive.disc.type.toString(),
resolution=video_stream.video_size.y)) + "." + container
raw_dir: str = str(config.cfg['general']['movies']['location']['raw'])
compress_dir: str = str(config.cfg['general']['movies']['location']['compressed'])
if not re_run:
os.makedirs(compress_dir, exist_ok=True)
os.makedirs(raw_dir, exist_ok=True)
output = os.path.join(compress_dir, file_name)
raw_location = makeMKV.rip_disc(drive, raw_dir, value.id)
value.raw_location = os.path.join(raw_dir, file_name)
if not re_run:
os.replace(raw_location, value.raw_location)
# compressor.compressFile(input_file=value.raw_location,
# output_file=output,
# frame_rate=video_stream.frame_rate,
# width=video_stream.video_size.x,
# height=video_stream.video_size.y,
# quality=getQuality(config, drive))
value.output = output
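# Note: the compressFile() call above is currently commented out, so ripMovie()
# only rips and renames the title; value.output still records the compressed
# path that would have been produced by the compression step.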
def ripSeries(compressor, config, drive, makeMKV, re_run):
for key, value in drive.disc.titles.items():
video_stream: VideoStream = value.getVideoStream()
container: str = str(config.cfg['general']['series']['container'])
file_name: str = str(config.cfg['general']['series']['filename_format'] \
.format(series=value.series,
season=value.season,
episode=value.episode,
name=value.name,
source=drive.disc.type.toString(),
resolution=video_stream.video_size.y)) + "." + container
raw_dir: str = str(config.cfg['general']['series']['location']['raw'])
compress_dir: str = str(config.cfg['general']['series']['location']['compressed'])
if not re_run:
os.makedirs(compress_dir, exist_ok=True)
os.makedirs(raw_dir, exist_ok=True)
output = os.path.join(compress_dir, file_name)
raw_location = makeMKV.rip_disc(drive, raw_dir, value.id)
value.raw_location = os.path.join(raw_dir, file_name)
if not re_run:
os.replace(raw_location, value.raw_location)
compressor.compressFile(input_file=value.raw_location,
output_file=output,
frame_rate=video_stream.frame_rate,
width=video_stream.video_size.x,
height=video_stream.video_size.y,
quality=getQuality(config, drive))
value.output = output
def getQuality(config, drive):
quality: int = int(config.cfg['handbrake']['quality']['default'])
if drive.disc.type == DiscType.BRAY_TYPE_DISK:
quality = int(config.cfg['handbrake']['quality']['bluray'])
elif drive.disc.type == DiscType.DVD_TYPE_DISK:
quality = int(config.cfg['handbrake']['quality']['dvd'])
return quality
def setup_logging(
default_path='logging.yaml',
default_level=logging.INFO,
env_key='LOG_CFG'
):
"""Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
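# Example: the logging config path can be overridden at runtime through the
# environment variable named by env_key, e.g.
#
#   LOG_CFG=/etc/blu/logging.yml blu --rip
#
# (the `blu` console-script name and the config path are assumptions; the
# environment variable name comes from env_key above).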
if __name__ == '__main__':
blu()
| {
"content_hash": "b72a40c6eb2d957338c3a8a6739e9ba0",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 94,
"avg_line_length": 38.8343949044586,
"alnum_prop": 0.5852058389371823,
"repo_name": "coolman565/blu_two",
"id": "d4ebc2afdfe03805f8dcf15557f6a75f5ba88955",
"size": "6097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blu/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67741"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('movie', '0002_auto_20161115_1957'),
]
operations = [
migrations.RenameField(
model_name='movie',
old_name='watched',
new_name='number_of_times_watched',
),
migrations.AddField(
model_name='hiren',
name='created_at',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='hiren',
name='updated_at',
field=models.DateField(auto_now=True),
),
]
| {
"content_hash": "6f65960dc83b69a310bcc4f1c86c5d93",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.569620253164557,
"repo_name": "pyprism/Hiren-Movie",
"id": "3922e9d97f428074ece65a505cb24aa8457c5fb8",
"size": "863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "movie/migrations/0003_auto_20161115_2000.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28617"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("ddcz", "0112_emaillist_autofill"),
]
operations = [
migrations.RunSQL("ALTER TABLE `uzivatele_maillist` DROP PRIMARY KEY;"),
migrations.RunSQL(
"ALTER TABLE uzivatele_maillist MODIFY django_id INTEGER AUTO_INCREMENT NOT NULL PRIMARY KEY;"
),
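# MySQL does not accept a computed value in ALTER TABLE ... AUTO_INCREMENT,
# so the next statement is assembled as a string and executed through a
# prepared statement to move AUTO_INCREMENT past the current max django_id.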
migrations.RunSQL(
"""
SET @m = (SELECT IFNULL(MAX(django_id) + 1, 1) FROM uzivatele_maillist);
SET @s = CONCAT('ALTER TABLE uzivatele_maillist AUTO_INCREMENT=', @m);
PREPARE stmt1 FROM @s;
EXECUTE stmt1;
DEALLOCATE PREPARE stmt1;
"""
),
]
| {
"content_hash": "f2d511ad642310a1ea0fe7a240a08307",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 106,
"avg_line_length": 31.541666666666668,
"alnum_prop": 0.5640686922060766,
"repo_name": "dracidoupe/graveyard",
"id": "5e614e133bb20669a71386c3a4f797a8b2c50118",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcz/migrations/0113_emaillist_automagic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "4273"
},
{
"name": "CSS",
"bytes": "37578"
},
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "101149"
},
{
"name": "JavaScript",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "766548"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
} |
"""Enumerators for coordinate types"""
# -*- coding: utf-8 -*-
from dlkit.abstract_osid.osid.errors import NotFound
CELESTIAL_COORDINATE_TYPES = {
# Ecliptic Coordinate System #
'ECLIPTIC': 'Ecliptic',
# Equatorial Coordinate System #
'EQUATORIAL': 'Equatorial',
# Galactic Coordinate System #
'GCS': 'Galactic',
# Horizontal Altitude-Azimuth Coordinate System #
'HORIZON': 'Horizon',
# Supergalactic Coordinate System #
'SUPERGALACTIC': 'Supergalactic'
}
GEOGRAPHIC_COORDINATE_TYPES = {
# Earth Gravitational Model 1996 #
'EGM96': 'EGM96, Geodetic Earth Gravitational Model 1996 Coordinate',
# Geocentric #
'GEOCENTRIC': 'Geocentric',
# Geodetic Reference System 1980 #
'GRS80': 'GRS80, Geodetic Reference System 80 Coordinate',
# North American Datum of 1927 #
'NAD27': 'NAD27, Geodetic North American Datum of 1927 Coordinate',
# North American Datum of 1983 #
'NAD83': 'NAD83, Geodetic North American Datum of 1983 Coordinate',
# Maidenhead Locator System #
'MAIDENHEAD': 'Maidenhead, Maidenhead Locator',
# Military Grid Reference System #
'MGRS': 'MGRS, Military Grid Reference',
# World Geodetic System 1960 #
'WGS60': 'WGS60, World Geodetic System of 1960 Coordinate',
# World Geodetic System 1966 #
'WGS66': 'WGS66, World Geodetic System of 1966 Coordinate',
# World Geodetic System 1972 #
'WGS72': 'WGS72, World Geodetic System of 1972 Coordinate',
# World Geodetic System 1984 (used by GPS) #
'WGS84': 'WGS84, World Geodetic System of 1984 Coordinate',
# US Zip Codes #
'USPOSTAL': 'USPostal, United States Postal Code',
# Universal Polar Stereographic System #
'UPS': 'UPS, Universal Polar Stereographic Coordinate',
# Universal Transverse Mercator System #
'UTM': 'UTM, Universal Transverse Mercator Coordinate',
# AT&T V and H System #
'VH': 'V&H, AT&T V and H Coordinate',
# VOR-DME System #
'VOR': 'VOR-DME, VOR-DME Coordinate'
}
TYPE_SET = {
'CCT': CELESTIAL_COORDINATE_TYPES,
'GCT': GEOGRAPHIC_COORDINATE_TYPES
}
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
if name in CELESTIAL_COORDINATE_TYPES:
domain = 'Celestial Coordinate Systems'
coordinate_name = CELESTIAL_COORDINATE_TYPES[name]
elif name in GEOGRAPHIC_COORDINATE_TYPES:
domain = 'Geographic Coordinate Systems'
coordinate_name = GEOGRAPHIC_COORDINATE_TYPES[name]
else:
raise NotFound('Coordinate Type' + name)
return {
'authority': 'okapia.net',
'namespace': 'coordinate',
'identifier': name,
'domain': domain,
'display_name': coordinate_name + ' Type',
'display_label': coordinate_name,
'description': ('The type for the ' + coordinate_name + ' System.')
}
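# Example (values follow directly from the tables and template above):
#
#   get_type_data('wgs84') == {
#       'authority': 'okapia.net',
#       'namespace': 'coordinate',
#       'identifier': 'WGS84',
#       'domain': 'Geographic Coordinate Systems',
#       'display_name': 'WGS84, World Geodetic System of 1984 Coordinate Type',
#       'display_label': 'WGS84, World Geodetic System of 1984 Coordinate',
#       'description': 'The type for the WGS84, World Geodetic System of 1984 Coordinate System.'
#   }
#
# Unknown names raise dlkit.abstract_osid.osid.errors.NotFound.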
| {
"content_hash": "8e7b2abad016a7724432e605ccdbe24d",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 29,
"alnum_prop": 0.6588550385001674,
"repo_name": "mitsei/dlkit",
"id": "efd523fb9d06493c2a1a97e2e80d1ee9e491ec74",
"size": "2987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/primordium/locale/types/coordinate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
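def _example_sort_order():
    # Illustrative sketch added by the editor (service names are made up):
    # "web" links to "db", so sort_service_dicts() must emit "db" before "web".
    services = [
        {'name': 'web', 'links': ['db:database']},
        {'name': 'db'},
    ]
    return [service['name'] for service in sort_service_dicts(services)]  # ['db', 'web']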
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a ServiceCollection from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
insecure_registry=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
insecure_registry=insecure_registry,
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
def pull(self, service_names=None, insecure_registry=False):
for service in self.get_services(service_names, include_deps=True):
service.pull(insecure_registry=insecure_registry)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
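def _example_project_up(docker_client, service_dicts):
    # Illustrative sketch added by the editor (the client, the service dicts and
    # the 'web' service name are assumed inputs): build a Project from plain
    # service dicts and bring one service up together with its dependencies.
    project = Project.from_dicts('exampleproject', service_dicts, docker_client)
    return project.up(service_names=['web'], start_deps=True)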
| {
"content_hash": "fcfb774a62f35bced2a92b538129dfff",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 186,
"avg_line_length": 34.70670391061452,
"alnum_prop": 0.5621730382293763,
"repo_name": "feelobot/compose",
"id": "c5028492c408fa87ffc94fad1e2d9a0a0884ccab",
"size": "12425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compose/project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "278707"
},
{
"name": "Shell",
"bytes": "16016"
}
],
"symlink_target": ""
} |
def WebIDLTest(parser, harness):
threw = False
try:
results = parser.parse("""
interface VariadicConstraints1 {
void foo(byte... arg1, byte arg2);
};
""")
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
results = parser.parse("""
interface VariadicConstraints2 {
void foo(byte... arg1, optional byte arg2);
};
""")
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
results = parser.parse("""
interface VariadicConstraints3 {
void foo(optional byte... arg1);
};
""")
except:
threw = True
harness.ok(threw, "Should have thrown.")
| {
"content_hash": "81c29c95e24d82797fd289491682a530",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 57,
"avg_line_length": 21.384615384615383,
"alnum_prop": 0.5011990407673861,
"repo_name": "wilebeast/FireFox-OS",
"id": "9cba22c584254e527cef728bb80f7c2973b51772",
"size": "834",
"binary": false,
"copies": "106",
"ref": "refs/heads/master",
"path": "B2G/gecko/dom/bindings/parser/tests/test_variadic_constraints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Python wrappers for the Google Storage RESTful API."""
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import os
import urlparse
from . import api_utils
from . import common
from . import errors
from . import rest_api
try:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
def _get_storage_api(retry_params, account_id=None):
"""Returns storage_api instance for API methods.
Args:
retry_params: An instance of api_utils.RetryParams. If none,
thread's default will be used.
account_id: Internal-use only.
Returns:
A storage_api instance to handle urlfetch work to GCS.
On dev appserver, this instance by default will talk to a local stub
unless common.ACCESS_TOKEN is set. That token will be used to talk
to the real GCS.
"""
api = _StorageApi(_StorageApi.full_control_scope,
service_account_id=account_id,
retry_params=retry_params)
if common.local_run() and not common.get_access_token():
api.api_url = common.local_api_url()
if common.get_access_token():
api.token = common.get_access_token()
return api
class _StorageApi(rest_api._RestApi):
"""A simple wrapper for the Google Storage RESTful API.
WARNING: Do NOT directly use this api. It's an implementation detail
and is subject to change at any release.
All async methods have similar args and returns.
Args:
path: The path to the Google Storage object or bucket, e.g.
'/mybucket/myfile' or '/mybucket'.
**kwd: Options for urlfetch. e.g.
headers={'content-type': 'text/plain'}, payload='blah'.
Returns:
A ndb Future. When fulfilled, future.get_result() should return
a tuple of (status, headers, content) that represents a HTTP response
of Google Cloud Storage XML API.
"""
api_url = 'https://storage.googleapis.com'
read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'
def __getstate__(self):
"""Store state as part of serialization/pickling.
Returns:
A tuple (of dictionaries) with the state of this object
"""
return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the tuple from a __getstate__ call
"""
superstate, localstate = state
super(_StorageApi, self).__setstate__(superstate)
self.api_url = localstate['api_url']
@api_utils._eager_tasklet
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
"""Inherit docs.
This method translates urlfetch exceptions to more service specific ones.
"""
if headers is None:
headers = {}
if 'x-goog-api-version' not in headers:
headers['x-goog-api-version'] = '2'
headers['accept-encoding'] = 'gzip, *'
try:
resp_tuple = yield super(_StorageApi, self).do_request_async(
url, method=method, headers=headers, payload=payload,
deadline=deadline, callback=callback)
except urlfetch.DownloadError, e:
raise errors.TimeoutError(
'Request to Google Cloud Storage timed out.', e)
raise ndb.Return(resp_tuple)
def post_object_async(self, path, **kwds):
"""POST to an object."""
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def put_object_async(self, path, **kwds):
"""PUT an object."""
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def get_object_async(self, path, **kwds):
"""GET an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def delete_object_async(self, path, **kwds):
"""DELETE an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def head_object_async(self, path, **kwds):
"""HEAD an object.
Depending on request headers, HEAD returns various object properties,
e.g. Content-Length, Last-Modified, and ETag.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def get_bucket_async(self, path, **kwds):
"""GET a bucket."""
return self.do_request_async(self.api_url + path, 'GET', **kwds)
_StorageApi = rest_api.add_sync_methods(_StorageApi)
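def _example_head_object(path='/my-bucket/my-file'):
  # Illustrative sketch added by the editor (the path is made up):
  # add_sync_methods() above is expected to attach blocking counterparts to the
  # *_async tasklets, which the rest of this module relies on (head_object,
  # post_object, put_object).
  api = _get_storage_api(retry_params=None)
  status, headers, content = api.head_object(path)
  return status, headers, content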
class ReadBuffer(object):
"""A class for reading Google storage files."""
DEFAULT_BUFFER_SIZE = 1024 * 1024
MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE
def __init__(self,
api,
path,
buffer_size=DEFAULT_BUFFER_SIZE,
max_request_size=MAX_REQUEST_SIZE):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
buffer_size: buffer size. The ReadBuffer keeps
one buffer. But there may be a pending future that contains
a second buffer. This size must be less than max_request_size.
max_request_size: Max bytes to request in one urlfetch.
"""
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
assert buffer_size <= max_request_size
self._buffer_size = buffer_size
self._max_request_size = max_request_size
self._offset = 0
self._buffer = _Buffer()
self._etag = None
self._request_next_buffer()
status, headers, content = self._api.head_object(path)
errors.check_status(status, [200], path, resp_headers=headers, body=content)
self._file_size = long(headers['content-length'])
self._check_etag(headers.get('etag'))
if self._file_size == 0:
self._buffer_future = None
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the read buffer are not stored, only the current offset for
data read by the client. A new read buffer is established at unpickling.
    The head information for the object (file size and etag) is stored to
reduce startup and ensure the file has not changed.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'buffer_size': self._buffer_size,
'request_size': self._max_request_size,
'etag': self._etag,
'size': self._file_size,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
Along with restoring the state, pre-fetch the next read buffer.
"""
self._api = state['api']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
self._buffer_size = state['buffer_size']
self._max_request_size = state['request_size']
self._etag = state['etag']
self._file_size = state['size']
self._offset = state['offset']
self._buffer = _Buffer()
self.closed = state['closed']
self._buffer_future = None
if self._remaining() and not self.closed:
self._request_next_buffer()
def __iter__(self):
"""Iterator interface.
Note the ReadBuffer container itself is the iterator. It's
(quote PEP0234)
    'destructive: they consume all the values and a second iterator
cannot easily be created that iterates independently over the same values.
You could open the file for the second time, or seek() to the beginning.'
Returns:
Self.
"""
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readline(self, size=-1):
"""Read one line delimited by '\n' from the file.
A trailing newline character is kept in the string. It may be absent when a
file ends with an incomplete line. If the size argument is non-negative,
it specifies the maximum string size (counting the newline) to return.
A negative size is the same as unspecified. Empty string is returned
only when EOF is encountered immediately.
Args:
size: Maximum number of bytes to read. If not specified, readline stops
only on '\n' or EOF.
Returns:
The data read as a string.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if size == 0 or not self._remaining():
return ''
data_list = []
newline_offset = self._buffer.find_newline(size)
while newline_offset < 0:
data = self._buffer.read(size)
size -= len(data)
self._offset += len(data)
data_list.append(data)
if size == 0 or not self._remaining():
return ''.join(data_list)
self._buffer.reset(self._buffer_future.get_result())
self._request_next_buffer()
newline_offset = self._buffer.find_newline(size)
data = self._buffer.read_to_offset(newline_offset + 1)
self._offset += len(data)
data_list.append(data)
return ''.join(data_list)
def read(self, size=-1):
"""Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not self._remaining():
return ''
data_list = []
while True:
remaining = self._buffer.remaining()
if size >= 0 and size < remaining:
data_list.append(self._buffer.read(size))
self._offset += size
break
else:
size -= remaining
self._offset += remaining
data_list.append(self._buffer.read())
if self._buffer_future is None:
if size < 0 or size >= self._remaining():
needs = self._remaining()
else:
needs = size
data_list.extend(self._get_segments(self._offset, needs))
self._offset += needs
break
if self._buffer_future:
self._buffer.reset(self._buffer_future.get_result())
self._buffer_future = None
if self._buffer_future is None:
self._request_next_buffer()
return ''.join(data_list)
def _remaining(self):
return self._file_size - self._offset
def _request_next_buffer(self):
"""Request next buffer.
    Requires self._offset and self._buffer to be in a consistent state.
"""
self._buffer_future = None
next_offset = self._offset + self._buffer.remaining()
if not hasattr(self, '_file_size') or next_offset != self._file_size:
self._buffer_future = self._get_segment(next_offset,
self._buffer_size)
def _get_segments(self, start, request_size):
"""Get segments of the file from Google Storage as a list.
A large request is broken into segments to avoid hitting urlfetch
response size limit. Each segment is returned from a separate urlfetch.
Args:
start: start offset to request. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request.
Returns:
A list of file segments in order
"""
if not request_size:
return []
end = start + request_size
futures = []
while request_size > self._max_request_size:
futures.append(self._get_segment(start, self._max_request_size))
request_size -= self._max_request_size
start += self._max_request_size
if start < end:
futures.append(self._get_segment(start, end-start))
return [fut.get_result() for fut in futures]
@ndb.tasklet
def _get_segment(self, start, request_size):
"""Get a segment of the file from Google Storage.
Args:
start: start offset of the segment. Inclusive. Have to be within the
range of the file.
request_size: number of bytes to request. Have to be small enough
for a single urlfetch request. May go over the logical range of the
file.
Yields:
a segment [start, start + request_size) of the file.
Raises:
ValueError: if the file has changed while reading.
"""
end = start + request_size - 1
content_range = '%d-%d' % (start, end)
headers = {'Range': 'bytes=' + content_range}
status, resp_headers, content = yield self._api.get_object_async(
self._path, headers=headers)
errors.check_status(status, [200, 206], self._path, headers, resp_headers,
body=content)
self._check_etag(resp_headers.get('etag'))
raise ndb.Return(content)
def _check_etag(self, etag):
"""Check if etag is the same across requests to GCS.
If self._etag is None, set it. If etag is set, check that the new
etag equals the old one.
In the __init__ method, we fire one HEAD and one GET request using
ndb tasklet. One of them would return first and set the first value.
Args:
etag: etag from a GCS HTTP response. None if etag is not part of the
response header. It could be None for example in the case of GCS
composite file.
Raises:
ValueError: if two etags are not equal.
"""
if etag is None:
return
elif self._etag is None:
self._etag = etag
elif self._etag != etag:
raise ValueError('File on GCS has changed while reading.')
def close(self):
self.closed = True
self._buffer = None
self._buffer_future = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def seek(self, offset, whence=os.SEEK_SET):
"""Set the file's current offset.
Note if the new offset is out of bound, it is adjusted to either 0 or EOF.
Args:
offset: seek offset as number.
whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
(seek relative to the end, offset should be negative).
Raises:
IOError: When this buffer is closed.
ValueError: When whence is invalid.
"""
self._check_open()
self._buffer.reset()
self._buffer_future = None
if whence == os.SEEK_SET:
self._offset = offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = self._file_size + offset
else:
raise ValueError('Whence mode %s is invalid.' % str(whence))
self._offset = min(self._offset, self._file_size)
self._offset = max(self._offset, 0)
if self._remaining():
self._request_next_buffer()
def tell(self):
"""Tell the file's current offset.
Returns:
current offset in reading this file.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
return self._offset
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return True
def readable(self):
return True
def writable(self):
return False
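def _example_read(path='/my-bucket/my-file.txt'):
  # Illustrative sketch added by the editor (bucket and object names are made
  # up): ReadBuffer behaves like a read-only file object -- readline(), read(),
  # iteration, seek()/tell() and the context-manager protocol are supported.
  api = _get_storage_api(retry_params=None)
  with ReadBuffer(api, path) as gcs_file:
    first_line = gcs_file.readline()
    remainder = gcs_file.read()
  return first_line, remainder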
class _Buffer(object):
"""In memory buffer."""
def __init__(self):
self.reset()
def reset(self, content='', offset=0):
self._buffer = content
self._offset = offset
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset)
def read_to_offset(self, offset):
"""Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
"""
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
def remaining(self):
return len(self._buffer) - self._offset
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size)
class StreamingBuffer(object):
"""A class for creating large objects using the 'resumable' API.
The API is a subset of the Python writable stream API sufficient to
support writing zip files using the zipfile module.
The exact sequence of calls and use of headers is documented at
https://developers.google.com/storage/docs/developer-guide#unknownresumables
"""
_blocksize = 256 * 1024
_flushsize = 8 * _blocksize
_maxrequestsize = 9 * 4 * _blocksize
def __init__(self,
api,
path,
content_type=None,
gcs_headers=None):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
      content_type: Optional content-type; if not set, the content-type
        decision is delegated to Google Cloud Storage.
      gcs_headers: additional gs headers as a str->str dict, e.g.
{'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Raises:
IOError: When this location can not be found.
"""
assert self._maxrequestsize > self._blocksize
assert self._maxrequestsize % self._blocksize == 0
assert self._maxrequestsize >= self._flushsize
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
self._buffer = collections.deque()
self._buffered = 0
self._written = 0
self._offset = 0
headers = {'x-goog-resumable': 'start'}
if content_type:
headers['content-type'] = content_type
if gcs_headers:
headers.update(gcs_headers)
status, resp_headers, content = self._api.post_object(path, headers=headers)
errors.check_status(status, [201], path, headers, resp_headers,
body=content)
loc = resp_headers.get('location')
if not loc:
raise IOError('No location header found in 201 response')
parsed = urlparse.urlparse(loc)
self._path_with_token = '%s?%s' % (self._path, parsed.query)
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the write buffer are stored. Writes to the underlying
storage are required to be on block boundaries (_blocksize) except for the
last write. In the worst case the pickled version of this object may be
slightly larger than the blocksize.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'path_token': self._path_with_token,
'buffer': self._buffer,
'buffered': self._buffered,
'written': self._written,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
"""
self._api = state['api']
self._path_with_token = state['path_token']
self._buffer = state['buffer']
self._buffered = state['buffered']
self._written = state['written']
self._offset = state['offset']
self.closed = state['closed']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
def write(self, data):
"""Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
"""
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._flushsize:
self._flush()
def flush(self):
"""Flush as much as possible to GCS.
GCS *requires* that all writes except for the final one align on
256KB boundaries. So the internal buffer may still have < 256KB bytes left
after flush.
"""
self._check_open()
self._flush(finish=False)
def tell(self):
"""Return the total number of bytes passed to write() so far.
(There is no seek() method.)
"""
return self._offset
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def _flush(self, finish=False):
"""Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
blocksize_or_zero = 0 if finish else self._blocksize
while self._buffered >= blocksize_or_zero:
buffer = []
buffered = 0
excess = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
buffer.append(buf)
buffered += size
if buffered >= self._maxrequestsize:
excess = buffered - self._maxrequestsize
break
if self._buffered < blocksize_or_zero and buffered >= blocksize_or_zero:
excess = buffered % self._blocksize
break
if excess:
over = buffer.pop()
size = len(over)
assert size >= excess
buffered -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
buffer.append(head)
buffered += len(head)
data = ''.join(buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break
def _send_data(self, data, start_offset, file_len):
"""Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
"""
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
def _get_offset_from_gcs(self):
"""Get the last offset that has been written to GCS.
This is a utility method that does not modify self.
Returns:
an int of the last offset written to GCS by this upload, inclusive.
-1 means nothing has been written.
"""
headers = {'content-range': 'bytes */*'}
status, response_headers, content = self._api.put_object(
self._path_with_token, headers=headers)
errors.check_status(status, [308], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
val = response_headers.get('range')
if val is None:
return -1
_, offset = val.rsplit('-', 1)
return int(offset)
def _force_close(self, file_length=None):
"""Close this buffer on file_length.
Finalize this upload immediately on file_length.
Contents that are still in memory will not be uploaded.
This is a utility method that does not modify self.
Args:
file_length: file length. Must match what has been uploaded. If None,
it will be queried from GCS.
"""
if file_length is None:
file_length = self._get_offset_from_gcs() + 1
self._send_data('', 0, file_length)
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return False
def readable(self):
return False
def writable(self):
return True
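def _example_streaming_write(path='/my-bucket/report.csv'):
  # Illustrative sketch added by the editor (bucket and object names are made
  # up): StreamingBuffer starts a resumable upload in its constructor and
  # finalizes the object when close() returns, so it works as a context manager.
  api = _get_storage_api(retry_params=None)
  with StreamingBuffer(api, path, content_type='text/csv') as gcs_file:
    gcs_file.write('a,b,c\n')
    gcs_file.write('1,2,3\n')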
| {
"content_hash": "ba2201906bde5a32a5a0119a8af0894f",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 80,
"avg_line_length": 29.690420560747665,
"alnum_prop": 0.629549478654338,
"repo_name": "gdgjodhpur/appengine-opencv-sudoku-python",
"id": "9bfb2ee430cc7c68ab02b9fb8aa73a44db25b980",
"size": "26011",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "cloudstorage/storage_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125106"
}
],
"symlink_target": ""
} |
"""Tests for the event object filter."""
import unittest
from plaso.containers import events
from plaso.filters import event_filter
from plaso.lib import errors
from tests.filters import test_lib
class EventObjectFilterTest(test_lib.FilterTestCase):
"""Tests for the event object filter."""
def testCompilerFilter(self):
"""Tests the CompileFilter function."""
test_filter = event_filter.EventObjectFilter()
test_filter.CompileFilter(
'some_stuff is "random" and other_stuff is not "random"')
test_filter.CompileFilter('timestamp is "2020-12-23 15:00:00"')
test_filter.CompileFilter('timestamp is DATETIME("2020-12-23T15:00:00")')
test_filter.CompileFilter('filename contains PATH("/etc/issue")')
with self.assertRaises(errors.ParseError):
test_filter.CompileFilter(
'SELECT stuff FROM machine WHERE conditions are met')
with self.assertRaises(errors.ParseError):
test_filter.CompileFilter(
'/tmp/file_that_most_likely_does_not_exist')
with self.assertRaises(errors.ParseError):
test_filter.CompileFilter(
'some random stuff that is destined to fail')
with self.assertRaises(errors.ParseError):
test_filter.CompileFilter(
'some_stuff is "random" and other_stuff ')
def testMatch(self):
"""Tests the Match function."""
test_filter = event_filter.EventObjectFilter()
test_filter.CompileFilter('timestamp is DATETIME("2020-12-23T15:00:00")')
event = events.EventObject()
event.timestamp = 1608735600000000
result = test_filter.Match(event, None, None, None)
self.assertTrue(result)
test_filter = event_filter.EventObjectFilter()
test_filter.CompileFilter('filename contains PATH("etc/issue")')
event_data = events.EventData()
event_data.filename = '/usr/local/etc/issue'
result = test_filter.Match(None, event_data, None, None)
self.assertTrue(result)
event_data.filename = '/etc/issue.net'
result = test_filter.Match(None, event_data, None, None)
self.assertFalse(result)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d66034d3d25fa60116223c73269c3117",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 29.87323943661972,
"alnum_prop": 0.7006129184347006,
"repo_name": "joachimmetz/plaso",
"id": "e78d96d397f7b7dc0f618cddc73628b5f656be2e",
"size": "2168",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/filters/event_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345755"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
} |
from test.test_support import verbose, TestSkipped
import locale
import sys
if sys.platform == 'darwin':
raise TestSkipped("Locale support on MacOSX is minimal and cannot be tested")
oldlocale = locale.setlocale(locale.LC_NUMERIC)
if sys.platform.startswith("win"):
tlocs = ("en",)
else:
tlocs = ("en_US.UTF-8", "en_US.US-ASCII", "en_US")
for tloc in tlocs:
try:
locale.setlocale(locale.LC_NUMERIC, tloc)
break
except locale.Error:
continue
else:
raise ImportError, "test locale not supported (tried %s)"%(', '.join(tlocs))
def testformat(formatstr, value, grouping = 0, output=None, func=locale.format):
if verbose:
if output:
print "%s %% %s =? %s ..." %\
(repr(formatstr), repr(value), repr(output)),
else:
print "%s %% %s works? ..." % (repr(formatstr), repr(value)),
result = func(formatstr, value, grouping = grouping)
if output and result != output:
if verbose:
print 'no'
print "%s %% %s == %s != %s" %\
(repr(formatstr), repr(value), repr(result), repr(output))
else:
if verbose:
print "yes"
try:
# On Solaris 10, the thousands_sep is the empty string
sep = locale.localeconv()['thousands_sep']
testformat("%f", 1024, grouping=1, output='1%s024.000000' % sep)
testformat("%f", 102, grouping=1, output='102.000000')
testformat("%f", -42, grouping=1, output='-42.000000')
testformat("%+f", -42, grouping=1, output='-42.000000')
testformat("%20.f", -42, grouping=1, output=' -42')
testformat("%+10.f", -4200, grouping=1, output=' -4%s200' % sep)
testformat("%-10.f", 4200, grouping=1, output='4%s200 ' % sep)
    # Invoke getpreferredencoding to make sure it does not cause exceptions.
locale.getpreferredencoding()
# === Test format() with more complex formatting strings
# test if grouping is independent from other characters in formatting string
testformat("One million is %i", 1000000, grouping=1,
output='One million is 1%s000%s000' % (sep, sep),
func=locale.format_string)
testformat("One million is %i", 1000000, grouping=1,
output='One million is 1%s000%s000' % (sep, sep),
func=locale.format_string)
# test dots in formatting string
testformat(".%f.", 1000.0, output='.1000.000000.', func=locale.format_string)
# test floats
testformat("--> %10.2f", 1000.0, grouping=1, output='--> 1%s000.00' % sep,
func=locale.format_string)
# test asterisk formats
testformat("%10.*f", (2, 1000.0), grouping=0, output=' 1000.00',
func=locale.format_string)
testformat("%*.*f", (10, 2, 1000.0), grouping=1, output=' 1%s000.00' % sep,
func=locale.format_string)
# test more-in-one
testformat("int %i float %.2f str %s", (1000, 1000.0, 'str'), grouping=1,
output='int 1%s000 float 1%s000.00 str str' % (sep, sep),
func=locale.format_string)
finally:
locale.setlocale(locale.LC_NUMERIC, oldlocale)
# Test BSD Rune locale's bug for isctype functions.
def teststrop(s, method, output):
if verbose:
print "%s.%s() =? %s ..." % (repr(s), method, repr(output)),
result = getattr(s, method)()
if result != output:
if verbose:
print "no"
print "%s.%s() == %s != %s" % (repr(s), method, repr(result),
repr(output))
elif verbose:
print "yes"
try:
if sys.platform == 'sunos5':
# On Solaris, in en_US.UTF-8, \xa0 is a space
raise locale.Error
oldlocale = locale.setlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_CTYPE, 'en_US.UTF-8')
except locale.Error:
pass
else:
try:
teststrop('\x20', 'isspace', True)
teststrop('\xa0', 'isspace', False)
teststrop('\xa1', 'isspace', False)
teststrop('\xc0', 'isalpha', False)
teststrop('\xc0', 'isalnum', False)
teststrop('\xc0', 'isupper', False)
teststrop('\xc0', 'islower', False)
teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc'])
teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0')
teststrop('\xcc\x85', 'lower', '\xcc\x85')
teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0')
finally:
locale.setlocale(locale.LC_CTYPE, oldlocale)
| {
"content_hash": "82f11ca81d68a6a292a7a2852246c849",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 81,
"avg_line_length": 38.721739130434784,
"alnum_prop": 0.5838760386256456,
"repo_name": "MalloyPower/parsing-python",
"id": "9e264b9c4ee4389bc4c7dd08311d99c9c19167b9",
"size": "4453",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.5/Lib/test/test_locale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""gRPC's Python API."""
import abc
import enum
import sys
import six
from grpc._cython import cygrpc as _cygrpc
############################## Future Interface ###############################
class FutureTimeoutError(Exception):
"""Indicates that a method call on a Future timed out."""
class FutureCancelledError(Exception):
"""Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
"""A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
@abc.abstractmethod
def cancel(self):
"""Attempts to cancel the computation.
This method does not block.
Returns:
bool:
Returns True if the computation was canceled.
Returns False under all other circumstances, for example:
1. computation has begun and could not be canceled.
2. computation has finished
3. computation is scheduled for execution and it is impossible to
determine its state without blocking.
"""
raise NotImplementedError()
@abc.abstractmethod
def cancelled(self):
"""Describes whether the computation was cancelled.
This method does not block.
Returns:
bool:
Returns True if the computation was cancelled before its result became
available.
False under all other circumstances, for example:
1. computation was not cancelled.
2. computation's result is available.
"""
raise NotImplementedError()
@abc.abstractmethod
def running(self):
"""Describes whether the computation is taking place.
This method does not block.
Returns:
bool:
Returns True if the computation is scheduled for execution or currently
executing.
Returns False if the computation already executed or was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def done(self):
"""Describes whether the computation has taken place.
This method does not block.
Returns:
bool:
Returns True if the computation already executed or was cancelled.
Returns False if the computation is scheduled for execution or currently
executing.
This is exactly opposite of the running() method's result.
"""
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
"""Returns the result of the computation or raises its exception.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
finish or be cancelled. If None, the call will block until the
        computation's termination.
Returns:
The return value of the computation.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
"""Return the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled. If None, the call will block until the
        computation's termination.
Returns:
The exception raised by the computation, or None if the computation did
not raise an exception.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled. If None, the call will block until the
        computation's termination.
Returns:
The traceback of the exception raised by the computation, or None if the
computation did not raise an exception.
Raises:
FutureTimeoutError: If a timeout value is passed and the computation does
not terminate within the allotted time.
FutureCancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
"""Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
If the computation has already completed, the callback will be called
immediately.
Args:
fn: A callable taking this Future object as its single parameter.
"""
raise NotImplementedError()
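def _example_future_usage(response_future):
  # Illustrative sketch added by the editor (response_future is assumed to be a
  # Future returned by an asynchronous RPC invocation): either block on
  # result(), or register a completion callback.
  def _record_outcome(fut):
    _ = fut.exception()  # None on success, an RpcError on failure
  response_future.add_done_callback(_record_outcome)
  return response_future.result(timeout=10)  # may raise FutureTimeoutError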
################################ gRPC Enums ##################################
@enum.unique
class ChannelConnectivity(enum.Enum):
"""Mirrors grpc_connectivity_state in the gRPC Core.
Attributes:
IDLE: The channel is idle.
CONNECTING: The channel is connecting.
READY: The channel is ready to conduct RPCs.
TRANSIENT_FAILURE: The channel has seen a failure from which it expects to
recover.
SHUTDOWN: The channel has seen a failure from which it cannot recover.
"""
IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
READY = (_cygrpc.ConnectivityState.ready, 'ready')
TRANSIENT_FAILURE = (_cygrpc.ConnectivityState.transient_failure,
'transient failure')
SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
@enum.unique
class StatusCode(enum.Enum):
"""Mirrors grpc_status_code in the gRPC Core."""
OK = (_cygrpc.StatusCode.ok, 'ok')
CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
INVALID_ARGUMENT = (_cygrpc.StatusCode.invalid_argument, 'invalid argument')
DEADLINE_EXCEEDED = (_cygrpc.StatusCode.deadline_exceeded,
'deadline exceeded')
NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
PERMISSION_DENIED = (_cygrpc.StatusCode.permission_denied,
'permission denied')
RESOURCE_EXHAUSTED = (_cygrpc.StatusCode.resource_exhausted,
'resource exhausted')
FAILED_PRECONDITION = (_cygrpc.StatusCode.failed_precondition,
'failed precondition')
ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
############################# gRPC Exceptions ################################
class RpcError(Exception):
"""Raised by the gRPC library to indicate non-OK-status RPC termination."""
############################## Shared Context ################################
class RpcContext(six.with_metaclass(abc.ABCMeta)):
"""Provides RPC-related information and action."""
@abc.abstractmethod
def is_active(self):
"""Describes whether the RPC is active or has terminated.
Returns:
bool:
True if RPC is active, False otherwise.
"""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out, or None if no deadline was specified for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def cancel(self):
"""Cancels the RPC.
Idempotent and has no effect if the RPC has already terminated.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
"""Registers a callback to be called on RPC termination.
Args:
callback: A no-parameter callable to be called on RPC termination.
Returns:
bool:
True if the callback was added and will be called later; False if the
callback was not added and will not be called (because the RPC
already terminated or some other reason).
"""
raise NotImplementedError()
######################### Invocation-Side Context ############################
class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""Invocation-side utility object for an RPC."""
@abc.abstractmethod
def initial_metadata(self):
"""Accesses the initial metadata sent by the server.
This method blocks until the value is available.
Returns:
The initial :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def trailing_metadata(self):
"""Accesses the trailing metadata sent by the server.
This method blocks until the value is available.
Returns:
The trailing :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def code(self):
"""Accesses the status code sent by the server.
This method blocks until the value is available.
Returns:
The StatusCode value for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def details(self):
"""Accesses the details sent by the server.
This method blocks until the value is available.
Returns:
The details string of the RPC.
"""
raise NotImplementedError()
############ Authentication & Authorization Interfaces & Classes #############
class ChannelCredentials(object):
"""An encapsulation of the data required to create a secure Channel.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions. For
example, ssl_channel_credentials returns an instance, and secure_channel
consumes an instance of this class.
"""
def __init__(self, credentials):
self._credentials = credentials
class CallCredentials(object):
"""An encapsulation of the data required to assert an identity over a
channel.
A CallCredentials may be composed with ChannelCredentials to always assert
identity for every call over that Channel.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
def __init__(self, credentials):
self._credentials = credentials
class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
"""Provides information to call credentials metadata plugins.
Attributes:
service_url: A string URL of the service being called into.
method_name: A string of the fully qualified method name being called.
"""
class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
"""Callback object received by a metadata plugin."""
def __call__(self, metadata, error):
"""Inform the gRPC runtime of the metadata to construct a
CallCredentials.
Args:
metadata: The :term:`metadata` used to construct the CallCredentials.
error: An Exception to indicate error or None to indicate success.
"""
raise NotImplementedError()
class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
"""A specification for custom authentication."""
def __call__(self, context, callback):
"""Implements authentication by passing metadata to a callback.
Implementations of this method must not block.
Args:
context: An AuthMetadataContext providing information on the RPC that the
plugin is being called to authenticate.
callback: An AuthMetadataPluginCallback to be invoked either synchronously
or asynchronously.
"""
raise NotImplementedError()
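class _ExampleStaticTokenPlugin(AuthMetadataPlugin):
  """Illustrative sketch added by the editor (the token value is an assumed
  input): a plugin hands a metadata sequence and an error (None on success) to
  the callback, and must do so without blocking."""
  def __init__(self, token):
    self._token = token
  def __call__(self, context, callback):
    callback((('authorization', 'Bearer %s' % self._token),), None)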
class ServerCredentials(object):
"""An encapsulation of the data required to open a secure port on a Server.
This class has no supported interface - it exists to define the type of its
instances and its instances exist to be passed to other functions.
"""
def __init__(self, credentials):
self._credentials = credentials
######################## Multi-Callable Interfaces ###########################
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-unary RPC from client-side."""
@abc.abstractmethod
def __call__(self, request, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def with_call(self, request, timeout=None, metadata=None, credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
      timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC and a Call value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request, timeout=None, metadata=None, credentials=None):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
      RPC completion, the returned Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
"""
raise NotImplementedError()
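def _example_unary_unary_styles(multi_callable, request):
  # Illustrative sketch added by the editor (multi_callable and request are
  # assumed inputs): the three invocation styles documented above differ only
  # in what they return.
  response = multi_callable(request, timeout=5)                  # response value
  response_again, call = multi_callable.with_call(request, timeout=5)
  response_future = multi_callable.future(request, timeout=5)    # Call + Future
  return response, response_again, call.code(), response_future.result()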
class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-stream RPC from client-side."""
@abc.abstractmethod
def __call__(self, request, timeout=None, metadata=None, credentials=None):
"""Invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: An optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and an iterator of response
values. Drawing response values from the returned Call-iterator may
raise RpcError indicating termination of the RPC with non-OK status.
"""
raise NotImplementedError()
class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-unary RPC from client-side."""
@abc.abstractmethod
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also implement grpc.Call, affording methods
such as metadata, code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Synchronously invokes the underlying RPC on the client.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
The response value for the RPC and a Call object for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Asynchronously invokes the underlying RPC on the client.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the returned Call-Future's result value will be the
response message of the RPC. Should the RPC terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
"""
raise NotImplementedError()
class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-stream RPC on client-side."""
@abc.abstractmethod
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Invokes the underlying RPC on the client.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and an iterator of response
values. Drawing response values from the returned Call-iterator may
raise RpcError indicating termination of the RPC with non-OK status.
"""
raise NotImplementedError()
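# Illustrative sketch only -- not part of the gRPC API surface. Assuming
# `stream_stream_callable` was obtained from a Channel and `requests` is an
# iterable of request messages, a bidirectional-streaming call is driven by
# passing a request iterator and consuming the returned Call-iterator:
def _example_stream_stream_invocation(stream_stream_callable, requests):
    response_iterator = stream_stream_callable(iter(requests), timeout=30)
    for response in response_iterator:  # may raise RpcError on non-OK status
        yield response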
############################# Channel Interface ##############################
class Channel(six.with_metaclass(abc.ABCMeta)):
"""Affords RPC invocation via generic methods on client-side."""
@abc.abstractmethod
def subscribe(self, callback, try_to_connect=False):
"""Subscribe to this Channel's connectivity state machine.
A Channel may be in any of the states described by ChannelConnectivity.
This method allows the application to monitor the state transitions.
The typical use case is to debug or gain better visibility into gRPC
runtime's state.
Args:
callback: A callable to be invoked with ChannelConnectivity argument.
ChannelConnectivity describes the current state of the channel.
The callable will be invoked immediately upon subscription and again for
every change to ChannelConnectivity until it is unsubscribed or this
Channel object goes out of scope.
try_to_connect: A boolean indicating whether or not this Channel should
attempt to connect immediately. If set to False, gRPC runtime decides
when to connect.
"""
raise NotImplementedError()
@abc.abstractmethod
def unsubscribe(self, callback):
"""Unsubscribes a subscribed callback from this Channel's connectivity.
Args:
callback: A callable previously registered with this Channel from having
been passed to its "subscribe" method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
"""Creates a UnaryStreamMultiCallable for a unary-stream method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
Returns:
A UnaryStreamMultiCallable value for the named unary-stream method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
"""Creates a StreamUnaryMultiCallable for a stream-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
Returns:
A StreamUnaryMultiCallable value for the named stream-unary method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
"""Creates a StreamStreamMultiCallable for a stream-stream method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
Returns:
A StreamStreamMultiCallable value for the named stream-stream method.
"""
raise NotImplementedError()
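# Illustrative sketch only. The method path and raw-bytes (de)serialization
# below are assumptions for a hypothetical service; applications normally use
# generated stub classes rather than calling Channel methods directly.
def _example_channel_multicallable(channel):
    echo = channel.unary_unary(
        '/example.EchoService/Echo',
        request_serializer=None,  # None: send the request bytes as-is
        response_deserializer=None)  # None: return the raw response bytes
    return echo(b'ping', timeout=5)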
########################## Service-Side Context ##############################
class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""A context object passed to method implementations."""
@abc.abstractmethod
def invocation_metadata(self):
"""Accesses the metadata from the sent by the client.
Returns:
The invocation :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def peer(self):
"""Identifies the peer that invoked the RPC being serviced.
Returns:
A string identifying the peer that invoked the RPC being serviced.
The string format is determined by gRPC runtime.
"""
raise NotImplementedError()
@abc.abstractmethod
def peer_identities(self):
"""Gets one or more peer identity(s).
Equivalent to
servicer_context.auth_context().get(
servicer_context.peer_identity_key())
Returns:
An iterable of the identities, or None if the call is not authenticated.
Each identity is returned as a raw bytes type.
"""
raise NotImplementedError()
@abc.abstractmethod
def peer_identity_key(self):
"""The auth property used to identify the peer.
For example, "x509_common_name" or "x509_subject_alternative_name" are
used to identify an SSL peer.
Returns:
The auth property (string) that indicates the
peer identity, or None if the call is not authenticated.
"""
raise NotImplementedError()
@abc.abstractmethod
def auth_context(self):
"""Gets the auth context for the call.
Returns:
A map of strings to an iterable of bytes for each auth property.
"""
raise NotImplementedError()
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
"""Sends the initial metadata value to the client.
This method need not be called by implementations if they have no
metadata to add to what the gRPC runtime will transmit.
Args:
initial_metadata: The initial :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_trailing_metadata(self, trailing_metadata):
"""Sends the trailing metadata for the RPC.
This method need not be called by implementations if they have no
metadata to add to what the gRPC runtime will transmit.
Args:
trailing_metadata: The trailing :term:`metadata`.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_code(self, code):
"""Sets the value to be used as status code upon RPC completion.
This method need not be called by method implementations if they wish the
gRPC runtime to determine the status code of the RPC.
Args:
code: A StatusCode object to be sent to the client.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_details(self, details):
"""Sets the value to be used as detail string upon RPC completion.
This method need not be called by method implementations if they have no
details to transmit.
Args:
details: An arbitrary string to be sent to the client upon completion.
"""
raise NotImplementedError()
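# Illustrative sketch only: a hypothetical unary-unary method implementation
# showing how a handler can use the ServicerContext described above.
# (StatusCode is the enum defined earlier in this module.)
def _example_servicer_method(request, context):
    peer = context.peer()  # e.g. useful for logging
    context.send_initial_metadata((('handled-by', 'example'),))
    if not request:
        context.set_code(StatusCode.INVALID_ARGUMENT)
        context.set_details('empty request')
        return b''
    context.set_trailing_metadata((('peer-was', peer),))
    return request  # echo the request back as the response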
##################### Service-Side Handler Interfaces ########################
class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
"""An implementation of a single RPC method.
Attributes:
request_streaming: Whether the RPC supports exactly one request message or
any arbitrary number of request messages.
response_streaming: Whether the RPC supports exactly one response message or
any arbitrary number of response messages.
request_deserializer: A callable behavior that accepts a byte string and
returns an object suitable to be passed to this object's business logic,
or None to indicate that this object's business logic should be passed the
raw request bytes.
response_serializer: A callable behavior that accepts an object produced by
this object's business logic and returns a byte string, or None to
indicate that the byte strings produced by this object's business logic
should be transmitted on the wire as they are.
unary_unary: This object's application-specific business logic as a callable
value that takes a request value and a ServicerContext object and returns
a response value. Only non-None if both request_streaming and
response_streaming are False.
unary_stream: This object's application-specific business logic as a
callable value that takes a request value and a ServicerContext object and
returns an iterator of response values. Only non-None if request_streaming
is False and response_streaming is True.
stream_unary: This object's application-specific business logic as a
callable value that takes an iterator of request values and a
ServicerContext object and returns a response value. Only non-None if
request_streaming is True and response_streaming is False.
stream_stream: This object's application-specific business logic as a
callable value that takes an iterator of request values and a
ServicerContext object and returns an iterator of response values. Only
non-None if request_streaming and response_streaming are both True.
"""
class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
"""Describes an RPC that has just arrived for service.
Attributes:
method: The method name of the RPC.
invocation_metadata: The :term:`metadata` sent by the client.
"""
class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
"""An implementation of arbitrarily many RPC methods."""
@abc.abstractmethod
def service(self, handler_call_details):
"""Returns the handler for servicing the RPC.
Args:
handler_call_details: A HandlerCallDetails describing the RPC.
Returns:
An RpcMethodHandler with which the RPC may be serviced if the
implementation chooses to service this RPC, or None otherwise.
"""
raise NotImplementedError()
class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)):
"""An implementation of RPC methods belonging to a service.
A service handles RPC methods with structured names of the form
'/Service.Name/Service.Method', where 'Service.Name' is the value
returned by service_name(), and 'Service.Method' is the method
name. A service can have multiple method names, but only a single
service name.
"""
@abc.abstractmethod
def service_name(self):
"""Returns this service's name.
Returns:
The service name.
"""
raise NotImplementedError()
############################# Server Interface ###############################
class Server(six.with_metaclass(abc.ABCMeta)):
"""Services RPCs."""
@abc.abstractmethod
def add_generic_rpc_handlers(self, generic_rpc_handlers):
"""Registers GenericRpcHandlers with this Server.
This method is only safe to call before the server is started.
Args:
generic_rpc_handlers: An iterable of GenericRpcHandlers that will be used
to service RPCs.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_insecure_port(self, address):
"""Opens an insecure port for accepting RPCs.
This method may only be called before starting the server.
Args:
address: The address for which to open a port.
If the port is 0, or not specified in the address, then the gRPC runtime
will choose a port.
Returns:
integer:
An integer port on which the server will accept RPC requests.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_secure_port(self, address, server_credentials):
"""Opens a secure port for accepting RPCs.
This method may only be called before starting the server.
Args:
address: The address for which to open a port.
If the port is 0, or not specified in the address, then the gRPC runtime
will choose a port.
server_credentials: A ServerCredentials object.
Returns:
integer:
An integer port on which the server will accept RPC requests.
"""
raise NotImplementedError()
@abc.abstractmethod
def start(self):
"""Starts this Server.
This method may only be called once (i.e. it is not idempotent).
"""
raise NotImplementedError()
@abc.abstractmethod
def stop(self, grace):
"""Stops this Server.
This method immediately stops service of new RPCs in all cases.
If a grace period is specified, this method returns immediately
and all RPCs active at the end of the grace period are aborted.
If a grace period is not specified, then all existing RPCs are
terminated immediately and this method blocks until the last
RPC handler terminates.
This method is idempotent and may be called at any time. Passing a smaller
grace value in a subsequent call will have the effect of stopping the Server
sooner. Passing a larger grace value in a subsequent call *will not* have the
effect of stopping the server later (i.e. the most restrictive grace
value is used).
Args:
grace: A duration of time in seconds or None.
Returns:
A threading.Event that will be set when this Server has completely
stopped, i.e. when running RPCs either complete or are aborted and
all handlers have terminated.
"""
raise NotImplementedError()
################################# Functions ################################
def unary_unary_rpc_method_handler(behavior,
request_deserializer=None,
response_serializer=None):
"""Creates an RpcMethodHandler for a unary-unary RPC method.
Args:
behavior: The implementation of an RPC that accepts one request and returns
one response.
request_deserializer: An optional behavior for request deserialization.
response_serializer: An optional behavior for response serialization.
Returns:
An RpcMethodHandler object that is typically used by grpc.Server.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.RpcMethodHandler(False, False, request_deserializer,
response_serializer, behavior, None,
None, None)
def unary_stream_rpc_method_handler(behavior,
request_deserializer=None,
response_serializer=None):
"""Creates an RpcMethodHandler for a unary-stream RPC method.
Args:
behavior: The implementation of an RPC that accepts one request and returns
an iterator of response values.
request_deserializer: An optional behavior for request deserialization.
response_serializer: An optional behavior for response serialization.
Returns:
An RpcMethodHandler object that is typically used by grpc.Server.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.RpcMethodHandler(False, True, request_deserializer,
response_serializer, None, behavior,
None, None)
def stream_unary_rpc_method_handler(behavior,
request_deserializer=None,
response_serializer=None):
"""Creates an RpcMethodHandler for a stream-unary RPC method.
Args:
behavior: The implementation of an RPC that accepts an iterator of request
values and returns a single response value.
request_deserializer: An optional behavior for request deserialization.
response_serializer: An optional behavior for response serialization.
Returns:
An RpcMethodHandler object that is typically used by grpc.Server.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.RpcMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
behavior, None)
def stream_stream_rpc_method_handler(behavior,
request_deserializer=None,
response_serializer=None):
"""Creates an RpcMethodHandler for a stream-stream RPC method.
Args:
behavior: The implementation of an RPC that accepts an iterator of request
values and returns an iterator of response values.
request_deserializer: An optional behavior for request deserialization.
response_serializer: An optional behavior for response serialization.
Returns:
An RpcMethodHandler object that is typically used by grpc.Server.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.RpcMethodHandler(True, True, request_deserializer,
response_serializer, None, None, None,
behavior)
def method_handlers_generic_handler(service, method_handlers):
"""Creates a GenericRpcHandler from RpcMethodHandlers.
Args:
service: The name of the service that is implemented by the method_handlers.
method_handlers: A dictionary that maps method names to corresponding
RpcMethodHandler.
Returns:
A GenericRpcHandler. This is typically added to the grpc.Server object
with add_generic_rpc_handlers() before starting the server.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.DictionaryGenericHandler(service, method_handlers)
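# Illustrative sketch only: wiring plain Python functions into a
# GenericRpcHandler for a made-up 'example.EchoService' using the helpers
# above; the resulting handler is passed to Server.add_generic_rpc_handlers().
def _example_generic_handler():
    def echo(request, servicer_context):  # unary-unary behavior
        return request
    def echo_stream(request_iterator, servicer_context):  # stream-stream behavior
        for request in request_iterator:
            yield request
    method_handlers = {
        'Echo': unary_unary_rpc_method_handler(echo),
        'EchoStream': stream_stream_rpc_method_handler(echo_stream),
    }
    return method_handlers_generic_handler('example.EchoService', method_handlers)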
def ssl_channel_credentials(root_certificates=None,
private_key=None,
certificate_chain=None):
"""Creates a ChannelCredentials for use with an SSL-enabled Channel.
Args:
root_certificates: The PEM-encoded root certificates as a byte string,
or None to retrieve them from a default location chosen by gRPC runtime.
private_key: The PEM-encoded private key as a byte string, or None if no
private key should be used.
certificate_chain: The PEM-encoded certificate chain as a byte string
to use, or None if no certificate chain should be used.
Returns:
A ChannelCredentials for use with an SSL-enabled Channel.
"""
if private_key is not None or certificate_chain is not None:
pair = _cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
else:
pair = None
return ChannelCredentials(
_cygrpc.channel_credentials_ssl(root_certificates, pair))
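# Illustrative sketch only; the certificate path and target are assumptions.
# Mutual TLS would additionally pass private_key and certificate_chain bytes.
def _example_ssl_channel(target, root_certificates_path):
    with open(root_certificates_path, 'rb') as f:
        credentials = ssl_channel_credentials(root_certificates=f.read())
    return secure_channel(target, credentials)  # secure_channel is defined below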
def metadata_call_credentials(metadata_plugin, name=None):
"""Construct CallCredentials from an AuthMetadataPlugin.
Args:
metadata_plugin: An AuthMetadataPlugin to use for authentication.
name: An optional name for the plugin.
Returns:
A CallCredentials.
"""
from grpc import _plugin_wrapping # pylint: disable=cyclic-import
if name is None:
try:
effective_name = metadata_plugin.__name__
except AttributeError:
effective_name = metadata_plugin.__class__.__name__
else:
effective_name = name
return CallCredentials(
_plugin_wrapping.call_credentials_metadata_plugin(metadata_plugin,
effective_name))
def access_token_call_credentials(access_token):
"""Construct CallCredentials from an access token.
Args:
access_token: A string to place directly in the http request
authorization header, for example
"authorization: Bearer <access_token>".
Returns:
A CallCredentials.
"""
from grpc import _auth # pylint: disable=cyclic-import
return metadata_call_credentials(
_auth.AccessTokenCallCredentials(access_token))
def composite_call_credentials(*call_credentials):
"""Compose multiple CallCredentials to make a new CallCredentials.
Args:
*call_credentials: At least two CallCredentials objects.
Returns:
A CallCredentials object composed of the given CallCredentials objects.
"""
from grpc import _credential_composition # pylint: disable=cyclic-import
cygrpc_call_credentials = tuple(
single_call_credentials._credentials
for single_call_credentials in call_credentials)
return CallCredentials(
_credential_composition.call(cygrpc_call_credentials))
def composite_channel_credentials(channel_credentials, *call_credentials):
"""Compose a ChannelCredentials and one or more CallCredentials objects.
Args:
channel_credentials: A ChannelCredentials object.
*call_credentials: One or more CallCredentials objects.
Returns:
A ChannelCredentials composed of the given ChannelCredentials and
CallCredentials objects.
"""
from grpc import _credential_composition # pylint: disable=cyclic-import
cygrpc_call_credentials = tuple(
single_call_credentials._credentials
for single_call_credentials in call_credentials)
return ChannelCredentials(
_credential_composition.channel(channel_credentials._credentials,
cygrpc_call_credentials))
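# Illustrative sketch only: layering per-call token credentials on top of SSL
# channel credentials so every RPC on the channel carries the token.
# `access_token` is an assumption supplied by the caller.
def _example_composite_credentials(access_token):
    channel_creds = ssl_channel_credentials()  # default root certificates
    call_creds = access_token_call_credentials(access_token)
    return composite_channel_credentials(channel_creds, call_creds)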
def ssl_server_credentials(private_key_certificate_chain_pairs,
root_certificates=None,
require_client_auth=False):
"""Creates a ServerCredentials for use with an SSL-enabled Server.
Args:
private_key_certificate_chain_pairs: A list of pairs of the form
[PEM-encoded private key, PEM-encoded certificate chain].
root_certificates: An optional byte string of PEM-encoded client root
certificates that the server will use to verify client authentication.
If omitted, require_client_auth must also be False.
require_client_auth: A boolean indicating whether or not to require
clients to be authenticated. May only be True if root_certificates
is not None.
Returns:
A ServerCredentials for use with an SSL-enabled Server. Typically, this
object is an argument to add_secure_port() method during server setup.
"""
if len(private_key_certificate_chain_pairs) == 0:
raise ValueError(
'At least one private key-certificate chain pair is required!')
elif require_client_auth and root_certificates is None:
raise ValueError(
'Illegal to require client auth without providing root certificates!'
)
else:
return ServerCredentials(
_cygrpc.server_credentials_ssl(root_certificates, [
_cygrpc.SslPemKeyCertPair(key, pem)
for key, pem in private_key_certificate_chain_pairs
], require_client_auth))
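# Illustrative sketch only; the key/chain file paths are assumptions. Returns
# the port actually bound, which matters when port 0 is requested.
def _example_add_secure_port(rpc_server, key_path, chain_path):
    with open(key_path, 'rb') as f:
        private_key = f.read()
    with open(chain_path, 'rb') as f:
        certificate_chain = f.read()
    credentials = ssl_server_credentials([(private_key, certificate_chain)])
    return rpc_server.add_secure_port('[::]:0', credentials)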
def channel_ready_future(channel):
"""Creates a Future that tracks when a Channel is ready.
Cancelling the Future does not affect the channel's state machine.
It merely decouples the Future from channel state machine.
Args:
channel: A Channel object.
Returns:
A Future object that matures when the channel connectivity is
ChannelConnectivity.READY.
"""
from grpc import _utilities # pylint: disable=cyclic-import
return _utilities.channel_ready_future(channel)
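# Illustrative sketch only: block until the channel reaches READY, raising
# FutureTimeoutError if it does not get there within `timeout` seconds.
def _example_wait_until_ready(channel, timeout=10):
    channel_ready_future(channel).result(timeout=timeout)
    return channel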
def insecure_channel(target, options=None):
"""Creates an insecure Channel to a server.
Args:
target: The server address
options: An optional list of key-value pairs (channel args in gRPC runtime)
to configure the channel.
Returns:
A Channel object.
"""
from grpc import _channel # pylint: disable=cyclic-import
return _channel.Channel(target, () if options is None else options, None)
def secure_channel(target, credentials, options=None):
"""Creates a secure Channel to a server.
Args:
target: The server address.
credentials: A ChannelCredentials instance.
options: An optional list of key-value pairs (channel args in gRPC runtime)
to configure the channel.
Returns:
A Channel object.
"""
from grpc import _channel # pylint: disable=cyclic-import
return _channel.Channel(target, () if options is None else options,
credentials._credentials)
def server(thread_pool,
handlers=None,
options=None,
maximum_concurrent_rpcs=None):
"""Creates a Server with which RPCs can be serviced.
Args:
thread_pool: A futures.ThreadPoolExecutor to be used by the Server
to execute RPC handlers.
handlers: An optional list of GenericRpcHandlers used for executing RPCs.
More handlers may be added by calling add_generic_rpc_handlers any time
before the server is started.
options: An optional list of key-value pairs (channel args in gRPC runtime)
to configure the channel.
maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
will service before returning RESOURCE_EXHAUSTED status, or None to
indicate no limit.
Returns:
A Server object.
"""
from grpc import _server # pylint: disable=cyclic-import
return _server.Server(thread_pool, () if handlers is None else handlers, ()
if options is None else options,
maximum_concurrent_rpcs)
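# Illustrative sketch only: a minimal insecure server built from the functions
# above. `generic_rpc_handlers` is assumed to be an iterable of
# GenericRpcHandler objects (see method_handlers_generic_handler).
def _example_server_lifecycle(generic_rpc_handlers):
    from concurrent import futures  # the `futures` backport on Python 2
    rpc_server = server(futures.ThreadPoolExecutor(max_workers=10))
    rpc_server.add_generic_rpc_handlers(generic_rpc_handlers)
    port = rpc_server.add_insecure_port('[::]:0')  # 0 lets the runtime pick a port
    rpc_server.start()
    # ... serve until shutdown is requested ...
    rpc_server.stop(grace=None).wait()  # stop() returns a threading.Event
    return port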
################################### __all__ #################################
__all__ = ('FutureTimeoutError', 'FutureCancelledError', 'Future',
'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext',
'Call', 'ChannelCredentials', 'CallCredentials',
'AuthMetadataContext', 'AuthMetadataPluginCallback',
'AuthMetadataPlugin', 'ServerCredentials', 'UnaryUnaryMultiCallable',
'UnaryStreamMultiCallable', 'StreamUnaryMultiCallable',
'StreamStreamMultiCallable', 'Channel', 'ServicerContext',
'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler',
'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler',
'unary_stream_rpc_method_handler', 'stream_unary_rpc_method_handler',
'stream_stream_rpc_method_handler',
'method_handlers_generic_handler', 'ssl_channel_credentials',
'metadata_call_credentials', 'access_token_call_credentials',
'composite_call_credentials', 'composite_channel_credentials',
'ssl_server_credentials', 'channel_ready_future', 'insecure_channel',
'secure_channel', 'server',)
############################### Extension Shims ################################
# Here to maintain backwards compatibility; avoid using these in new code!
try:
import grpc_tools
sys.modules.update({'grpc.tools': grpc_tools})
except ImportError:
pass
try:
import grpc_health
sys.modules.update({'grpc.health': grpc_health})
except ImportError:
pass
try:
import grpc_reflection
sys.modules.update({'grpc.reflection': grpc_reflection})
except ImportError:
pass
| {
"content_hash": "a63aea6bdd2246be6aacee28adb02666",
"timestamp": "",
"source": "github",
"line_count": 1354,
"max_line_length": 81,
"avg_line_length": 35.57016248153619,
"alnum_prop": 0.6722519828910759,
"repo_name": "kriswuollett/grpc",
"id": "5426b47c76185f75774e6006ae5125b1c08fd708",
"size": "49695",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "12635"
},
{
"name": "C",
"bytes": "4804665"
},
{
"name": "C#",
"bytes": "1042810"
},
{
"name": "C++",
"bytes": "1293983"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "265235"
},
{
"name": "Makefile",
"bytes": "558082"
},
{
"name": "Objective-C",
"bytes": "271318"
},
{
"name": "PHP",
"bytes": "128982"
},
{
"name": "Protocol Buffer",
"bytes": "101725"
},
{
"name": "Python",
"bytes": "1590710"
},
{
"name": "Ruby",
"bytes": "461694"
},
{
"name": "Shell",
"bytes": "49693"
},
{
"name": "Swift",
"bytes": "5279"
}
],
"symlink_target": ""
} |
def is_string(value):
return isinstance(value, str)
| {
"content_hash": "b4fd34576493a120f86af2d1daaee60c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 33,
"avg_line_length": 28,
"alnum_prop": 0.7142857142857143,
"repo_name": "cloud4rpi/cloud4rpi",
"id": "fcc80d5ba781da8c53aa7a243f30a285090f5d37",
"size": "56",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloud4rpi/utils_v3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "492"
},
{
"name": "Python",
"bytes": "44644"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
from opentracing.span import Span
class ScopeCompatibilityCheckMixin(object):
"""
A mixin class for validation that a given scope manager implementation
satisfies the requirements of the OpenTracing API.
"""
def scope_manager(self):
raise NotImplementedError('Subclass must implement scope_manager()')
def run_test(self, test_fn):
"""
Utility method that can be optionally defined by ScopeManager
implementers to run the passed test_fn() function
in a given environment, such as a coroutine or greenlet.
By default, it simply runs the passed test_fn() function
in the current thread.
"""
test_fn()
def test_missing_active_external(self):
# test that 'active' does not fail outside the run_test()
# implementation (greenlet or coroutine).
scope_manager = self.scope_manager()
assert scope_manager.active is None
def test_missing_active(self):
def fn():
scope_manager = self.scope_manager()
assert scope_manager.active is None
self.run_test(fn)
def test_activate(self):
def fn():
scope_manager = self.scope_manager()
span = mock.MagicMock(spec=Span)
scope = scope_manager.activate(span, False)
assert scope is not None
assert scope_manager.active is scope
scope.close()
assert span.finish.call_count == 0
assert scope_manager.active is None
self.run_test(fn)
def test_activate_external(self):
# test that activate() does not fail outside the run_test()
# implementation (greenlet or coroutine).
scope_manager = self.scope_manager()
span = mock.MagicMock(spec=Span)
scope = scope_manager.activate(span, False)
assert scope is not None
assert scope_manager.active is scope
scope.close()
assert span.finish.call_count == 0
assert scope_manager.active is None
def test_activate_finish_on_close(self):
def fn():
scope_manager = self.scope_manager()
span = mock.MagicMock(spec=Span)
scope = scope_manager.activate(span, True)
assert scope is not None
assert scope_manager.active is scope
scope.close()
assert span.finish.call_count == 1
assert scope_manager.active is None
self.run_test(fn)
def test_activate_nested(self):
def fn():
# when a Scope is closed, the previous one must be re-activated.
scope_manager = self.scope_manager()
parent_span = mock.MagicMock(spec=Span)
child_span = mock.MagicMock(spec=Span)
with scope_manager.activate(parent_span, True) as parent:
assert parent is not None
assert scope_manager.active is parent
with scope_manager.activate(child_span, True) as child:
assert child is not None
assert scope_manager.active is child
assert scope_manager.active is parent
assert parent_span.finish.call_count == 1
assert child_span.finish.call_count == 1
assert scope_manager.active is None
self.run_test(fn)
def test_activate_finish_on_close_nested(self):
def fn():
# finish_on_close must be correctly handled
scope_manager = self.scope_manager()
parent_span = mock.MagicMock(spec=Span)
child_span = mock.MagicMock(spec=Span)
parent = scope_manager.activate(parent_span, False)
with scope_manager.activate(child_span, True):
pass
parent.close()
assert parent_span.finish.call_count == 0
assert child_span.finish.call_count == 1
assert scope_manager.active is None
self.run_test(fn)
def test_close_wrong_order(self):
def fn():
# only the active `Scope` can be closed
scope_manager = self.scope_manager()
parent_span = mock.MagicMock(spec=Span)
child_span = mock.MagicMock(spec=Span)
parent = scope_manager.activate(parent_span, True)
child = scope_manager.activate(child_span, True)
parent.close()
assert parent_span.finish.call_count == 0
assert scope_manager.active == child
self.run_test(fn)
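# Illustrative sketch only, not part of the harness: a concrete test case
# combines this mixin with unittest.TestCase and returns a real ScopeManager
# implementation. The ThreadLocalScopeManager import path below is assumed
# from the opentracing 2.x package layout.
import unittest

class ThreadLocalScopeCompatibilityTest(
        ScopeCompatibilityCheckMixin, unittest.TestCase):
    def scope_manager(self):
        from opentracing.scope_managers import ThreadLocalScopeManager
        return ThreadLocalScopeManager()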
| {
"content_hash": "573474bea432950292f1f783dc45577c",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 76,
"avg_line_length": 32.5531914893617,
"alnum_prop": 0.6008714596949891,
"repo_name": "cloudera/hue",
"id": "b70df4893c47eafad69823154f262303bed75d94",
"size": "5688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/opentracing-2.2.0/opentracing/harness/scope_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import telnetlib
import threading
import sys
INFORM = '\033[94m'
OKGREEN = '\033[92m'
REQUEST = '\033[93m'
RESPONSE = '\033[91m'
INPUT = '\033[0m'
class ReadThread(threading.Thread):
def __init__(self, name, telnet_session):
self.keep_reading = True
self.telnet = telnet_session
super(ReadThread, self).__init__(name=name)
self.raw = False
self.prefix_colour = ''
def run(self):
text_buffer = ''
while self.keep_reading:
text = self.telnet.read_eager()
text_buffer += text
if '\n' in text or '\r' in text:
text_buffer = self.print_line(text_buffer)
def print_line(self, text):
if not text or not text.strip():
return text
ret_str = ''
lines = text.split('\n')
if text[-1] != '\n':
ret_str = lines.pop()
for line in lines:
self.set_colour(line)
self.print_katcp(line + '\n')
return ret_str
def stop(self):
self.keep_reading = False
def set_colour(self, line):
if line and self.prefix_colour is not False:
color = {'#': INFORM,
'!': RESPONSE}.get(line[0])
if color:
self.prefix_colour = color
def print_katcp(self, text):
if not self.raw:
text = text.replace('\\n', '\n')
text = text.replace('\\_', ' ')
text = text.replace('\\@', '\@')
text = text.replace('\_', ' ')
text = text.replace(r'\\n', '\n')
if self.prefix_colour is False:
colour = ''
else:
colour = self.prefix_colour
print('\r{0}{1}'.format(colour, text), end='')
def toggle_raw(self):
self.raw = not self.raw
def toggle_colour(self):
if self.prefix_colour is False:
self.prefix_colour = ''
else:
self.prefix_colour = False
def print_help():
print('Help')
print('----')
print('\t \? or \help : Display this.')
print('\t \quit or \exit : Close the connection.')
print('\t \\raw : Toggle Raw mode do not escape KatCP special characters.')
print('\t \colour : Toggle colour display.')
# \t for timing on/off.
if __name__ == '__main__':
try:
host = sys.argv[1]
port = int(sys.argv[2])
except IndexError:
print('Specify Host and Port')
sys.exit()
except ValueError:
print('Invalid Host or Port')
sys.exit()
print('Connected to', host, port)
print_help()
tn = telnetlib.Telnet(host, port)
reader = ReadThread('read{0}:{1}'.format(host, port), tn)
reader.start()
history = []
run = True
while run:
try:
choice = raw_input('%s>>> ' % INPUT)
choice = choice.strip()
except KeyboardInterrupt:
run = False
continue  # skip processing a stale or undefined command after Ctrl-C
if choice.startswith('\\'):
# This is a command internal to katcp_console.
key = choice.strip('\\').lower()
if key in ['quit', 'exit', 'q', 'e']:
run = False
elif key in ['help', '?', 'h']:
print_help()
elif key in ['raw', 'r']:
reader.toggle_raw()
elif key in ['colour', 'c', 'color']:
reader.toggle_colour()
else:
print('{0}{1}'.format(REQUEST, choice))
tn.write(choice)
tn.write('\n')
reader.stop()
reader.join()
tn.close()
| {
"content_hash": "c0525dae94712d2c7d3a5c17dcea8a1a",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 28.181102362204726,
"alnum_prop": 0.5043308186644314,
"repo_name": "martinslabber/katcp_utils",
"id": "dbd7eae9c4903467773424affa48879c5bdf5a34",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katcp_console/katcp_telnet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14362"
}
],
"symlink_target": ""
} |
"""Tests for create_python_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
from tensorflow.python.platform import test
from tensorflow.python.tools.api.generator import create_python_api
from tensorflow.python.util.tf_export import tf_export
@tf_export('test_op', 'test_op1', 'test.test_op2')
def test_op():
pass
@tf_export('test1.foo', v1=['test.foo'])
def deprecated_test_op():
pass
@tf_export('TestClass', 'NewTestClass')
class TestClass(object):
pass
_TEST_CONSTANT = 5
_MODULE_NAME = 'tensorflow.python.test_module'
class CreatePythonApiTest(test.TestCase):
def setUp(self):
# Add fake op to a module that has 'tensorflow' in the name.
sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
setattr(sys.modules[_MODULE_NAME], 'deprecated_test_op', deprecated_test_op)
setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
test_op.__module__ = _MODULE_NAME
TestClass.__module__ = _MODULE_NAME
tf_export('consts._TEST_CONSTANT').export_constant(
_MODULE_NAME, '_TEST_CONSTANT')
def tearDown(self):
del sys.modules[_MODULE_NAME]
def testFunctionImportIsAdded(self):
imports, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
output_package='tensorflow',
api_name='tensorflow',
api_version=1)
if create_python_api._LAZY_LOADING:
expected_import = (
'\'test_op1\': '
'(\'tensorflow.python.test_module\','
' \'test_op\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import test_op as test_op1')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
if create_python_api._LAZY_LOADING:
expected_import = (
'\'test_op\': '
'(\'tensorflow.python.test_module\','
' \'test_op\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import test_op')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
# Also check that compat.v1 is not added to imports.
self.assertFalse('compat.v1' in imports,
msg='compat.v1 in %s' % str(imports.keys()))
def testClassImportIsAdded(self):
imports, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
output_package='tensorflow',
api_name='tensorflow',
api_version=2)
if create_python_api._LAZY_LOADING:
expected_import = (
'\'NewTestClass\':'
' (\'tensorflow.python.test_module\','
' \'TestClass\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import TestClass')
self.assertTrue(
'TestClass' in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testConstantIsAdded(self):
imports, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
output_package='tensorflow',
api_name='tensorflow',
api_version=1)
if create_python_api._LAZY_LOADING:
expected = ('\'_TEST_CONSTANT\':'
' (\'tensorflow.python.test_module\','
' \'_TEST_CONSTANT\')')
else:
expected = ('from tensorflow.python.test_module '
'import _TEST_CONSTANT')
self.assertTrue(expected in str(imports),
msg='%s not in %s' % (expected, str(imports)))
def testCompatModuleIsAdded(self):
imports, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
output_package='tensorflow',
api_name='tensorflow',
api_version=2,
compat_api_versions=[1])
self.assertTrue('compat.v1' in imports,
msg='compat.v1 not in %s' % str(imports.keys()))
self.assertTrue('compat.v1.test' in imports,
msg='compat.v1.test not in %s' % str(imports.keys()))
def testNestedCompatModulesAreAdded(self):
imports, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
output_package='tensorflow',
api_name='tensorflow',
api_version=2,
compat_api_versions=[1, 2])
self.assertIn('compat.v1.compat.v1', imports,
msg='compat.v1.compat.v1 not in %s' % str(imports.keys()))
self.assertIn('compat.v1.compat.v2', imports,
msg='compat.v1.compat.v2 not in %s' % str(imports.keys()))
self.assertIn('compat.v2.compat.v1', imports,
msg='compat.v2.compat.v1 not in %s' % str(imports.keys()))
self.assertIn('compat.v2.compat.v2', imports,
msg='compat.v2.compat.v2 not in %s' % str(imports.keys()))
if __name__ == '__main__':
test.main()
| {
"content_hash": "e9ce778f00e7698cceb42c1e26622fa9",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 80,
"avg_line_length": 34.00666666666667,
"alnum_prop": 0.609684375612625,
"repo_name": "adit-chandra/tensorflow",
"id": "010f189dcb27f0e1f6276071ca5b1724fa6d12c0",
"size": "5789",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/tools/api/generator/create_python_api_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76734263"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299322"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38764318"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
import sys, os, re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.doctest']
extensions = ['sphinx.ext.autodoc', #'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
autodoc_default_flags = [ "special-members" ]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
#epydoc_mapping = {
# '/_static/api/': [r'rdflib\.'],
# }
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rdflib'
copyright = u'2009 - 2013, RDFLib Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find version. We have to do this because we can't import it in Python 3 until
# its been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
# The full version, including alpha/beta/rc tags.
release = find_version('../rdflib/__init__.py')
# The short X.Y version.
version = re.sub("[0-9]+\\.[0-9]\\..*", "\1", release)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build', 'draft']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = '_static/logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'rdflibdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'rdflib.tex', u'rdflib Documentation',
u'RDFLib Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
}
| {
"content_hash": "9ab62c17389014f7ee8cc22f3ddcb0f8",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 80,
"avg_line_length": 33.02843601895734,
"alnum_prop": 0.6985220261156551,
"repo_name": "yingerj/rdflib",
"id": "814e1f54dc749c7243a2732823215a2663b24b36",
"size": "7386",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Python",
"bytes": "1438458"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
def extractBakaDogeza(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'chapter' in item['title'].lower() and (vol or chp):
return buildReleaseMessageWithType(item, 'Knights & Magic', vol, chp, frag=frag, postfix=postfix)
return False
| {
"content_hash": "5d55a2272e285495304f2b35125203dc",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 99,
"avg_line_length": 35.75,
"alnum_prop": 0.7202797202797203,
"repo_name": "fake-name/ReadableWebProxy",
"id": "7402b8fc84282e732e497c60a67327b2b06a06be",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractBakaDogeza.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
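# Illustrative sketch (assumption, not part of this project): a filled-in
# sqlite3 configuration would look like the mapping below. Kept under a
# separate name so it does not override the empty DATABASES above.
_EXAMPLE_SQLITE_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',  # path to the database file
        # USER, PASSWORD, HOST and PORT are not used with sqlite3.
    }
}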
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&gd+bs(3q^_f_^4hlq(-gvo3t3rrd+nse!4j4^em)ttmsmwdm-'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'wechat', # my wechat app
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "ce2b2443fc2af07c7efe980547be1496",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 101,
"avg_line_length": 34.86274509803921,
"alnum_prop": 0.6803524559430071,
"repo_name": "fwpz/WeiPython",
"id": "2a48b19cf283c2afa390437799d21fa244159de4",
"size": "5373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mysite/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29442"
}
],
"symlink_target": ""
} |
"""
Created on Fri Feb 19 16:55:54 2016
@author: Administrator
"""
import pandas as pd
import pandas.io.sql as pd_sql
import sqlite3 as sql
df_file = pd.read_csv('CS_table_No2_No4_new.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
df_file = df_file.drop(['STUDENTID','ACADYEAR','CAMPUSID','SEMESTER','CURRIC','CAMPUSNAME','SECTIONGROUP','GRADE'],axis=1)
df_dropDup = df_file.drop_duplicates(['sub_id'], take_last=True)
con = sql.connect("db.sqlite3")
#df = pd.DataFrame({'TestData': [1, 2, 3, 4, 5, 6, 7, 8, 9]}, dtype='float')
pd_sql.to_sql(df_dropDup, "mywebpage_subject", con, index=False)
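# Illustrative verification step (assumption, not in the original script):
# read the table back before closing to confirm the rows were written.
df_check = pd_sql.read_sql("SELECT COUNT(*) AS n FROM mywebpage_subject", con)
print(df_check)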
con.close() | {
"content_hash": "32140a0c3f49d87f907356ef0dfa9216",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 122,
"avg_line_length": 34,
"alnum_prop": 0.6246498599439776,
"repo_name": "wasit7/book_pae",
"id": "31bcabdaf65c4400e5ae950f859c294a82e9dcdf",
"size": "738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "book/django/project/add_subject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "345113"
},
{
"name": "HTML",
"bytes": "217393"
},
{
"name": "JavaScript",
"bytes": "42775"
},
{
"name": "Jupyter Notebook",
"bytes": "3075174"
},
{
"name": "Python",
"bytes": "263859"
}
],
"symlink_target": ""
} |