Dataset columns:
    query            string (length 9 to 9.05k characters)
    document         string (length 10 to 222k characters)
    metadata         dict
    negatives        sequence of 30 code strings
    negative_scores  sequence of 30 scores
    document_score   string (length 4 to 10 characters)
    document_rank    string (2 distinct values)
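Each row pairs a natural-language query (typically a function docstring) with one positive code document and 30 scored hard negatives, plus the positive document's score and rank. Below is a minimal usage sketch, assuming the data is published for the Hugging Face `datasets` library; the dataset path is a hypothetical placeholder, not the actual published name.

    # Minimal sketch: load the triplet dataset and inspect one row.
    # Assumption: the data is hosted on the Hugging Face Hub;
    # "org/code-retrieval-triplets" is a placeholder path.
    from datasets import load_dataset

    ds = load_dataset("org/code-retrieval-triplets", split="train")  # placeholder path
    row = ds[0]

    print(row["query"])                # natural-language query, e.g. a docstring
    print(row["document"])             # positive code snippet for the query
    print(len(row["negatives"]))       # 30 hard-negative code snippets
    print(row["negative_scores"][:3])  # similarity scores of the first few negatives
    print(row["document_score"], row["document_rank"])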
decorator to register a babel cli handler.
def babel_cli_handler(**options):

    def decorator(cls):
        """
        decorates the given class and registers an instance
        of it into available babel cli handlers.

        :param BabelCLIHandlerBase cls: babel cli handler class.

        :returns: babel cli handler class.
        :rtype: BabelCLIHandlerBase
        """

        instance = cls()
        babel_services.register_cli_handler(instance, **options)

        return cls

    return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decorator(cls):\n\n instance = cls()\n babel_services.register_cli_handler(instance, **options)\n\n return cls", "def __init__(self):\n\n super().__init__(BabelCLIHandlersEnum.INIT)", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def register_command(*parse_args, **options):\n def wrapper(function):\n function._is_command = True\n return function\n return wrapper", "def __init__(self):\n\n super().__init__(BabelCLIHandlersEnum.COMPILE)", "def cli(_):\n pass", "def cli(_):\n pass", "def register_cli_commands(app):\n app.cli.add_command(init_events_command)", "def register_command(name):\n\n def register(cmd):\n Facade().register_command(name, cmd)\n return cmd\n\n return register", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def make_command_register(collector):\n\n def _register(*args, name=None):\n a_transform = _transform(*args)\n return collector.register(transform=a_transform, name=name)\n\n return _register", "def register_command(self, func):\n self.commands[func.__name__] = func", "def __call__(self, path):\n def wrapper(application):\n self.register(path, application)\n return application\n return wrapper", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def __init__(self, *args, **kwargs):\n\n super().__init__()\n\n # a dictionary containing cli handlers for different commands.\n # in the form of: {str handler_name: CLIHandlerBase handler}\n self._cli_handlers = DTO()", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def _cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass" ]
[ "0.83057785", "0.6101431", "0.6031415", "0.6014547", "0.5841859", "0.5641326", "0.5641326", "0.5525839", "0.55132365", "0.5494176", "0.5488199", "0.54627776", "0.5449929", "0.5416219", "0.53628474", "0.5356179", "0.5328154", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033" ]
0.8217108
1
decorates the given class and registers an instance of it into available babel cli handlers.
def decorator(cls):

    instance = cls()
    babel_services.register_cli_handler(instance, **options)

    return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def babel_cli_handler(**options):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available babel cli handlers.\n\n :param BabelCLIHandlerBase cls: babel cli handler class.\n\n :returns: babel cli handler class.\n :rtype: BabelCLIHandlerBase\n \"\"\"\n\n instance = cls()\n babel_services.register_cli_handler(instance, **options)\n\n return cls\n\n return decorator", "def add_class(self, cls):\n self.commands.append(cls)", "def _class_wrapper(command_class):\n WebBot().register_command(command_class)\n return command_class", "def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator", "def register(cls):\n register(cls, cls.provided_class)", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def register(cls, class_to_register):\n cls.registered_loaders.append(class_to_register)\n return class_to_register", "def _class(self, _class):\n\n self.__class = _class", "def _class(self, _class):\n\n self.__class = _class", "def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f", "def register(cls: Any, *args: Any, **kwargs: Any) -> Callable:\n\n def wrapper(subclass: Any, *args: Any, **kwargs: Any) -> Any:\n return subclass\n\n return wrapper", "def _decorate(cls):\n global_validators = [session_required, catch_typeerror]\n # Cheat methods _hosts_name_label\n # -------------\n # Methods that have a trivial implementation for all classes.\n # 1. 
get_by_uuid == getting by ref, so just return uuid for\n # all get_by_uuid() methods.\n \n for api_cls in classes.keys():\n # We'll let the autoplug classes implement these functions\n # themselves - its much cleaner to do it in the base class\n \n get_by_uuid = '%s_get_by_uuid' % api_cls\n get_uuid = '%s_get_uuid' % api_cls\n get_all_records = '%s_get_all_records' % api_cls \n\n def _get_by_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def _get_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def unpack(v):\n return v.get('Value')\n\n def _get_all_records(_api_cls):\n return lambda s, session: \\\n xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\\\n for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))\n\n setattr(cls, get_by_uuid, _get_by_uuid)\n setattr(cls, get_uuid, _get_uuid)\n setattr(cls, get_all_records, _get_all_records(api_cls))\n\n # Autoplugging classes\n # --------------------\n # These have all of their methods grabbed out from the implementation\n # class, and wrapped up to be compatible with the Xen-API.\n\n# def getter(ref, type):\n# return XendAPIStore.get(ref, type)\n\n def wrap_method(name, new_f):\n try:\n f = getattr(cls, name)\n wrapped_f = (lambda * args: new_f(f, *args))\n wrapped_f.api = f.api\n wrapped_f.async = f.async\n setattr(cls, name, wrapped_f)\n except AttributeError:\n # Logged below (API call: %s not found)\n pass\n\n\n def setter_event_wrapper(api_cls, attr_name):\n setter_name = '%s_set_%s' % (api_cls, attr_name)\n wrap_method(\n setter_name,\n lambda setter, s, session, ref, *args:\n _setter_event_dispatch(s, setter, api_cls, attr_name,\n session, ref, args))\n\n\n def ctor_event_wrapper(api_cls):\n ctor_name = '%s_create' % api_cls\n wrap_method(\n ctor_name,\n lambda ctor, s, session, *args:\n _ctor_event_dispatch(s, ctor, api_cls, session, args))\n\n\n def dtor_event_wrapper(api_cls):\n dtor_name = '%s_destroy' % api_cls\n wrap_method(\n dtor_name,\n lambda dtor, s, session, ref, *args:\n _dtor_event_dispatch(s, dtor, api_cls, session, ref, args))\n\n\n # Wrapping validators around XMLRPC calls\n # ---------------------------------------\n for api_cls, validator in classes.items():\n def doit(n, takes_instance, async_support=False,\n return_type=None):\n n_ = n.replace('.', '_')\n try:\n f = getattr(cls, n_)\n if n not in argcounts:\n argcounts[n] = f.func_code.co_argcount - 1\n \n validators = takes_instance and validator and \\\n [validator] or []\n \n validators += global_validators\n for v in validators:\n f = v(f)\n f.api = n\n f.async = async_support\n if return_type:\n f.return_type = return_type\n \n setattr(cls, n_, f)\n except AttributeError:\n log.warn(\"API call: %s not found\" % n)\n\n \n ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \\\n + cls.Base_attr_ro\n rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \\\n + cls.Base_attr_rw\n methods = getattr(cls, '%s_methods' % api_cls, []) \\\n + cls.Base_methods\n funcs = getattr(cls, '%s_funcs' % api_cls, []) \\\n + cls.Base_funcs\n\n # wrap validators around readable class attributes\n for attr_name in ro_attrs + rw_attrs:\n doit('%s.get_%s' % (api_cls, attr_name), True,\n async_support=False)\n\n # wrap validators around writable class attrributes\n for attr_name in rw_attrs:\n doit('%s.set_%s' % (api_cls, attr_name), True,\n async_support=False)\n setter_event_wrapper(api_cls, attr_name)\n\n # wrap validators around methods\n for method_name, return_type in methods:\n doit('%s.%s' % (api_cls, 
method_name), True,\n async_support=True)\n\n # wrap validators around class functions\n for func_name, return_type in funcs:\n \n doit('%s.%s' % (api_cls, func_name), False,\n async_support=True,\n return_type=return_type)\n \n ctor_event_wrapper(api_cls)\n dtor_event_wrapper(api_cls)", "def bind_class(self, className, sequence=None, func=None, add=None):\n return super().bind_class(className, sequence, func, add)", "def class_message_handler(\n self,\n *custom_filters,\n commands=None,\n regexp=None,\n content_types=None,\n state=None,\n run_task=None,\n **kwargs\n ):\n\n def decorator(class_):\n handler = class_()\n\n self.register_message_handler(\n handler.callback,\n *custom_filters,\n commands=commands,\n regexp=regexp,\n content_types=content_types,\n state=state,\n run_task=run_task,\n **kwargs\n )\n return class_\n\n return decorator", "def decorate_class(cls, klass: type, decorate_subclasses=False, **setting_kwds) -> None:\n assert isinstance(klass, type) # in \"debug\" mode only\n if not isinstance(klass, type): # in either mode, have the same awareness at the same time\n return\n\n # Filter out builtins.\n if not get_file_of_object(klass):\n return\n\n def _deco_class(kls: type):\n t = cls(**setting_kwds)\n _ = t(kls)\n # assert _ == kls\n\n def _deco_class_rec(kls: type):\n _deco_class(kls)\n for subclass in kls.__subclasses__():\n _deco_class_rec(subclass)\n\n if decorate_subclasses:\n _deco_class_rec(klass)\n else:\n _deco_class(klass)\n # (_deco_class_rec if decorate_subclasses else _deco_class)(klass)", "def as_handler(cls, **initkwargs):\n @wraps(cls, updated=())\n def handler(asset, *args, **kwargs):\n return handler.handler_class(**initkwargs)(asset, *args, **kwargs)\n handler.handler_class = cls\n handler.supports_check_mode = cls.supports_check_mode\n return handler", "def register_instance(cls):\n\n @functools.wraps(cls)\n def wrapper_decorator(*args, **kwargs):\n\n instance = cls(*args, **kwargs)\n\n Register[cls.__name__] = instance\n\n return instance\n\n return wrapper_decorator", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def auto_validator_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available auto validator hooks.\n\n :param type cls: auto validator hook class.\n\n :returns: auto validator hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls\n\n return decorator", "def extension(klass):\n registry.register(klass)\n return klass", "def extend_class(cls):\n return lambda f: (setattr(cls, f.__name__, f) or f)", "def format_class(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n try:\n assert USE_WRAPT_OR_NOT and wrapt\n warnings.warn('wrapt based class decorator not implemented')\n except:\n pass\n finally:\n def _class_decorator(_cls):\n try: \n meta_cls = _cls.__metaclass__\n except:\n meta_cls = type\n class metaclass_decorator(meta_cls):\n def __new__(meta, name, bases, attrs):\n name = _cls.__name__\n attrs = _cls.__dict__\n bases = _cls.__bases__\n return meta_cls.__new__(meta, name, bases, attrs)\n metaclass_decorator.__name__ = '__metaclass__'\n class new_cls(_cls):\n __metadata__ = metaclass_decorator\n # We set the __doc__ directly when defining the new class, as to avoid the\n # 'non-writable' issue with __doc__\n # indeed attribute '__doc__' of 'type' objects is not writable:\n # \"AttributeError: attribute 
'__doc__' of 'type' objects is not writable\"\n # hence new-style classes (child of 'object' type) have non writable docstring\n __doc__ = _doc_formatter(_cls)\n # override new_cls.__init__ to prevent recursion, because new_cls.__init__ \n # is _cls.__init__ and it keeps calling itself.\n # name set after the class declaration\n try:\n new_cls.__name__ = _cls.__name__\n except: pass\n try:\n new_cls.__module__ = _cls.__module__\n except: pass\n return new_cls\n return _class_decorator", "def setup_class(cls):", "def setup_class(cls):", "def command(cls, name=None):\n postfix = name\n def decorator(method):\n if postfix is None:\n name = method.__name__\n else:\n name = postfix\n mod = method.__module__\n if mod.startswith('scripts.commands'):\n mod = mod[len('scripts.commands'):]\n mod = mod.lstrip('.')\n if mod == '__main__':\n full_name = name\n else:\n full_name = mod+'.'+name\n\n app = cls\n subcmds = cls.subcommands\n for sub in full_name.split('.')[:-1]:\n if sub not in subcmds:\n sub_app = type(sub+'App', (cli.Application,),{})\n sub_app = app.subcommand(sub)(sub_app)\n subcmds[sub] = (sub_app, {})\n else:\n pass\n\n app, subcmds = subcmds[sub]\n app.__doc__ = importlib.import_module(method.__module__).__doc__\n\n signature = inspect.signature(method)\n arguments = []\n for (arg_name, param) in signature.parameters.items():\n tp = param.annotation\n if isinstance(tp, Option) or isinstance(tp, Flag):\n if tp._names:\n names = tp._names\n else:\n names = ['-'+arg_name[0], '--'+arg_name]\n arguments.append([tp, arg_name, names, param.default, tp._doc])\n\n def main(self, *args):\n kw_args = {}\n for tp, name, _, _, _ in arguments:\n kw_args[name] = getattr(self, name)\n method(*args, **kw_args)\n\n newclass = type(name+'App', (cli.Application,), {\"main\": main})\n newclass.__doc__ = method.__doc__\n newclass = app.subcommand(name)(newclass)\n\n for tp, name, names, default, doc in arguments:\n if isinstance(tp, Option):\n setattr(newclass, name, cli.SwitchAttr(names, default=default, help=doc))\n elif isinstance(tp, Flag):\n setattr(newclass, name, cli.Flag(names, help=doc))\n return method\n\n return decorator", "def as_handler(cls, **initkwargs):\n @wraps(cls, updated=())\n def handler(asset):\n return handler.handler_class(**initkwargs)(asset)\n handler.handler_class = cls\n return handler", "def wrap_class(cls, class_name, class_method_inst):\n if not cls:\n return\n for (method, method_log_args) in class_method_inst.iteritems():\n fn = getattr(cls, method, None)\n if not fn:\n # Not all methods may be in all versions of pymongo...\n continue\n kvs = { 'Class': '%s.%s' % (cls.__module__, cls.__name__),\n 'Function': method,\n 'Action': '%s.%s' % (class_name, method),\n }\n # XXX Not Python2.4-friendly\n setattr(cls, method, oboe.log_method(PYMONGO_LAYER, entry_kvs=kvs, **method_log_args)(fn))", "def register_outliner_class(self, outliner_type, outliner_class):\n\n self._registered_outliner_classes[outliner_type] = outliner_class\n return True", "def serializer(*args, **kwargs):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available serializers.\n\n :param type cls: serializer class.\n\n :returns: serializer class.\n :rtype: type\n \"\"\"\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls\n\n return decorator", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls" ]
[ "0.7828587", "0.65119535", "0.64277583", "0.6273776", "0.62530273", "0.61781615", "0.61653596", "0.60024714", "0.60024714", "0.5967244", "0.57739705", "0.5762401", "0.5757489", "0.5746612", "0.56892204", "0.5668369", "0.56358856", "0.56313264", "0.5613844", "0.5579808", "0.55725545", "0.55186033", "0.5498826", "0.5498826", "0.5483674", "0.5476988", "0.54615474", "0.5444454", "0.5423939", "0.5397158" ]
0.7280425
1
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self):
    if not self.is_valid():
        raise ValueError("get_shared_object may only be called on valid forms")

    new = SharedItem(
        object_id = force_unicode(self.target_object._get_pk_val()),
        content_type = ContentType.objects.get_for_model(self.target_object),
        share_date = datetime.datetime.now(),
    )

    return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n content_type = ContentType.objects.get_for_model(self.target_object),\n object_id = force_unicode(self.target_object._get_pk_val()),\n share_date = datetime.datetime.now(),\n )\n \n return new", "def fromSharedItem(cls, sharedItem):\n localpart = None\n for (localpart, domain) in userbase.getAccountNames(sharedItem.store):\n break\n if localpart is None:\n raise NoSuchShare()\n for share in sharedItem.store.query(Share,\n Share.sharedItem == sharedItem):\n break\n else:\n raise NoSuchShare()\n return cls(\n shareID=share.shareID,\n localpart=localpart, domain=domain)", "def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)", "def create_item(self, user: User, **kwargs) -> None:", "def copy(self):\n return Object(_default_item=self._default_item, **self._items)", "def item_shared(self, item):\n self.update_item(item)", "def save_object(self, data):\n return Item(**data)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def cloneItemOnly( self, parent ):\n o_item = self.__class__( parent, self.o_data )\n\n return o_item", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def clone(self, userName):\n if self.store:\n obj = self.store.load_or_create({'name':self.name, 'creator':userName})\n if obj:\n return obj\n \n try:\n obj = self.create()\n for key in self._allattr([self.ID, self.CREATOR, self.CREATED_TIME, self.LAST_MODIFIED]):\n obj._setattr(key, self._getattr(key))\n 
obj.creator = userName\n return obj\n except Exception as e:\n logger.error(e.message)\n logger.error('can not clone {0}'.format(self.generic()))\n return None", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def post(self):\n data = request.json\n return UserServices(data=data).save_new_item()", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def _create_or_update_packinglistitem(self, item_identifier, item, user, optional_attrs={}):\n try:\n packing_list_item = self.packing_list.packing_list_item_model.objects.get(\n packing_list=self.packing_list,\n item_reference=item_identifier)\n except self.packing_list.packing_list_item_model.DoesNotExist:\n try:\n optional_description = item.optional_description or ''\n except AttributeError:\n optional_description = None\n options = {\n 'requisition': item._meta.verbose_name,\n 'item_description': '{subject_identifier} ({initials}) VISIT:{visit} DOB:{dob} {optional}'.format(\n subject_identifier=item.registered_subject.subject_identifier,\n initials=item.registered_subject.initials,\n visit=item.visit_code,\n dob=item.registered_subject.dob,\n optional=optional_description,\n ),\n 'user_created': user,\n }\n options.update(**optional_attrs)\n packing_list_item = self.packing_list.packing_list_item_model.objects.create(\n packing_list=self.packing_list,\n item_reference=item_identifier,\n **options)\n return packing_list_item", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def setup_public_reusable_item_1(self):\n\n # ensure reusable item is public\n reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n reusableitem.is_public = True\n reusableitem.save()\n\n # add a reference to this reusable item by user 2\n create_toptenlist(self, 'user_2', 2) # create a list for user 2\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n return reusableitem", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def 
create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def get_or_create(cls, **kwargs):\n item = cls.query.filter_by(**kwargs).first()\n if not item:\n item = cls(**kwargs)\n db.session.add(item)\n db.session.commit()\n return item", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\textra['fileitem'] = linked_item\n\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'name' : linked_item.name,\n\t\t\t'fileitem' : linked_item.file,\n\t\t}", "def create(self, request, *args, **kwargs):\n # Clean up input data\n data = self.clean_data(request.data)\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n # Record the user who created this Part object\n item = serializer.save()\n item.user = request.user\n item.system = False\n\n # quantity field cannot be explicitly adjusted here\n 
item.quantity = item.item.quantity\n item.save()\n\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def post(self, item):\n\n db.session.add(item)\n\n return item" ]
[ "0.7739369", "0.64699394", "0.6046109", "0.5965286", "0.587094", "0.5790064", "0.5787484", "0.5744283", "0.5732211", "0.56390357", "0.5534467", "0.5512519", "0.5505798", "0.5461782", "0.53700083", "0.53373706", "0.53137255", "0.5306573", "0.5269624", "0.5267749", "0.52600294", "0.52524054", "0.5245564", "0.52407694", "0.52369905", "0.52321607", "0.5224713", "0.52160347", "0.52156115", "0.5203534" ]
0.772191
1
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self):
    if not self.is_valid():
        raise ValueError("get_shared_object may only be called on valid forms")

    new = SharedItem(
        content_type = ContentType.objects.get_for_model(self.target_object),
        object_id = force_unicode(self.target_object._get_pk_val()),
        share_date = datetime.datetime.now(),
    )

    return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n object_id = force_unicode(self.target_object._get_pk_val()),\n content_type = ContentType.objects.get_for_model(self.target_object),\n share_date = datetime.datetime.now(),\n )\n \n return new", "def fromSharedItem(cls, sharedItem):\n localpart = None\n for (localpart, domain) in userbase.getAccountNames(sharedItem.store):\n break\n if localpart is None:\n raise NoSuchShare()\n for share in sharedItem.store.query(Share,\n Share.sharedItem == sharedItem):\n break\n else:\n raise NoSuchShare()\n return cls(\n shareID=share.shareID,\n localpart=localpart, domain=domain)", "def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)", "def create_item(self, user: User, **kwargs) -> None:", "def copy(self):\n return Object(_default_item=self._default_item, **self._items)", "def item_shared(self, item):\n self.update_item(item)", "def save_object(self, data):\n return Item(**data)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def cloneItemOnly( self, parent ):\n o_item = self.__class__( parent, self.o_data )\n\n return o_item", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def clone(self, userName):\n if self.store:\n obj = self.store.load_or_create({'name':self.name, 'creator':userName})\n if obj:\n return obj\n \n try:\n obj = self.create()\n for key in self._allattr([self.ID, self.CREATOR, self.CREATED_TIME, self.LAST_MODIFIED]):\n obj._setattr(key, self._getattr(key))\n 
obj.creator = userName\n return obj\n except Exception as e:\n logger.error(e.message)\n logger.error('can not clone {0}'.format(self.generic()))\n return None", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def post(self):\n data = request.json\n return UserServices(data=data).save_new_item()", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def _create_or_update_packinglistitem(self, item_identifier, item, user, optional_attrs={}):\n try:\n packing_list_item = self.packing_list.packing_list_item_model.objects.get(\n packing_list=self.packing_list,\n item_reference=item_identifier)\n except self.packing_list.packing_list_item_model.DoesNotExist:\n try:\n optional_description = item.optional_description or ''\n except AttributeError:\n optional_description = None\n options = {\n 'requisition': item._meta.verbose_name,\n 'item_description': '{subject_identifier} ({initials}) VISIT:{visit} DOB:{dob} {optional}'.format(\n subject_identifier=item.registered_subject.subject_identifier,\n initials=item.registered_subject.initials,\n visit=item.visit_code,\n dob=item.registered_subject.dob,\n optional=optional_description,\n ),\n 'user_created': user,\n }\n options.update(**optional_attrs)\n packing_list_item = self.packing_list.packing_list_item_model.objects.create(\n packing_list=self.packing_list,\n item_reference=item_identifier,\n **options)\n return packing_list_item", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def setup_public_reusable_item_1(self):\n\n # ensure reusable item is public\n reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n reusableitem.is_public = True\n reusableitem.save()\n\n # add a reference to this reusable item by user 2\n create_toptenlist(self, 'user_2', 2) # create a list for user 2\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n return reusableitem", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def 
create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def get_or_create(cls, **kwargs):\n item = cls.query.filter_by(**kwargs).first()\n if not item:\n item = cls(**kwargs)\n db.session.add(item)\n db.session.commit()\n return item", "def create(self, request, *args, **kwargs):\n # Clean up input data\n data = self.clean_data(request.data)\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n # Record the user who created this Part object\n item = serializer.save()\n item.user = request.user\n item.system = False\n\n # quantity field cannot be explicitly adjusted here\n item.quantity = item.item.quantity\n item.save()\n\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = 
True\n\t\textra['fileitem'] = linked_item\n\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'name' : linked_item.name,\n\t\t\t'fileitem' : linked_item.file,\n\t\t}", "def post(self, item):\n\n db.session.add(item)\n\n return item" ]
[ "0.7722135", "0.64705783", "0.6047539", "0.59654415", "0.5869505", "0.57898974", "0.5785594", "0.57411844", "0.57283014", "0.5641411", "0.5534432", "0.5511725", "0.55054003", "0.5462437", "0.5369579", "0.5337859", "0.53142005", "0.5303614", "0.52669346", "0.52664846", "0.52594626", "0.52514905", "0.52438545", "0.5240882", "0.52365786", "0.5230385", "0.5223718", "0.5216448", "0.52145505", "0.52056813" ]
0.7739693
0
Test FeathrClient() get_online_features and batch_get can get data correctly.
def test_feathr_online_store_agg_features():
    online_test_table = get_online_test_table_name("nycTaxiCITableMaven")
    test_workspace_dir = Path(
        __file__).parent.resolve() / "test_user_workspace"
    # os.chdir(test_workspace_dir)
    # The `feathr_runtime_location` was commented out in this config file, so feathr should use
    # Maven package as the dependency and `noop.jar` as the main file
    client: FeathrClient = basic_test_setup(os.path.join(test_workspace_dir, "feathr_config_maven.yaml"))

    location_id = TypedKey(key_column="DOLocationID",
                           key_column_type=ValueType.INT32,
                           description="location id in NYC",
                           full_name="nyc_taxi.location_id")

    feature_query = FeatureQuery(
        feature_list=["f_location_avg_fare"], key=location_id)
    settings = ObservationSettings(
        observation_path="wasbs://[email protected]/sample_data/green_tripdata_2020-04.csv",
        event_timestamp_column="lpep_dropoff_datetime",
        timestamp_format="yyyy-MM-dd HH:mm:ss")

    now = datetime.now()
    # set output folder based on different runtime
    if client.spark_runtime == 'databricks':
        output_path = ''.join(['dbfs:/feathrazure_cijob', '_', str(now.minute), '_', str(now.second), ".avro"])
    else:
        output_path = ''.join(['abfss://[email protected]/demo_data/output', '_', str(now.minute), '_', str(now.second), ".avro"])

    client.get_offline_features(observation_settings=settings,
                                feature_query=feature_query,
                                output_path=output_path)

    # assuming the job can successfully run; otherwise it will throw exception
    client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)
    return

    backfill_time = BackfillTime(start=datetime(
        2020, 5, 20), end=datetime(2020, 5, 20), step=timedelta(days=1))
    redisSink = RedisSink(table_name=online_test_table)
    settings = MaterializationSettings("TestJobName",
                                       sinks=[redisSink],
                                       feature_names=[
                                           "f_location_avg_fare", "f_location_max_fare"],
                                       backfill_time=backfill_time)
    client.materialize_features(settings)
    # just assume the job is successful without validating the actual result in Redis. Might need to consolidate
    # this part with the test_feathr_online_store test case
    client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)

    res = client.get_online_features(online_test_table, '265', [
                                     'f_location_avg_fare', 'f_location_max_fare'])
    # just assume there are values. We don't hard code the values for now for testing
    # the correctness of the feature generation should be guaranteed by feathr runtime.
    # ID 239 and 265 are available in the `DOLocationID` column in this file:
    # https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2020-04.csv
    # View more details on this dataset: https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page
    assert len(res) == 2
    assert res[0] != None
    assert res[1] != None
    res = client.multi_get_online_features(online_test_table, ['239', '265'], ['f_location_avg_fare', 'f_location_max_fare'])
    assert res['239'][0] != None
    assert res['239'][1] != None
    assert res['265'][0] != None
    assert res['265'][1] != None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get(self):\n simple_fields = {\n \"verbose\": False,\n \"min_core_neighbors\": self.min_core_neighbors,\n \"num_features\": 1,\n \"num_unpacked_features\": 2,\n \"num_distance_components\": 1,\n \"radius\": self.radius,\n \"num_examples\": 30,\n }\n\n for field, ans in simple_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))\n\n _list_fields = {\n \"distance\": self.distance,\n \"unpacked_features\": [\"X1[0]\", \"X1[1]\"],\n \"features\": [\"X1\"],\n }\n\n for field, ans in _list_fields.items():\n self.assertItemsEqual(\n self.model._get(field), ans, \"{} failed\".format(field)\n )\n self.assertGreaterEqual(self.model.training_time, 0)\n self.assertGreaterEqual(self.model.num_clusters, 0)\n self.assertEqual(self.model.cluster_id.num_rows(), 30)", "def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_get(self):\n # Set up\n self.assertTrue(\n # Login into browser session as developer\n self.client.login(username=self.developer.username, password=\"password\")\n )\n # Get expected data\n objs = ServicePattern.objects.all().add_service_name()\n serializer = ServicePatternSerializer(objs, many=True)\n expected = serializer.data\n\n # Test - Get response from API\n response = self.client.get(self.feed_list_url, HTTP_HOST=self.hostname)\n actual = response.data[\"features\"]\n\n # Assert\n self.assertEqual(actual, expected[\"features\"])\n\n feature = actual[0]\n fields = [\n \"service_pattern_id\",\n \"revision\",\n \"origin\",\n \"destination\",\n \"description\",\n \"service_name\",\n ]\n for field in fields:\n self.assertIn(field, feature[\"properties\"].keys())", "def do_features_request_2(features=None):\n\n #  connect to database\n cur_db = connect_db(\"172.20.38.50\", \"mvelay\", \"user\", \"sandbox\")\n cursor = cur_db.cursor()\n\n # build whole query\n cur_query = \"\"\" 
SELECT module, sw, version FROM t_feature\n WHERE feature=\"%s\" AND supported=1;\"\"\" % (features[0])\n\n print cur_query\n cursor.execute(cur_query)\n results = cursor.fetchall()\n cursor.close()\n\n if results:\n results = results[:1000] # Limit to first 1000 results\n else:\n results = None\n\n return features[0], results", "async def getFeatures(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getFeatures()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getFeatures\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getFeatures\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/feature\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def test_get_run(self):\n pass", "def test_client_retrieve(self):\n pass", "def test_for_client():", "def test_get_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.get_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]", "def test_gettem_using_get(self):\n pass", "def batch_read_feature_values(\n self,\n ) -> Callable[\n [featurestore_service.BatchReadFeatureValuesRequest],\n Awaitable[operations_pb2.Operation],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"batch_read_feature_values\" not in self._stubs:\n self._stubs[\"batch_read_feature_values\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues\",\n request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"batch_read_feature_values\"]", "def test_get_distribution_no_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)\r\n\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + u'?feature=')\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)", "def 
test_batch(self):\n pass", "def test_get(self):\n log.info(\"START INTEG TEST GET\")\n\n # Start sampling.\n self.clear_sample_data()\n self.driver.start_sampling()\n self.clear_async_data()\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity record, time record.\n log.info(\"FIRST FILE A0000002 INTEG TEST GET\")\n self.create_sample_data('valid_A0000002.DEC', \"A0000002.DEC\")\n self.assert_data(None, 'valid_A0000002.yml', \n count=3, timeout=10)\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity records twice, time record.\n log.info(\"SECOND FILE A0000004 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n self.assert_data(None, 'valid_A0000004.yml', \n count=5, timeout=10)\n\n # Made-up data with all flags set to True.\n # Field values may not be realistic.\n log.info(\"THIRD FILE A0000003 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_data(None, 'all_A0000003.yml', \n count=4, timeout=10)\n log.info(\"END INTEG TEST GET\")", "def test_available_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.available_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa2\"]", "def test_get(self):\n pass", "def get_feature(\n self,\n ) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_feature\" not in self._stubs:\n self._stubs[\"get_feature\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature\",\n request_serializer=featurestore_service.GetFeatureRequest.serialize,\n response_deserializer=feature.Feature.deserialize,\n )\n return self._stubs[\"get_feature\"]", "def test2():\n\n # Internal Feature Layers\n feature_queries = []\n feature_layers = db(db.gis_layer_feature.resource == \"office\").select()\n for layer in feature_layers:\n if layer.role_required and not auth.s3_has_role(layer.role_required):\n continue\n _layer = gis.get_feature_layer(layer.module,\n layer.resource,\n layer.name,\n layer.popup_label,\n config=config,\n marker_id=layer.marker_id,\n active=layer.visible,\n polygons=layer.polygons,\n opacity=layer.opacity)\n if _layer:\n # Add a URL for downloading the GeoJSON\n # @ToDO: add to gis.get_feature_layer\n _layer[\"url\"] = \"%s.geojson\" % URL(r=request, c=layer.module, f=layer.resource)\n marker = db(db.gis_marker.id == _layer[\"marker\"]).select(db.gis_marker.image,\n db.gis_marker.height,\n db.gis_marker.width,\n limitby=(0, 1)).first()\n _layer[\"marker\"] = marker\n feature_queries.append(_layer)\n\n return dict(feature_queries=feature_queries)", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def test_api_msa_endpoint(self):\n params = {'lender': '90000451965', 'metro': '49180'}\n url = reverse(msa)\n resp = self.client.get(url, params)\n result_dict = json.loads(resp.content)\n self.assertTrue(isinstance(result_dict, dict))\n self.assertContains(resp, 'features')", "def test_module(client: Client, 
*args) -> Tuple[str, dict, dict]:\n\n client.run_parameters_validations()\n\n for service in client.services:\n # if there are risk rules, select the first one for test\n risk_rule = client.risk_rule[0] if client.risk_rule else None\n client.build_iterator(service, client.indicator_type, risk_rule)\n client.get_batches_from_file(limit=1)\n return 'ok', {}, {}", "def test_get_client(self):\n pass", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def list_features(\n self,\n ) -> Callable[\n [featurestore_service.ListFeaturesRequest],\n Awaitable[featurestore_service.ListFeaturesResponse],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_features\" not in self._stubs:\n self._stubs[\"list_features\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures\",\n request_serializer=featurestore_service.ListFeaturesRequest.serialize,\n response_deserializer=featurestore_service.ListFeaturesResponse.deserialize,\n )\n return self._stubs[\"list_features\"]", "def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n hbase = HbaseUtils(self._get_conn(config))\n project = config.project\n table_name = _table_id(project, table)\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n row_keys = [\n serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n ).hex()\n for entity_key in entity_keys\n ]\n rows = hbase.rows(table_name, row_keys=row_keys)\n\n for _, row in rows:\n res = {}\n res_ts = None\n for feature_name, feature_value in row.items():\n f_name = HbaseConstants.get_feature_from_col(feature_name)\n if requested_features is not None and f_name in requested_features:\n v = ValueProto()\n v.ParseFromString(feature_value)\n res[f_name] = v\n if f_name is HbaseConstants.EVENT_TS:\n ts = struct.unpack(\">L\", feature_value)[0]\n res_ts = datetime.fromtimestamp(ts)\n if not res:\n result.append((None, None))\n else:\n result.append((res_ts, res))\n return result", "def test_get_learners(self):\n pass", "def test_get_with_filter_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 3", "def test_get(self):\r\n resp = self.client.get_json(self.url + '/0')\r\n self.assertEqual(resp.status_code, 200)\r\n obj = json.loads(resp.content)\r\n self.assertEqual(self.starting_graders[0], obj)", "def test_api_predictors_get(self):\n pass" ]
[ "0.6168632", "0.60641015", "0.60555446", "0.6000268", "0.59343565", "0.59123015", "0.58149856", "0.5790768", "0.5763791", "0.5751622", "0.5747959", "0.5700878", "0.56979954", "0.5681993", "0.566791", "0.5665153", "0.56642336", "0.56637", "0.56631935", "0.5621099", "0.5589105", "0.5572067", "0.55581313", "0.5554992", "0.5538371", "0.5524045", "0.5492497", "0.54899895", "0.54743785", "0.5472374" ]
0.66819775
0
Initializes the object to have a pronunciation dictionary available
def __init__(self): self._pronunciations = nltk.corpus.cmudict.dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self, reaction_info: ParticleReactionKinematicsInfo):\n self._reaction_info = reaction_info\n self._registered_inv_masses: Dict[Tuple, str] = dict()\n self._registered_subsystems: Dict[SubSystem, Tuple[str, str]] = dict()", "def __init__(self):\n self.dictionary = None", "def __init__ ( self , phenotypes ) :\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\t\t\tassert type( v[1] ) is int and type( v[0] ) is int, ' (!) recent change means bounds need to be in ints now: https://github.com/zafarali/metastasis/issues/17'\n\n\t\tself.phenotypes = phenotypes", "def _init(self):\n pass", "def __init__(self):\n super(sppasSymbolSettings, self).__init__()\n\n self.__dict__ = dict(\n unk=\"<UNK>\",\n phone=sppasSymbolSettings.__phone_symbols(),\n ortho=sppasSymbolSettings.__ortho_symbols(),\n all=sppasSymbolSettings.__all_symbols()\n )", "def __initialize_nlp(self, nlp):\n nlp[\"nbQ\"] = 0\n nlp[\"nbQdot\"] = 0\n nlp[\"nbTau\"] = 0\n nlp[\"nbMuscles\"] = 0\n nlp[\"plot\"] = {}\n nlp[\"var_states\"] = {}\n nlp[\"var_controls\"] = {}\n nlp[\"CX\"] = self.CX\n nlp[\"x\"] = nlp[\"CX\"]()\n nlp[\"u\"] = nlp[\"CX\"]()\n nlp[\"J\"] = []\n nlp[\"g\"] = []\n nlp[\"g_bounds\"] = []\n nlp[\"casadi_func\"] = {}", "def __init__(self):\n self.lookup = {}", "def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)", "def __init__(self):\n self._inst = {}", "def __init__(self,paramDict):\n self.pandeia_params = paramDict\n self.prep_and_run()", "def __init__(self, dict):\n self.dict = dict", "def __init__(self, dict):\n self.dict = dict", "def initPheromone(self):\n print '[Initializing pheromone values]'\n self.pheromoneValue = {}\n\n for token in self.postingTokens:\n self.pheromoneValue[token] = self.initialPheromone", "def __init__(self):\n self.ngramCounts = collections.defaultdict(zero_fn);\n self.continuationProb = collections.defaultdict(set_fn);\n self.total = 0;", "def __init__(self):\n self.dict = {}", "def __init__(self):\n self.dict = {}", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def __init__():", "def __init__(self):\n self._ngrams = {}" ]
[ "0.7030081", "0.62855494", "0.62579364", "0.6253256", "0.6249114", "0.61850667", "0.6163011", "0.6113119", "0.6111343", "0.6096763", "0.6093766", "0.60932755", "0.60580236", "0.60405856", "0.60405856", "0.60081416", "0.60046226", "0.59790784", "0.59790784", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59675187", "0.59673834", "0.5965411" ]
0.75347185
1
Returns the number of syllables in a word. If there's more than one pronunciation, take the shorter one. If there is no entry in the dictionary, return 1.
def num_syllables(self, word): # TODO: provide an implementation! word = word.lower() D = self._pronunciations #D = nltk.corpus.cmudict.dict() if(word not in D.keys()): #print word not in CMUDictionary return 1 #count stores no of syllables for each pronunciation of the word count = [] #for each pronunciation for x in D[word]: n = 0 #for each syllable for y in x: #if vowel sound if y[-1].isdigit(): n = n + 1 count.append(n) # return the pronunciation having least syllables return min(count) #return min([len([y for y in x if y[-1].isdigit()]) for x in D[word.lower()]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n return len(word)//3", "def num_syllables(self, word):\n\n return 1", "def num_syllables(self, word):\n \"\"\"\n using the logic of vowel counting, count all vowels in the pronunciations\n \"\"\"\n dictionary = self._pronunciations;\n # check if word is present in the CMU dictionary\n if word in dictionary :\n word_pronunciations = dictionary[word.lower()]\n else :\n return 1\n \n vowels = ['A', 'E', 'I', 'O', 'U']\n \n ## find the shorter pronunciation for word\n shorter_arr = [];\n for pronunciation in word_pronunciations :\n if len(pronunciation) > len(shorter_arr) : shorter_arr = pronunciation\n \n num_length = 0\n \n for phoneme in shorter_arr :\n if phoneme[:1] in vowels : num_length += 1\n \n return num_length", "def syllable_counter(word):\n letters = [c for c in list(word.lower()) if c.isalpha()]\n\n if len(letters) == 0:\n return 0\n\n if len(letters) in [1, 2]:\n return 1\n\n num_syllables = 0\n last_syllable_pos = 0\n for i, letter in enumerate(letters):\n if letter not in VOWELS:\n if i and letters[i - 1] in VOWELS:\n num_syllables += 1\n last_syllable_pos = i\n syllable = ''\n elif i == len(letters) - 1:\n if letter != 'e':\n num_syllables += 1\n elif i - last_syllable_pos >= 2:\n num_syllables += 1\n\n return num_syllables or 1", "def countsyllables_en(word):\r\n\tif not word:\r\n\t\treturn 0\r\n\r\n\t# Remove final silent 'e'\r\n\tif word[-1] == \"e\":\r\n\t\tword = word[:-1]\r\n\r\n\t# Check for a cached syllable count\r\n\tif word in fallback_cache:\r\n\t\treturn fallback_cache[word]\r\n\r\n\t# Count vowel groups\r\n\tresult = 0\r\n\tprev_was_vowel = False\r\n\tfor char in word:\r\n\t\tis_vowel = char in VOWELS or char == 'y'\r\n\t\tif is_vowel and not prev_was_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\t# Add & subtract syllables\r\n\tfor r in fallback_addsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult += 1\r\n\tfor r in fallback_subsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult -= 1\r\n\r\n\t# Cache the syllable count\r\n\tfallback_cache[word] = result\r\n\r\n\treturn result", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def get_syllables(word):\n\tif word not in syllable_dict:\n\t\ttry: syllables = wordApi.getHyphenation(word)\n\t\texcept UnicodeEncodeError:\n\t\t\tsyllable_dict[word] = np.NaN\n\t\tif not syllables:\n\t\t\tsyllables = wordApi.getHyphenation(word.lower())\n\t\t\tif not syllables:\n\t\t\t\tsyllables = wordApi.getHyphenation(word.capitalize())\n\t\t\t\tif not syllables:\n\t\t\t\t\tsyllable_dict[word] = np.NaN\n\t\t\t\t\treturn syllable_dict[word]\n\t\tsyllable_dict[word] = len(syllables)\n\treturn syllable_dict[word]", "def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count", "def estimate(word):\n parts = re.split(r'[^aeiouy]+', word)\n valid_parts = []\n\n for part in parts:\n if part != '':\n valid_parts.append(part)\n\n syllables = 0\n\n for p in re_subsyllables:\n if p.match(word):\n syllables -= 1\n\n for p in re_addsyllables:\n if 
p.match(word):\n syllables += 1\n\n syllables += len(valid_parts)\n\n if syllables <= 0:\n syllables = 1\n\n return syllables", "def word_syllables(word):\n\n count = 0\n endings = '!@#$%^&*()_+[]{}:;,.eE\"'+\"'\"\n\n while word[-1] in endings:\n word = word[: -1]\n\n if len(word) <= 3:\n return 1\n\n vows = 'aeiouAEIOU'\n prev_char_vow = False\n for char in word:\n if char in vows:\n if not prev_char_vow:\n count = count + 1\n prev_char_vow = True\n else:\n prev_char_vow = False\n\n if word[-1] in 'Yy':\n count = count + 1\n\n return count", "def syllable_count(word):\n # Count the vowels in the word\n # Subtract one vowel from every dipthong\n count = len(re.findall(r'([aeiouyAEIOUY]+)', word))\n # Subtract any silent vowels\n if len(word) > 2:\n if word[-1] == 'e' and \\\n not is_vowel(word[-2]) and \\\n is_vowel(word[-3]):\n count = count - 1\n return count", "def count_syllables_in_word(word):\n\n count = 0\n\n endings = '!,;.?:'\n last_char = word[-1]\n\n if last_char in endings:\n processed_word = word[0:-1]\n else:\n processed_word = word\n\n\n if len(processed_word) <= 3:\n return 1\n if processed_word[-1] in 'Ee':\n processed_word = processed_word[0:-1]\n\n vowels = 'aeiouAEIOU'\n prev_char_was_vowel = False\n\n for char in processed_word:\n if char in vowels:\n if not prev_char_was_vowel:\n count += 1\n prev_char_was_vowel = True\n\n else:\n prev_char_was_vowel = False\n\n if processed_word[-1] in 'yY':\n count += 1\n \n\n return count", "def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def n_polysyllable_words(\n doc_or_tokens: types.DocOrTokens,\n *,\n lang: Optional[str] = None,\n min_n_syllables: int = 3,\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns >= min_n_syllables)", "def count_syllables(text):\n\n import re\n\n # Make a list of vowel sounds presenting in the text (converted to lower-case letters)\n syllable_list = re.findall(r'[aiouy]+e*|e(?!d\\b|ly)[aiouye]?|[td]ed|le\\b', text.lower())\n # Find the size of the list\n count = len(syllable_list)\n\n return count", "def count_syllables(book):\n d = dict(cmudict.entries())\n with open(book, 'r') as myfile:\n booky = myfile.read().lower()\n tokenized_book = nltk.word_tokenize(booky)\n\n count = 0\n for word in tokenized_book:\n count += ( nsly(word, d))\n\n return count", "def n_syllables_per_word(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> tuple[int, ...]:\n if lang is None:\n if isinstance(doc_or_tokens, Doc):\n lang = doc_or_tokens.lang_\n else:\n raise ValueError(\n \"`lang` must be specified when computing n syllables per word \"\n \"from an iterable of tokens\"\n )\n hyphenator = utils.load_hyphenator(lang=lang)\n words = utils.get_words(doc_or_tokens)\n return 
tuple(len(hyphenator.positions(word.lower_)) + 1 for word in words)", "def n_monosyllable_words(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns == 1)", "def n_syllables(doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return sum(nspw)", "def count_syllables(word):\n vowels = \"aeiouy\"\n count = 0\n last_was_vowel = False\n for letter in word:\n found_vowel = False\n for v in vowels:\n if v == letter:\n if not last_was_vowel: count += 1 # don't count diphthongs\n found_vowel = last_was_vowel = True\n break\n if not found_vowel: # If full cycle and no vowel found, set last_was_vowel to false\n last_was_vowel = False\n\n\n if len(word) > 2 and word[-2:] == \"es\" and count > 1: # Remove es - it's \"usually\" silent (?)\n count -= 1\n\n if len(word) > 4 and word[-1:] == \"e\": # remove silent e\n count -= 1\n\n if len(word) > 1 and word[-2:] == \"ee\": # adds 1 for na\n count += 1\n\n if len(word) > 1 and word[-2:] == \"na\": # adds 1 for na\n count += 1\n\n # Check for special case words\n special_case = ['eloise','i']\n if word in special_case:\n count += 1\n\n return count", "def countsyllables_nlde(word):\r\n\tresult = 0\r\n\tprev_was_vowel = word[0] in VOWELS\r\n\tfor char in word[1:]:\r\n\t\tis_vowel = char in VOWELS\r\n\t\tif prev_was_vowel and not is_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\tif (len(word) > 1 and word[0] in VOWELS\r\n\t\t\tand word.endswith('e') and not word[-2] in VOWELS):\r\n\t\tresult += 1\r\n\treturn result or 1", "def syllable_counter(string):\n\ti = 0 # index of while loop \n\tcounter = 0 # counter of syllables\n\tvowels = ['a','e','i','o','u','y','e '] # what are vowels\n\tdiphthongs = ['ee', 'ei', 'ea', 'oo', 'oi', 'oy', 'ou', 'ai', 'ie', 'ey', 'ay'] #what are diphthongs\n\tindex = 0 \n\n\twhile string[index] != ' ': # break at space\n\t\tchar = string[index] # look at each letter in string\n\t\tnext_char = string[index+1] # and the letter following\n\t\tif char.isalpha():\n\t\t\tif char in vowels: \n\t\t\t\tif (char + next_char in diphthongs): \n\t\t\t\t\tcounter = counter + 1 # count\n\t\t\t\t\tindex = index + 1 # skips second letter in diphthong\n\t\t\t\telif (char == 'e' and next_char == ' '): # assume if e at end of word, is not syllable\n\t\t\t\t\tpass # don't count\n\t\t\t\telse: \n\t\t\t\t\tcounter = counter + 1 # if it's a solitary vowel, add one to counter\n\t\tindex = index + 1\n\n\treturn counter", "def count(word):\n\n return len(word)", "def getWordScore(word: str, n: int) -> int:\n # (SCRABBLE_LETTER_VALUES[char]) rise a exception if char not in SCRABBL...\n ans = 
sum(SCRABBLE_LETTER_VALUES.get(char, 0) for char in word) * len(word)\n\n # [if False, if True] [condition] (ternary op)\n return [ans, ans + 50] [len(word) == n]", "def number_syllables(self):\n return len(self.array_form)", "def getWordCharCount(w):\r\n rus = len(re.findall(r\"[а-я]\",w))\r\n eng = len(re.findall(r\"[a-z]\",w))\r\n c = len(w) \r\n return c, rus, eng", "def count_word2(self, word):\n pass", "def count_words_and_dublicates(novel):", "def getWordScore(word, n):\n score = 0\n\n for letters in word:\n if letters in SCRABBLE_LETTER_VALUES:\n score += SCRABBLE_LETTER_VALUES[letters]\n\n if len(word) == n:\n return (score * len(word)) + 50\n else:\n return score * len(word)" ]
[ "0.8187434", "0.7941615", "0.7839003", "0.77053165", "0.76928765", "0.76834965", "0.7579669", "0.7408953", "0.724696", "0.722788", "0.7227621", "0.71323586", "0.70042217", "0.69125587", "0.68785167", "0.68268627", "0.6769647", "0.67252815", "0.67212397", "0.6622493", "0.65549856", "0.65315974", "0.6458056", "0.6143281", "0.6068877", "0.5966394", "0.5941749", "0.5903695", "0.5882586", "0.58493674" ]
0.8625879
0
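
A minimal, self-contained sketch of the syllable-counting idea in the num_syllables pair above, assuming the CMU pronouncing dictionary is the lookup source. Vowel phonemes in CMUdict end in a stress digit, so counting those per pronunciation and taking the minimum reproduces the behaviour described in the query; the standalone name count_syllables and the fallback of 1 for unknown words come from the docstring, not from any class shown here.

```python
import nltk

# Requires the CMU pronouncing dictionary: run nltk.download("cmudict") once beforehand.
_CMU = nltk.corpus.cmudict.dict()

def count_syllables(word, pron_dict=_CMU):
    """Smallest syllable count over all pronunciations; 1 if the word is unknown."""
    prons = pron_dict.get(word.lower())
    if not prons:
        return 1  # out-of-vocabulary fallback, as described in the query above
    # Vowel phonemes carry a stress digit (e.g. 'AH0'), so counting them counts syllables.
    return min(sum(ph[-1].isdigit() for ph in pron) for pron in prons)

if __name__ == "__main__":
    print(count_syllables("fire"))      # shorter pronunciation wins
    print(count_syllables("syllable"))
```
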
Takes text where lines are separated by newline characters. Returns True if the text is a limerick, False otherwise. A limerick is defined as a poem with the form AABBA, where the A lines rhyme with each other, the B lines rhyme with each other, and the A lines do not rhyme with the B lines.
def is_limerick(self, text): # TODO: provide an implementation! text = text.lower() p = [] p = text.split('\n') p = [i.strip(' ') for i in p] p = list(filter(None, p)) # all limericks must have 5 lines AABBA if len(p) != 5: return False #words list stores the list of words in each line of the limerick words = [] for i in range(0, 5): p[i] = p[i].strip(".,:;?!") temp = [] T = p[i] temp = self.apostrophe_tokenize(T) words.append(temp) count = [] #print len(words) for i in range(0, 5): #print words[i] n = 0 for j in words[i]: n = n + self.num_syllables(j) count.append(n) # check if any line has fewer than 4 syllables for i in count: if i < 4: return False A1 = count[0] A2 = count[1] B1 = count[2] B2 = count[3] A3 = count[4] # check if B1 has fewer syllables than A1, A2 and A3 if B1 > A1 or B1 > A2 or B1 > A3: return False # check if B2 has fewer syllables than A1, A2 and A3 if B2 > A1 or B2 > A2 or B2 > A3: return False # check if the no of syllables in B1 and B2 differs by more than 2 if abs(B1 - B2) > 2: return False # check if any two A's differ in no of syllables by more than 2 if abs(A1 - A2) > 2 or abs(A1 - A3) > 2 or abs(A2 - A3) > 2: return False #check if A1, A2 and A3 rhyme with each other if self.rhymes(words[0][-1], words[1][-1]) and self.rhymes(words[0][-1], words[4][-1]) and self.rhymes(words[1][-1], words[4][-1]): #check if B1 and B2 rhyme with each other if self.rhymes(words[2][-1],words[3][-1]): #check if A and B do not rhyme if (not self.rhymes(words[0][-1], words[2][-1]) and not self.rhymes(words[0][-1], words[3][-1]) and not self.rhymes(words[1][-1], words[2][-1]) and not self.rhymes(words[1][-1], words[3][-1]) and not self.rhymes(words[4][-1], words[2][-1]) and not self.rhymes(words[4][-1], words[3][-1]) ): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_limerick(self, text):\n\n return False", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False \n #remove punctuations for all sentences\n words_sentence1 = word_tokenize(sentences[0].translate(None, string.punctuation).lower())\n words_sentence2 = word_tokenize(sentences[1].translate(None, string.punctuation).lower())\n words_sentence3 = word_tokenize(sentences[2].translate(None, string.punctuation).lower())\n words_sentence4 = word_tokenize(sentences[3].translate(None, string.punctuation).lower())\n words_sentence5 = word_tokenize(sentences[4].translate(None, string.punctuation).lower())\n \n #check rhymes for AAA BB and not rhymes for AB\n ret_flag = (self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence2[len(words_sentence2) - 1]) and\n self.rhymes(words_sentence3[len(words_sentence3) - 1 ],\n words_sentence4[len(words_sentence4) - 1 ]) and\n self.rhymes(words_sentence2[len(words_sentence2) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and\n self.rhymes(words_sentence1[len(words_sentence1) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence4[len(words_sentence4) - 1])))\n \n if ret_flag == False: return False\n \n \n # Check additional constraints\n \n sum_of_syl1 = 0\n for word in words_sentence1 : sum_of_syl1 += self.num_syllables(word)\n \n if sum_of_syl1 < 4 : return False\n sum_of_syl2 = 0\n for word in words_sentence2 : sum_of_syl2 += self.num_syllables(word)\n \n if sum_of_syl2 < 4 : return False\n \n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl2 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl2\n else : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl3 = 0\n for word in words_sentence3 : sum_of_syl3 += self.num_syllables(word)\n \n if sum_of_syl3 < 4 : return False\n sum_of_syl4 = 0\n for word in words_sentence4 : sum_of_syl4 += self.num_syllables(word)\n \n if sum_of_syl4 < 4 : return False\n \n \n sum_of_syl_B_diff = 0\n if sum_of_syl3 > sum_of_syl4 : sum_of_syl_B_diff = sum_of_syl3 - sum_of_syl4\n else : sum_of_syl_B_diff = sum_of_syl4 - sum_of_syl3\n \n if sum_of_syl_B_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl1 and sum_of_syl3 > sum_of_syl2 \n and sum_of_syl4 > sum_of_syl1 and sum_of_syl4 > sum_of_syl2) : return False\n \n \n sum_of_syl5 = 0\n for word in words_sentence5 : sum_of_syl5 += self.num_syllables(word) \n \n if sum_of_syl5 < 4 : return False\n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl_A_diff = 0\n if sum_of_syl2 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl5\n else : 
sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl2\n \n \n if sum_of_syl_A_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl5 and sum_of_syl4 > sum_of_syl5) : return False\n \n \n return ret_flag", "def is_line_on_multiline(feature_1: Sequence, feature_2: Sequence) -> bool:\n return any(is_line_on_line(feature_1, coords_2) for coords_2 in feature_2)", "def test_LogicalLines(self) -> None:\n content = \"\"\"\nfoo \\\\\nbar \\\\\nbaz\nfoo\nbling \\\\\nbling \\\\ bling\nbling\n\"\"\"\n fobj = io.StringIO(content)\n lines = LogicalLines(fobj).readlines()\n assert lines == [\n '\\n',\n 'foo bar baz\\n',\n 'foo\\n',\n 'bling bling \\\\ bling\\n',\n 'bling\\n',\n ], lines", "def treat_new_line(self,text):\n text=text.replace('.\\n','. ')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n 
lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n text=\"\".join(lw)\n return text", "def IsMultiline(self):\r\n\r\n return \"\\n\" in self.caption", "def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])", "def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('•', '\\n',text)\n return text", "def _has_newline(line) -> bool:\n if line and (\"\\r\" in line or \"\\n\" in line):\n return True\n return False", "def is_sonnet(poem):\n return len([line for line in poem.split(\"\\n\") if line]) == 14", "def detect_nl(string_or_lines, line_end=None):\n if line_end is None:\n line_end = '\\n' if (string_or_lines and\n string_or_lines[-1].endswith('\\n')) else ''\n return line_end", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx == -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def match_multiline(self, text, delimiter, in_state, style):\n # If inside triple-single quotes, start at 0\n if self.previousBlockState() == in_state:\n start = 0\n add = 0\n # Otherwise, look for the delimiter on this line\n else:\n start = delimiter.indexIn(text)\n # Move past this match\n add = delimiter.matchedLength()\n\n # As long as there's a delimiter match on this line...\n while start >= 0:\n # Look for the ending delimiter\n end = delimiter.indexIn(text, start + add)\n # Ending delimiter on this line?\n if end >= add:\n length = end - start + add + delimiter.matchedLength()\n self.setCurrentBlockState(0)\n # No; multi-line string\n else:\n self.setCurrentBlockState(in_state)\n length = len(text) - start + add\n # Apply formatting\n self.setFormat(start, length, self.styles[style])\n # Look for the next match\n start = delimiter.indexIn(text, start + length)\n\n # Return True if still inside a multi-line string, False otherwise\n if self.currentBlockState() == in_state:\n return True\n else:\n return False", "def make_line(line, n_syl, syl_counts):\n\n # Current number of syllables in constructed line.\n # This includes the syllable count of the first word.\n curr = 0\n\n # Now, since the list is reversed, the last word of the actual sonnet\n # line is the first word of 'line'. 
So we want to check if this\n # word can be counted as one syllable.\n\n # Number of syllable in first word (last word of actual line)\n init_syl = syl_counts[line[0]]\n init_syl_alt = init_syl\n\n # Alternative syllable count\n if ((line[0] + '_') in syl_counts):\n init_syl_alt = syl_counts[line[0] + '_']\n\n for i in range(1, n_syl):\n if line[i] not in syl_counts:\n return (False, '')\n\n w_syl = syl_counts[line[i]]\n\n if init_syl + curr + w_syl and init_syl_alt + curr + w_syl > n_syl:\n return (False, '')\n if init_syl+ curr + w_syl == n_syl or init_syl_alt + curr + w_syl == n_syl:\n return (True, ' '.join(line[:i+1]))\n curr += w_syl", "def is_line(self): \n return False", "def is_line(self):\n return True", "def is_line(self):\n return True", "def handleNewLineBeforeEditor(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n # autoIndent, autoBullet, autoUnbullet\r\n \r\n return True", "def is_line_on_line(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n line_on_line = False\n\n for coords in feature_1:\n\n line_on_line = boolean_point_on_line(coords, feature_2)\n if not line_on_line:\n break\n\n return line_on_line", "def test_format_linebreaks():\r\n test_cases = (\r\n ('Simple:\\n\\nLine two', '<p>Simple:</p><p>Line two</p>'),\r\n ('DOS:\\r\\n\\r\\nLine breaks', '<p>DOS:</p><p>Line breaks</p>'),\r\n ('Classic Mac:\\r\\rLine breaks', '<p>Classic Mac:</p><p>Line breaks</p>'),\r\n ('Consecutive:\\n\\n\\n\\n\\n\\nLine breaks', '<p>Consecutive:</p><p>Line breaks</p>'),\r\n ('Multiple:\\r\\n\\r\\nLine\\r\\n\\r\\nbreaks', '<p>Multiple:</p><p>Line</p><p>breaks</p>'),\r\n ('\\nLeading and trailing\\n', '<p>Leading and trailing</p>'),\r\n ('Single\\ndoesn\\'t wrap', '<p>Single\\ndoesn\\'t wrap</p>'),\r\n ('Quote:\\n\\n<blockquote>(1) One\\n\\n(2) Two</blockquote>\\n\\nAfter',\r\n '<p>Quote:</p><blockquote><p>(1) One</p><p>(2) Two</p></blockquote><p>After</p>'),\r\n ('Quote 2:\\n\\n<blockquote>(1) One\\n\\n(2) Two\\n</blockquote>\\n\\nAfter',\r\n '<p>Quote 2:</p><blockquote><p>(1) One</p><p>(2) Two\\n</p></blockquote><p>After</p>'),\r\n )\r\n for input_text, expected_output in test_cases:\r\n yield is_equal, format_linebreaks(input_text), expected_output", "def blocks_are_equal(i, j, types, text, n):\n while i < n and j < n:\n if text[i] == text[j]:\n if is_lms(i, types) and is_lms(j, types):\n return True\n else:\n i += 1\n j += 1\n else:\n return False\n return False", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def is_line(self):\n return False", "def is_multipoint_on_linestring(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n points_on_line = False\n\n points_on_line = all(\n boolean_point_on_line(coords_1, feature_2[1]) for coords_1 in feature_1[1]\n )\n\n if not points_on_line:\n return points_on_line\n\n points_on_line = any(\n boolean_point_on_line(coords_1, feature_2[1], {\"ignoreEndVertices\": True})\n for coords_1 in feature_1[1]\n )\n\n return points_on_line", "def logicalLines(iterable, **kwargs):\n # kwargs\n kwargs = lowerKeys(kwargs)\n continueChar = kwargs.get('continuechar', '-')\n commentChar = kwargs.get('commentchar', '!')\n #\n iterable = ( line.strip() for line in iterable )\n tmp = []\n for line in iterable:\n if line.split(commentChar)[0].endswith(continueChar):\n tmp.append(line[:-1])\n else:\n if tmp:\n tmp.append(line)\n yield ' '.join(tmp)\n tmp = []\n else:\n yield line\n # flush\n if tmp:\n yield ' '.join(tmp)", "def ll(L1, L2):\n if not all(isinstance(L, Line) 
for L in (L1, L2)):\n raise TypeError('ll() expects two lines')\n return L1.normal_vector() ** L2.normal_vector() == 0", "def is_mlcmt(line,mlcmto,mlcmtc):\n \n return [line.find(mlcmto),line.find(mlcmtc)]", "def _l_(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('_l_() expects two lines')\n return L1.normal_vec() * L2.normal_vec() == 0", "def paragraphify(text: str) -> str:\n text = text and text.replace('\\r', '').strip('\\n')\n\n if not text:\n return ''\n\n return ''.join(f'<p>{p}</p>' for p in (\n p.replace('\\n', '<br>') for p in _multiple_newlines.split(text)\n ))", "def parseLines(text):\n lines = text.strip().split('\\n')\n esclines = []\n esc = False\n for l in lines:\n if esc:\n esclines[-1] = esclines[-1]+l\n else:\n esclines.append(l)\n if len(l)>0 and l[-1]=='\\\\':\n esclines[-1] = esclines[-1][:-1]\n esc = True\n else:\n esc = False\n return esclines" ]
[ "0.7332721", "0.7118218", "0.6140549", "0.58115304", "0.5777548", "0.5629705", "0.5620639", "0.5535499", "0.5423312", "0.5394802", "0.5378505", "0.53308886", "0.5302663", "0.5278468", "0.52720255", "0.5223611", "0.5223611", "0.51562977", "0.51394373", "0.5101829", "0.50781065", "0.50735366", "0.5072135", "0.5064861", "0.5036632", "0.5016328", "0.5015158", "0.50030005", "0.49849412", "0.49588516" ]
0.7593807
0
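
The is_limerick pair above calls a rhymes helper that is not included in this excerpt. One common CMUdict-based approximation, sketched here under that assumption, treats two words as rhyming when some pronunciation of the shorter word is a phoneme-level suffix of a pronunciation of the longer word; the function below is illustrative only and may differ from the helper the original class used.

```python
import nltk

_CMU = nltk.corpus.cmudict.dict()

def rhymes(a, b, pron_dict=_CMU):
    """True when some pronunciation of the shorter word is a suffix of the longer word's."""
    for pa in pron_dict.get(a.lower(), []):
        for pb in pron_dict.get(b.lower(), []):
            short, long_ = (pa, pb) if len(pa) <= len(pb) else (pb, pa)
            if long_[len(long_) - len(short):] == short:
                return True
    return False

if __name__ == "__main__":
    print(rhymes("ocean", "motion"))  # True under this suffix definition
    print(rhymes("ocean", "apple"))   # False
```
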
Calculates sky background temperature for a given Galactic longitude (gl), Galactic latitude (gb), and frequency (freq in MHz). Coordinates are in degrees. Assumes a spectral index of "index"; the default is 2.55. The return value is in K. If a frequency array 'freqs' is given, then Tsky is calculated for each frequency in the array, and the returned value is a list of Tsky values.
def tsky(gl, gb, freq, index, freqs=None): # reading the table nsky=np.zeros((90, 180), dtype=float) for ii in xrange(90): for jj in xrange(180): pos=(ii*180+jj)*5 nsky[ii,jj]=float(haslam_table[pos:pos+5]) # Convert to standard l,b b = int(gb + 90.5) if b >= 180: b = 179 l = int(gl + 0.5) if gl >= 360: l = 0 l = int((l / 4)) if freqs == None: tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) return tsky else: temps=[] for freq in freqs: tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) temps.append(tsky) return temps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky_range(gl, gb, f1, f2, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b = 179\n\tl = int(gl + 0.5)\n\tif gl >= 360: l = 0\n\tl = int((l / 4))\n\t\n\tif freqs == None:\n\t\ttot=0\n\t\tfor ii in xrange(101):\n\t\t\tfreq = f1 + ii*(f2-f1)/100.\n\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\ttot += tsky\n\t\ttot /= 100.\n\t\treturn tot\n\telse:\n\t\ttemps=[]\n\t\tfor ff in xrange(1, len(freqs)):\n\t\t\ttot = 0\n\t\t\tfor ii in xrange(101):\n\t\t\t\tfreq = freqs[ff-1] + ii*(freqs[ff]-freqs[ff-1])/100.\n\t\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\t\ttot += tsky\n\t\t\ttot /= 100.\n\t\t\ttemps.append(tot)\n\t\treturn temps", "def tskypy(self, psr):\n # ensure l is in range 0 -> 360\n b = psr.gb\n if psr.gl < 0.:\n l = 360 + psr.gl\n else:\n l = psr.gl\n\n # convert from l and b to list indices\n j = b + 90.5\n if j > 179:\n j = 179\n\n nl = l - 0.5\n if l < 0.5:\n nl = 359\n i = float(nl) / 4.\n \n tsky_haslam = self.tskylist[180*int(i) + int(j)]\n # scale temperature before returning\n return tsky_haslam * (self.freq/408.0)**(-2.6)", "def Tsky(source, freq=350*u.MHz, model='2008'):\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n\n m=SkyModel(freq=freq, tskymodel=model)\n return m.Tsky(source)", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def gpower_integrand(self,theta,phi):\n\n wavelength = 299.9/self.frequency\n if(phi == 0): phi = .00001\n if(phi == np.pi): phi = np.pi - .00001\n\n self.aa.alt = np.pi/2.0 - theta\n self.aa.az = np.pi/2.0 - phi\n\n coords = self.aa.raDec(self.__lst_current,self.location)\n\n coords = self.Rotator(np.pi/2 - coords.dec,coords.ra)\n\n Tsky = hp.get_interp_val(self.hpmap,coords[0],coords[1])*(self.frequency/408.0)**(-2.55)\n\n ans = self.lofasm.beam_pattern(theta,phi,[0,0,1])\n ans += self.lofasm.beam_pattern(theta,phi,[0,1,0]) \n ans *= (Tsky*(1.3804e-23)/wavelength**2)/(1e-26)/2.0\n\n return ans", "def gtgram(\n wave, fs, window_time, hop_time, channels, f_min, f_max=None, return_freqs=False\n):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gtgram_strides(fs, window_time, hop_time, xe.shape[1])\n\n y = np.zeros((channels, ncols))\n\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n\n if return_freqs:\n cfs = centre_freqs(fs, channels, f_min, f_max)\n return cfs, y\n return y", "def compute_ctf(freqs,rots,akv,cs,wgh,dfmid1f,dfmid2f,angastf,dscale,bfactor=None): \n av = akv * 1e3 # Convert kilovots to volts\n cs = cs * 1e7 # Convert spherical aberation from mm to A\n \n # wavelength of electrons\n elambda 
= 12.2643247 / n.sqrt(av + av**2 * 0.978466e-6)\n \n wgh1 = dscale*n.sqrt(1.0 - wgh**2)\n wgh2 = dscale*wgh\n\n ix = freqs[:,0]\n iy = freqs[:,1]\n freq_radius = n.sqrt(ix**2 + iy**2)\n\n angle = elambda*freq_radius\n angspt = n.arctan2(iy,ix)\n if rots is not None:\n angspt = n.mod(angspt.reshape((-1,1)) + rots.reshape((1,-1)),2.0*n.pi)\n angle = angle.reshape((-1,1)) \n c1 = 2.0*n.pi*angle**2/(2.0*elambda)\n c2 = -c1*cs*angle**2/2.0\n angdif = angspt - angastf\n ccos = n.cos(2.0*angdif)\n df = 0.5*(dfmid1f + dfmid2f + ccos*(dfmid1f-dfmid2f))\n chi = c1*df + c2\n\n ctf = -wgh1*n.sin(chi) - wgh2*n.cos(chi)\n \n if bfactor is not None:\n ctf *= envelope_function(freq_radius, bfactor)\n\n return n.require(ctf,dtype = freqs.dtype)", "def thermodynamic_temperature(frequency, T_cmb=None):\n nu = frequency.to(si.GHz, spectral())\n\n if T_cmb is None:\n from astropy.cosmology import default_cosmology\n\n T_cmb = default_cosmology.get().Tcmb0\n\n def f(nu, T_cmb=T_cmb):\n x = _si.h * nu / _si.k_B / T_cmb\n return x**2 * np.exp(x) / np.expm1(x) ** 2\n\n def convert_Jy_to_K(x_jybm):\n factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(\n astrophys.Jy\n )\n return x_jybm / factor\n\n def convert_K_to_Jy(x_K):\n factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(\n si.K\n )\n return x_K / factor\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],\n \"thermodynamic_temperature\",\n {\"frequency\": frequency, \"T_cmb\": T_cmb},\n )", "def fringe_frequency(self, wavelength=0.028, terrestrial_latitude=37.873199, h_s0=0):\n\t\tBew, Bns, baseline = bf.baseline_script_2D(self.hour_angles, 0, self.volts, self.times)\n\t\tfirst_term = Bew / wavelength * np.cos(self.dec) * cos(h_s0)\n\t\tsecond_term = Bns / wavelength * np.sin(terrestrial_latitude) * np.cos(self.dec) * np.sin(h_s0)\n\t\treturn first_term - second_term", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) 
- Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def system_temp(freq_hz):\n freqs = np.array([0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9, 0.45e9,\n 0.55e9, 0.65e9])\n t_sys = np.array([4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3])\n f = interp1d(np.log10(freqs), np.log10(t_sys), kind='cubic')\n return 10**f(np.log10(freq_hz))", "def Gamma_per_grain(ZZall, Gamma_a_Z, ZZ_fz, fdist, GG):\n\n # index in the ZZall array for the charges in ZZ_fz\n zi_down = np.where(ZZall == ZZ_fz[0])[0][0]# find the index of the ZZ_fz[0] in ZZall \n zi_up = np.where(ZZall == ZZ_fz[-1])[0][0]# find the index of the ZZ_fz[-1] in ZZall\n \n #Gamma_pe_a = np.sum(fz*Gamma_dotdot_scaled[zi_down:zi_up+1])\n Gamma_pe_a = np.sum(fdist*Gamma_a_Z[zi_down:zi_up+1])\n \n return Gamma_pe_a", "def at_frequencies(\n self,\n freqs,\n inplace=True,\n freq_interp_kind=\"cubic\",\n nan_handling=\"clip\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n atol=None,\n ):\n sky = self if inplace else self.copy()\n\n if atol is None:\n atol = self.freq_tol\n\n if self.spectral_type == \"spectral_index\":\n sky.stokes = (\n self.stokes\n * (freqs[:, None].to(\"Hz\") / self.reference_frequency[None, :].to(\"Hz\"))\n ** self.spectral_index[None, :]\n )\n sky.reference_frequency = None\n elif self.spectral_type == \"full\":\n # Find a subset of the current array.\n ar0 = self.freq_array.to_value(\"Hz\")\n ar1 = freqs.to_value(\"Hz\")\n tol = atol.to_value(\"Hz\")\n matches = np.fromiter(\n (np.isclose(freq, ar1, atol=tol).any() for freq in ar0), dtype=bool\n )\n\n if np.sum(matches) != freqs.size:\n raise ValueError(\n \"Some requested frequencies are not present in the current SkyModel.\"\n )\n sky.stokes = self.stokes[:, matches, :]\n if sky.freq_edge_array is not None:\n sky.freq_edge_array = sky.freq_edge_array[:, matches]\n elif self.spectral_type == \"subband\":\n if np.max(freqs.to(\"Hz\")) > np.max(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is larger than the highest subband frequency.\"\n )\n if np.min(freqs.to(\"Hz\")) < np.min(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is smaller than the lowest subband frequency.\"\n )\n # Interpolate. 
Need to be careful if there are NaNs -- they spoil the\n # interpolation even for sources that do not have any NaNs.\n stokes_unit = self.stokes.unit\n if np.any(np.isnan(self.stokes.value)):\n allowed_nan_handling = [\"propagate\", \"interp\", \"clip\"]\n if nan_handling not in allowed_nan_handling:\n raise ValueError(\n f\"nan_handling must be one of {allowed_nan_handling}\"\n )\n\n message = \"Some stokes values are NaNs.\"\n if nan_handling == \"propagate\":\n message += (\n \" All output stokes values for sources with any NaN values \"\n \"will be NaN.\"\n )\n else:\n message += \" Interpolating using the non-NaN values only.\"\n message += (\n \" You can change the way NaNs are handled using the \"\n \"`nan_handling` keyword.\"\n )\n warnings.warn(message)\n stokes_arr = self.stokes.value\n freq_arr = self.freq_array.to(\"Hz\").value\n at_freq_arr = freqs.to(\"Hz\").value\n # first interpolate any that have no NaNs\n wh_nan = np.nonzero(np.any(np.isnan(stokes_arr), axis=(0, 1)))[0]\n wh_non_nan = np.nonzero(np.all(~np.isnan(stokes_arr), axis=(0, 1)))[0]\n assert wh_non_nan.size + wh_nan.size == self.Ncomponents, (\n \"Something went wrong with spliting sources with NaNs. This is a \"\n \"bug, please make an issue in our issue log\"\n )\n new_stokes = np.zeros(\n (4, freqs.size, self.Ncomponents), dtype=stokes_arr.dtype\n )\n if wh_non_nan.size > 0:\n finterp = scipy.interpolate.interp1d(\n freq_arr,\n stokes_arr[:, :, wh_non_nan],\n axis=1,\n kind=freq_interp_kind,\n )\n new_stokes[:, :, wh_non_nan] = finterp(at_freq_arr)\n\n if nan_handling == \"propagate\":\n new_stokes[:, :, wh_nan] = np.NaN\n else:\n wh_all_nan = []\n wh_nan_high = []\n wh_nan_low = []\n wh_nan_many = []\n for comp in wh_nan:\n freq_inds_use = np.nonzero(\n np.all(~np.isnan(stokes_arr[:, :, comp]), axis=0)\n )[0]\n if freq_inds_use.size == 0:\n new_stokes[:, :, comp] = np.NaN\n wh_all_nan.append(comp)\n continue\n at_freq_inds_use = np.arange(freqs.size)\n\n if np.max(at_freq_arr) > np.max(freq_arr[freq_inds_use]):\n at_freq_inds_use = np.nonzero(\n at_freq_arr <= np.max(freq_arr[freq_inds_use])\n )[0]\n at_freqs_large = np.nonzero(\n at_freq_arr > np.max(freq_arr[freq_inds_use])\n )[0]\n wh_nan_high.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_large, comp] = np.NaN\n else: # clip\n large_inds_use = np.full(\n (at_freqs_large.size), freq_inds_use[-1]\n )\n new_stokes[:, at_freqs_large, comp] = stokes_arr[\n :, large_inds_use, comp\n ]\n\n if np.min(at_freq_arr) < np.min(freq_arr[freq_inds_use]):\n at_freq_inds_use_low = np.nonzero(\n at_freq_arr >= np.min(freq_arr[freq_inds_use])\n )[0]\n at_freq_inds_use = np.intersect1d(\n at_freq_inds_use, at_freq_inds_use_low\n )\n at_freqs_small = np.nonzero(\n at_freq_arr < np.min(freq_arr[freq_inds_use])\n )[0]\n wh_nan_low.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_small, comp] = np.NaN\n else: # clip\n small_inds_use = np.full(\n (at_freqs_small.size), freq_inds_use[0]\n )\n new_stokes[:, at_freqs_small, comp] = stokes_arr[\n :, small_inds_use, comp\n ]\n\n if at_freq_inds_use.size > 0:\n try:\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=freq_interp_kind,\n )\n except ValueError:\n wh_nan_many.append(comp)\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=\"linear\",\n )\n new_stokes[:, at_freq_inds_use, comp] = finterp(\n at_freq_arr[at_freq_inds_use]\n )\n else:\n 
continue\n if len(wh_all_nan) > 0:\n warnings.warn(\n f\"{len(wh_all_nan)} components had all NaN stokes values. \"\n \"Output stokes for these components will all be NaN.\"\n )\n if len(wh_nan_high) > 0:\n message = (\n f\"{len(wh_nan_high)} components had all NaN stokes values \"\n \"above one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the highest frequency \"\n \"without a NaN for these components at these \"\n \"frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_low) > 0:\n message = (\n f\"{len(wh_nan_low)} components had all NaN stokes values below \"\n \"one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the lowest frequency \"\n \"without a NaN for these components at these frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_many) > 0:\n warnings.warn(\n f\"{len(wh_nan_many)} components had too few non-NaN stokes \"\n \"values for chosen interpolation. Using linear \"\n \"interpolation for these components instead.\"\n )\n sky.stokes = new_stokes * stokes_unit\n else:\n finterp = scipy.interpolate.interp1d(\n self.freq_array.to(\"Hz\").value,\n self.stokes.value,\n axis=1,\n kind=freq_interp_kind,\n )\n sky.stokes = finterp(freqs.to(\"Hz\").value) * stokes_unit\n else:\n # flat spectrum\n stokes_unit = self.stokes.unit\n sky.stokes = np.repeat(self.stokes.value, len(freqs), axis=1) * stokes_unit\n\n sky.reference_frequency = None\n sky.Nfreqs = freqs.size\n sky.freq_array = freqs\n if sky.spectral_type == \"subband\" and sky.freq_edge_array is not None:\n sky.freq_edge_array = None\n sky.spectral_type = \"full\"\n if sky.frame_coherency is not None:\n sky.coherency_radec = sky.calc_frame_coherency()\n\n if run_check:\n sky.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if not inplace:\n return sky", "def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies", "def get_skylight(self, coords):\n\n x, y, z = coords\n index, y = divmod(y, 16)\n\n return self.sections[index].get_skylight((x, y, z))", "def get_spectrum_freq(self):\n if not self.is_a_spectrum_file():\n raise TelemacException(\\\n \"This file does not seem to be a spectrum file\")\n\n nfreq = 0\n eps = 1e-6\n f_1 = 10e10\n f_2 = 10e10\n raisf = 0.\n for x, y in zip(self.meshx, self.meshy):\n if abs(x) <= eps and y >= 0.:\n nfreq += 1\n f_temp = y\n if f_temp < f_1:\n f_2 = f_1\n f_1 = f_temp\n elif f_temp < f_2:\n f_2 = f_temp\n\n raisf = f_2/f_1\n\n freqs = [f_1 * raisf**i for i in range(nfreq)]\n\n dfreqs = np.zeros(nfreq, dtype=np.float64)\n\n auxi = (raisf - 1.)/2.\n dfreqs[0] = auxi*freqs[0]\n for i in range(1, nfreq-1):\n dfreqs[i] = auxi*(freqs[i] + freqs[i-1])\n\n dfreqs[-1] = auxi*freqs[-2]\n\n return np.array(freqs), dfreqs", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = 
len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def tidefit(self,frqnames=None,basetime=None):\r\n \r\n # Get the tidal fruequencies\r\n if frqnames == None:\r\n\t\t\t# This returns the default frequencies from the uspectra class\r\n frq,frqnames = getTideFreq(Fin=None)\r\n else:\r\n frq,frqnames = getTideFreq(Fin=frqnames)\r\n \r\n # Call the uspectra method\r\n U = uspectra(self.tsec,self.y,frq=frq,method='lsqfast')\r\n \r\n amp,phs = U.phsamp(phsbase=basetime)\r\n \r\n return amp, phs, frq, frqnames, U.invfft()", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. / sfreq) + start_time", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def stft(db,istart=0,istop=86400,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**9):\r\n \r\n #get length of input time series if there is two columns\r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm<fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=fx.shape[0]\r\n fm=1\r\n if fm>1:\r\n fx=fx.reshape(fn)\r\n else:\r\n fx=fx.reshape(fn)\r\n #make a hanning window to minimize aliazing and Gibbs effect of short time \r\n #windows\r\n h=normalizeL2(np.hanning(nh))\r\n #make a hanning window to smooth in frequency domain\r\n if ng!=1:\r\n if np.remainder(ng,2)!=1:\r\n ng=ng-1\r\n print 'ng forced to be odd as ng-1'\r\n else:\r\n pass\r\n g=normalizeL2(np.hanning(ng))\r\n else:\r\n pass\r\n #make time step list\r\n tlst=np.arange(start=0,stop=fn-nh+1,step=tstep)\r\n #make a frequency list for plotting exporting only positive frequencies\r\n df=float(df)\r\n flst=np.fft.fftfreq(nfbins,1/df)[0:nfbins/2] #get only positive frequencies\r\n #initialize the TFD array\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n fa=sps.hilbert(dctrend(fx))\r\n \r\n for place,ii in enumerate(tlst):\r\n fxwin=fa[ii:ii+nh]*h\r\n #get only positive frequencies\r\n FXwin=np.fft.fft(padzeros(fxwin,npad=nfbins))[:nfbins/2]\r\n #smooth in frequency plane\r\n if ng!=1:\r\n FXwin=np.convolve(padzeros(FXwin,npad=len(FXwin)+ng-1),g,'valid')\r\n else:\r\n pass\r\n #pull out only positive quadrant, flip array for plotting\r\n tfarray[:,place]=FXwin[::-1]\r\n \r\n return tfarray,tlst,flst", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n 
assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def compute_single_ph_gfunc(\n obj: floquet_analysis.FloquetAnalyzer,\n freqs: np.array) -> np.ndarray:\n # Compute the decay operator and the convolved excitation operator within\n # the single-excitation subspace.\n decay_op = obj.decay_op(1)\n ex_op_conv = obj.ex_op_conv(1, freqs)\n gfunc_time = np.matmul(\n decay_op[np.newaxis, :, :, :], ex_op_conv)[:, :, 0, 0]\n return gfunc_time, np.fft.fftshift(np.fft.ifft(gfunc_time, axis=1), axes=1)", "def brightness_temperature(frequency, beam_area=None):\n if frequency.unit.is_equivalent(si.sr):\n if not beam_area.unit.is_equivalent(si.Hz):\n raise ValueError(\n \"The inputs to `brightness_temperature` are frequency and angular area.\"\n )\n warnings.warn(\n \"The inputs to `brightness_temperature` have changed. \"\n \"Frequency is now the first input, and angular area \"\n \"is the second, optional input.\",\n AstropyDeprecationWarning,\n )\n frequency, beam_area = beam_area, frequency\n\n nu = frequency.to(si.GHz, spectral())\n factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value\n factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value\n\n if beam_area is not None:\n beam = beam_area.to_value(si.sr)\n\n def convert_Jy_to_K(x_jybm):\n return x_jybm / beam / factor_Jy\n\n def convert_K_to_Jy(x_K):\n return x_K * beam / factor_K\n\n return Equivalency(\n [\n (astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),\n (astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),\n ],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )\n else:\n\n def convert_JySr_to_K(x_jysr):\n return x_jysr / factor_Jy\n\n def convert_K_to_JySr(x_K):\n return x_K / factor_K # multiplied by 1x for 1 steradian\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )", "def gtgram(wave,fs,window_time, hop_time,channels,f_min,f_max):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gt.gtgram_strides(fs,window_time, hop_time, xe.shape[1])\n y = np.zeros((channels, ncols))\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n return y", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]", "def tsz_spectrum(self, nu):\n x = NU_SCALE * nu # Frequency/temperature\n #g_nu = ( x*(np.exp(x) + 1.) / (np.exp(x) - 1.) ) - 4. # tSZ spectral dependence\n g_nu = x**2. * np.exp(x) * (x/np.tanh(x/2.) - 4.) 
/ (np.exp(x) - 1.)**2.\n return g_nu", "def k_2_jy(freq: float, theta_major: float,\n theta_minor: float, brightness: float) -> float:\n conv = (1.222E3 * (freq ** -2) / theta_minor / theta_major) ** -1\n return brightness * conv", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)" ]
[ "0.7263583", "0.55189", "0.5414309", "0.52412045", "0.5169021", "0.51627105", "0.51417047", "0.51059294", "0.5028249", "0.49997443", "0.49332586", "0.4930508", "0.49028358", "0.48790222", "0.48755518", "0.4864208", "0.4823804", "0.4810659", "0.4780731", "0.4776312", "0.47525737", "0.47469786", "0.46991336", "0.46919245", "0.467459", "0.46471933", "0.4637196", "0.4629898", "0.4619782", "0.46104607" ]
0.82942164
0
Calculates average sky background temperature for a given Galactic longitude (gl), Galactic latitude (gb), and between frequencies f1 and f2 (in MHz). Coordinates are in degrees. Assuming spectral index of "index", default is 2.55 Return value is in K If frequency array 'freqs' is given, then avergae Tsky is calculated for each frequency range f0f1, f1f2,... in the array, and returned value is list of average Tsky's. The size of the returned array is less by 1 than the size of freqs.
def tsky_range(gl, gb, f1, f2, index, freqs=None): # reading the table nsky=np.zeros((90, 180), dtype=float) for ii in xrange(90): for jj in xrange(180): pos=(ii*180+jj)*5 nsky[ii,jj]=float(haslam_table[pos:pos+5]) # Convert to standard l,b b = int(gb + 90.5) if b >= 180: b = 179 l = int(gl + 0.5) if gl >= 360: l = 0 l = int((l / 4)) if freqs == None: tot=0 for ii in xrange(101): freq = f1 + ii*(f2-f1)/100. tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) tot += tsky tot /= 100. return tot else: temps=[] for ff in xrange(1, len(freqs)): tot = 0 for ii in xrange(101): freq = freqs[ff-1] + ii*(freqs[ff]-freqs[ff-1])/100. tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) tot += tsky tot /= 100. temps.append(tot) return temps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky(gl, gb, freq, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b = 179\n\tl = int(gl + 0.5)\n\tif gl >= 360: l = 0\n\tl = int((l / 4))\n\t\n\tif freqs == None:\n\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\treturn tsky\n\telse:\n\t\ttemps=[]\n\t\tfor freq in freqs:\n\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\ttemps.append(tsky)\n\t\treturn temps", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)", "def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def average_fft(ut: np.ndarray) -> np.ndarray:\n\n # We average over each row of ut.\n ut_average = np.average(ut, axis=0) # shape (262144,)\n\n return ut_average", "def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS", "def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot", "def gtgram(\n wave, fs, window_time, hop_time, channels, f_min, f_max=None, return_freqs=False\n):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gtgram_strides(fs, window_time, hop_time, xe.shape[1])\n\n y = np.zeros((channels, ncols))\n\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n\n if return_freqs:\n cfs = centre_freqs(fs, channels, f_min, f_max)\n return cfs, y\n return y", "def Gamma_per_grain(ZZall, Gamma_a_Z, ZZ_fz, fdist, GG):\n\n # index in the ZZall array for the charges in ZZ_fz\n zi_down = np.where(ZZall == ZZ_fz[0])[0][0]# find the index of the ZZ_fz[0] in ZZall \n zi_up = np.where(ZZall == ZZ_fz[-1])[0][0]# find the index of the ZZ_fz[-1] in ZZall\n \n #Gamma_pe_a = np.sum(fz*Gamma_dotdot_scaled[zi_down:zi_up+1])\n Gamma_pe_a = np.sum(fdist*Gamma_a_Z[zi_down:zi_up+1])\n \n return Gamma_pe_a", "def trialAverage(dffTraceAllRoi, bgIndex):\n # each element in trialAvgAllRoi is an epoch\n # then each epoch has a single list\n # this list contains arrays of trial averages for every roi\n # if bg, instead of an array, it has NaN\n trialAvgAllRoi = {}\n for epoch in dffTraceAllRoi:\n trialAvgAllRoi[epoch] = []\n for roi in range(len(dffTraceAllRoi[epoch])):\n trialLengths = []\n # if Bg, append NaN to trialLengths\n # this way you dont distrupt 
bgIndex in the future\n if roi == bgIndex:\n trialLengths.append(numpy.nan)\n else:\n for trial in dffTraceAllRoi[epoch][roi]:\n trialLengths.append(len(trial))\n\n if trialLengths[0] is not numpy.nan:\n # real ROI case, not bg\n minTrialLen = min(trialLengths)\n trialFit = 0\n for trial in dffTraceAllRoi[epoch][roi]:\n trialFit += trial[:minTrialLen]\n # calculate the trial average for an roi\n trialAvg = trialFit/len(dffTraceAllRoi[epoch][roi])\n\n trialAvgAllRoi[epoch].append(trialAvg)\n\n elif trialLengths[0] is numpy.nan:\n # bgRoi case\n trialAvgAllRoi[epoch].append(numpy.nan)\n\n return trialAvgAllRoi", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def analyze2(ys, freqs, ts):", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def compute_ctf(freqs,rots,akv,cs,wgh,dfmid1f,dfmid2f,angastf,dscale,bfactor=None): \n av = akv * 1e3 # Convert kilovots to volts\n cs = cs * 1e7 # Convert spherical aberation from mm to A\n \n # wavelength of electrons\n elambda = 12.2643247 / n.sqrt(av + av**2 * 0.978466e-6)\n \n wgh1 = dscale*n.sqrt(1.0 - wgh**2)\n wgh2 = dscale*wgh\n\n ix = freqs[:,0]\n iy = freqs[:,1]\n freq_radius = n.sqrt(ix**2 + iy**2)\n\n angle = elambda*freq_radius\n angspt = n.arctan2(iy,ix)\n if rots is not None:\n angspt = n.mod(angspt.reshape((-1,1)) + rots.reshape((1,-1)),2.0*n.pi)\n angle = angle.reshape((-1,1)) \n c1 = 2.0*n.pi*angle**2/(2.0*elambda)\n c2 = -c1*cs*angle**2/2.0\n angdif = angspt - angastf\n ccos = n.cos(2.0*angdif)\n df = 0.5*(dfmid1f + dfmid2f + ccos*(dfmid1f-dfmid2f))\n chi = c1*df + c2\n\n ctf = -wgh1*n.sin(chi) - wgh2*n.cos(chi)\n \n if bfactor is not None:\n ctf *= envelope_function(freq_radius, bfactor)\n\n return n.require(ctf,dtype = freqs.dtype)", "def gps2tas(GS, TK, verbose=0):\n # confirm GS and TK are valid lengths:\n if 2 < len(GS) < 5:\n pass\n else:\n raise ValueError(\"GS must be a list of three or four items\")\n\n if 2 < len(TK) < 5:\n pass\n else:\n raise ValueError(\"TK must be a list of three or four items\")\n\n if len(GS) != len(TK):\n raise ValueError(\n \"The ground speed and track arrays must have the same number of elements.\"\n )\n\n if len(GS) == 3:\n result = gps2tas3(GS, TK, verbose)\n return result\n else:\n gs_data_sets, tk_data_sets, results = [], [], []\n\n gs_data_sets.append([GS[0], GS[1], GS[2]])\n gs_data_sets.append([GS[1], GS[2], GS[3]])\n gs_data_sets.append([GS[2], GS[3], GS[0]])\n gs_data_sets.append([GS[3], GS[0], GS[1]])\n\n tk_data_sets.append([TK[0], TK[1], TK[2]])\n tk_data_sets.append([TK[1], TK[2], TK[3]])\n tk_data_sets.append([TK[2], TK[3], TK[0]])\n tk_data_sets.append([TK[3], TK[0], TK[1]])\n\n for (gs, tk) in zip(gs_data_sets, tk_data_sets):\n results.append(gps2tas3(gs, tk, 2))\n\n ave_TAS = 0\n ave_wind_x = 0\n ave_wind_y = 0\n sum2_TAS = 0\n\n for item in results:\n ave_TAS += item[0]\n sum2_TAS += item[0] ** 2\n ave_wind_x += item[1][0] * 
M.sin(M.pi * item[1][1] / 180.0)\n ave_wind_y += item[1][0] * M.cos(M.pi * item[1][1] / 180.0)\n\n ave_TAS /= 4.0\n std_dev_TAS = M.sqrt((sum2_TAS - 4 * ave_TAS ** 2) / 3)\n ave_wind_x /= 4\n ave_wind_y /= 4.0\n ave_wind_speed = M.sqrt(ave_wind_x ** 2 + ave_wind_y ** 2)\n ave_wind_dir = (720.0 - (180.0 / M.pi * M.atan2(ave_wind_x, ave_wind_y))) % 360\n # return results\n\n if verbose == 0:\n return ave_TAS\n elif verbose == 1:\n return ave_TAS, std_dev_TAS\n elif verbose == 2:\n return (\n ave_TAS,\n std_dev_TAS,\n (\n (results[0][1][0], results[0][1][1]),\n (results[1][1][0], results[1][1][1]),\n (results[2][1][0], results[2][1][1]),\n (results[3][1][0], results[3][1][1]),\n ),\n )\n else:\n raise ValueError(\"The value of verbose must be equal to 0, 1 or 2\")", "def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def calculate_boltzmann_average(energy, temperature, kb=0.0019872041):\n beta = 1 / (kb * temperature)\n F = np.array(energy)\n Ptot = np.exp(-F * beta)\n P = Ptot / Ptot.sum()\n F_avg = (P * F).sum()\n return F_avg", "def at_frequencies(\n self,\n freqs,\n inplace=True,\n freq_interp_kind=\"cubic\",\n nan_handling=\"clip\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n atol=None,\n ):\n sky = self if inplace else self.copy()\n\n if atol is None:\n atol = self.freq_tol\n\n if self.spectral_type == \"spectral_index\":\n sky.stokes = (\n self.stokes\n * (freqs[:, None].to(\"Hz\") / self.reference_frequency[None, :].to(\"Hz\"))\n ** self.spectral_index[None, :]\n )\n sky.reference_frequency = None\n elif self.spectral_type == \"full\":\n # Find a subset of the current array.\n ar0 = self.freq_array.to_value(\"Hz\")\n ar1 = freqs.to_value(\"Hz\")\n tol = atol.to_value(\"Hz\")\n matches = np.fromiter(\n (np.isclose(freq, ar1, atol=tol).any() for freq in ar0), dtype=bool\n )\n\n if np.sum(matches) != freqs.size:\n 
raise ValueError(\n \"Some requested frequencies are not present in the current SkyModel.\"\n )\n sky.stokes = self.stokes[:, matches, :]\n if sky.freq_edge_array is not None:\n sky.freq_edge_array = sky.freq_edge_array[:, matches]\n elif self.spectral_type == \"subband\":\n if np.max(freqs.to(\"Hz\")) > np.max(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is larger than the highest subband frequency.\"\n )\n if np.min(freqs.to(\"Hz\")) < np.min(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is smaller than the lowest subband frequency.\"\n )\n # Interpolate. Need to be careful if there are NaNs -- they spoil the\n # interpolation even for sources that do not have any NaNs.\n stokes_unit = self.stokes.unit\n if np.any(np.isnan(self.stokes.value)):\n allowed_nan_handling = [\"propagate\", \"interp\", \"clip\"]\n if nan_handling not in allowed_nan_handling:\n raise ValueError(\n f\"nan_handling must be one of {allowed_nan_handling}\"\n )\n\n message = \"Some stokes values are NaNs.\"\n if nan_handling == \"propagate\":\n message += (\n \" All output stokes values for sources with any NaN values \"\n \"will be NaN.\"\n )\n else:\n message += \" Interpolating using the non-NaN values only.\"\n message += (\n \" You can change the way NaNs are handled using the \"\n \"`nan_handling` keyword.\"\n )\n warnings.warn(message)\n stokes_arr = self.stokes.value\n freq_arr = self.freq_array.to(\"Hz\").value\n at_freq_arr = freqs.to(\"Hz\").value\n # first interpolate any that have no NaNs\n wh_nan = np.nonzero(np.any(np.isnan(stokes_arr), axis=(0, 1)))[0]\n wh_non_nan = np.nonzero(np.all(~np.isnan(stokes_arr), axis=(0, 1)))[0]\n assert wh_non_nan.size + wh_nan.size == self.Ncomponents, (\n \"Something went wrong with spliting sources with NaNs. 
This is a \"\n \"bug, please make an issue in our issue log\"\n )\n new_stokes = np.zeros(\n (4, freqs.size, self.Ncomponents), dtype=stokes_arr.dtype\n )\n if wh_non_nan.size > 0:\n finterp = scipy.interpolate.interp1d(\n freq_arr,\n stokes_arr[:, :, wh_non_nan],\n axis=1,\n kind=freq_interp_kind,\n )\n new_stokes[:, :, wh_non_nan] = finterp(at_freq_arr)\n\n if nan_handling == \"propagate\":\n new_stokes[:, :, wh_nan] = np.NaN\n else:\n wh_all_nan = []\n wh_nan_high = []\n wh_nan_low = []\n wh_nan_many = []\n for comp in wh_nan:\n freq_inds_use = np.nonzero(\n np.all(~np.isnan(stokes_arr[:, :, comp]), axis=0)\n )[0]\n if freq_inds_use.size == 0:\n new_stokes[:, :, comp] = np.NaN\n wh_all_nan.append(comp)\n continue\n at_freq_inds_use = np.arange(freqs.size)\n\n if np.max(at_freq_arr) > np.max(freq_arr[freq_inds_use]):\n at_freq_inds_use = np.nonzero(\n at_freq_arr <= np.max(freq_arr[freq_inds_use])\n )[0]\n at_freqs_large = np.nonzero(\n at_freq_arr > np.max(freq_arr[freq_inds_use])\n )[0]\n wh_nan_high.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_large, comp] = np.NaN\n else: # clip\n large_inds_use = np.full(\n (at_freqs_large.size), freq_inds_use[-1]\n )\n new_stokes[:, at_freqs_large, comp] = stokes_arr[\n :, large_inds_use, comp\n ]\n\n if np.min(at_freq_arr) < np.min(freq_arr[freq_inds_use]):\n at_freq_inds_use_low = np.nonzero(\n at_freq_arr >= np.min(freq_arr[freq_inds_use])\n )[0]\n at_freq_inds_use = np.intersect1d(\n at_freq_inds_use, at_freq_inds_use_low\n )\n at_freqs_small = np.nonzero(\n at_freq_arr < np.min(freq_arr[freq_inds_use])\n )[0]\n wh_nan_low.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_small, comp] = np.NaN\n else: # clip\n small_inds_use = np.full(\n (at_freqs_small.size), freq_inds_use[0]\n )\n new_stokes[:, at_freqs_small, comp] = stokes_arr[\n :, small_inds_use, comp\n ]\n\n if at_freq_inds_use.size > 0:\n try:\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=freq_interp_kind,\n )\n except ValueError:\n wh_nan_many.append(comp)\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=\"linear\",\n )\n new_stokes[:, at_freq_inds_use, comp] = finterp(\n at_freq_arr[at_freq_inds_use]\n )\n else:\n continue\n if len(wh_all_nan) > 0:\n warnings.warn(\n f\"{len(wh_all_nan)} components had all NaN stokes values. \"\n \"Output stokes for these components will all be NaN.\"\n )\n if len(wh_nan_high) > 0:\n message = (\n f\"{len(wh_nan_high)} components had all NaN stokes values \"\n \"above one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the highest frequency \"\n \"without a NaN for these components at these \"\n \"frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_low) > 0:\n message = (\n f\"{len(wh_nan_low)} components had all NaN stokes values below \"\n \"one or more of the requested frequencies. 
\"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the lowest frequency \"\n \"without a NaN for these components at these frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_many) > 0:\n warnings.warn(\n f\"{len(wh_nan_many)} components had too few non-NaN stokes \"\n \"values for chosen interpolation. Using linear \"\n \"interpolation for these components instead.\"\n )\n sky.stokes = new_stokes * stokes_unit\n else:\n finterp = scipy.interpolate.interp1d(\n self.freq_array.to(\"Hz\").value,\n self.stokes.value,\n axis=1,\n kind=freq_interp_kind,\n )\n sky.stokes = finterp(freqs.to(\"Hz\").value) * stokes_unit\n else:\n # flat spectrum\n stokes_unit = self.stokes.unit\n sky.stokes = np.repeat(self.stokes.value, len(freqs), axis=1) * stokes_unit\n\n sky.reference_frequency = None\n sky.Nfreqs = freqs.size\n sky.freq_array = freqs\n if sky.spectral_type == \"subband\" and sky.freq_edge_array is not None:\n sky.freq_edge_array = None\n sky.spectral_type = \"full\"\n if sky.frame_coherency is not None:\n sky.coherency_radec = sky.calc_frame_coherency()\n\n if run_check:\n sky.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if not inplace:\n return sky", "def _fog_by_index(\n self, index: int, val: Optional[Tuple[int, float]]\n ) -> Tuple[int, float]:\n if index < 0 or index >= 26 * 21:\n raise IndexError(\"invalid fog index\")\n\n colours = self.variables.setdefault(\"fog_trigger\", VariableArray(VariableUInt))\n if not isinstance(colours, VariableArray):\n raise ValueError(\"fog_trigger variable not an array\")\n pers = self.variables.setdefault(\"fog_per\", VariableArray(VariableFloat))\n if not isinstance(pers, VariableArray):\n raise ValueError(\"fog_per variable not an array\")\n\n while index >= len(colours):\n colours.append(0x111118)\n while index >= len(pers):\n pers.append(0.0)\n\n result = (colours[index], pers[index])\n if val is not None:\n colours[index], pers[index] = val\n return result", "def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg", "def getAvgTemp(self, typeSpec, blockList=None, flux2Weight=False):\n num = 0.0\n denom = 0.0\n if not blockList:\n blockList = list(self.getBlocks())\n\n for b in blockList:\n if flux2Weight:\n weight = b.p.flux**2.0\n else:\n weight = 1.0\n for c in b.iterComponents(typeSpec):\n vol = c.getVolume()\n num += c.temperatureInC * vol * weight\n denom += vol * 
weight\n\n if denom:\n return num / denom\n else:\n raise RuntimeError(\"no temperature average for {0}\".format(typeSpec))", "def anharm_freq(freqs, xmat):\n anharms = np.zeros(len(freqs))\n for i, freq in enumerate(freqs):\n anharms[i] = freq\n anharms[i] += 2. * xmat[i][i]\n tmp = 0\n for j in range(len(freqs)):\n if j != i:\n tmp += xmat[i][j]\n\n anharms[i] += 1./2 * tmp\n\n return anharms", "def element_area_and_temperature(freq_hz):\n # Element noise data.\n noise_data = {\n 'freqs': [0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9,\n 0.45e9, 0.55e9, 0.65e9],\n 'a_eff': [1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956,\n 0.2046, 0.1384, 0.0792],\n 't_sys': [4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3]\n }\n log_freq = numpy.log10(freq_hz)\n freqs = numpy.array(noise_data['freqs'])\n a_eff = numpy.array(noise_data['a_eff'])\n t_sys = numpy.array(noise_data['t_sys'])\n f_cut = 2\n\n # Interpolate to get effective area.\n if freq_hz <= freqs[f_cut]:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[:f_cut+1]), \n numpy.log10(a_eff[:f_cut+1]), kind='slinear')\n a_eff = 10**f(log_freq)\n else:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[f_cut:]), \n numpy.log10(a_eff[f_cut:]), kind='cubic')\n a_eff = 10**f(log_freq)\n\n # Interpolate to get system temperature.\n f = scipy.interpolate.interp1d(numpy.log10(freqs), \n numpy.log10(t_sys), kind='cubic')\n t_sys = 10**f(log_freq)\n return a_eff, t_sys", "def average(self, times=2):\n for i in range(times):\n self.statistics()\n global t, avlist\n length = len(t)\n avlist.append(t)\n t = []\n\n total_list = []\n\n for l in range(length):\n total_list.append([])\n\n for j in range(times):\n \"\"\"per time\"\"\"\n for i in range(length):\n total_list[i].append(avlist[j][i])\n\n \"\"\"calculate\"\"\"\n ylist = []\n avlist = []\n for a in total_list:\n avg = 0\n for b in a:\n avg += b\n ylist.append(avg/times)\n self.listy = ylist\n\n for e in range(self.el[self.re[0]], self.re[1], self.re[2]):\n self.listx.append(e)", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def avg_temps(self):\r\n average_temp = 0\r\n for j in range(len(self.trip)):\r\n average_temp += self.trip[j].get_temperature(j)\r\n average_temp /= len(self.trip)\r\n return average_temp" ]
[ "0.7237736", "0.5336351", "0.53191733", "0.5311113", "0.52894306", "0.5208346", "0.5117886", "0.5105985", "0.50825256", "0.5041957", "0.4994894", "0.49861515", "0.49033117", "0.48573145", "0.48243278", "0.48106575", "0.4807822", "0.47676003", "0.4739023", "0.47271356", "0.47114715", "0.470455", "0.46933037", "0.46865195", "0.4659219", "0.46554795", "0.46553335", "0.4623883", "0.460616", "0.46022913" ]
0.71738416
1
Verification process of signature for file name document
def verification(file_name: str) -> None: print("Verification process...") file_name = os.path.join('data', file_name) file1 = open("data/key.txt", "r") file2 = open("data/signature.txt", "r") p = int(file1.readline().rstrip()) q = int(file1.readline().rstrip()) g = int(file1.readline().rstrip()) h = int(file1.readline().rstrip()) c1 = int(file2.readline().rstrip()) c2 = int(file2.readline().rstrip()) print('c1 = ', c1) print('c2 = ', c2) t1 = sha_hash(file_name) print('hash = ', t1) inverseC2 = compute_inverse(c2, q) t1 = (t1 * inverseC2) % q t2 = compute_inverse(c2, q) t2 = (t2 * c1) % q valid1 = square_multiply(g, t1, p) valid2 = square_multiply(h, t2, p) valid = ((valid1 * valid2) % p) % q if valid == c1: print("Valid signature") else: print("Invalid signature")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_signature_dialog():\n signature_name = input(\"Enter signature identity: \")\n file_path = input(\"Enter file path: \")\n user = input(\"Enter username: \")\n\n if not(os.path.exists(user)):\n raise Exception(ERRORS.NOT_FOUND_USER)\n if not(os.path.exists(f\"{signature_name}.sig\")):\n raise Exception(ERRORS.NOT_FOUND_SIGNATURE)\n if not(os.path.exists(file_path)):\n raise Exception(ERRORS.NOT_FOUND_FILE)\n\n with open(user, \"r\") as file:\n _ = int(file.readline())\n y = int(file.readline())\n with open(f\"{signature_name}.sig\", \"r\") as file:\n r = int(file.readline())\n s = int(file.readline())\n with open(file_path, \"rb\") as file:\n file_hash = hashlib.sha256(file.read()).hexdigest()\n file_hash_int = int(file_hash, 16)\n \n if (r<0 or r>=Q) or (s<0 or s>=Q):\n raise Exception(ERRORS.INVALID_SIGNATURE)\n \n w = pow(s, Q-2, Q)\n u1 = (file_hash_int * w) % Q\n u2 = (r * w) % Q\n v = ((pow(G, u1, P) * pow(y, u2, P)) % P) % Q\n\n if v == r:\n print(f\"Signature is valid. The signature {signature_name}.sig verifies that {file_path} is sent by {user}.\")\n return\n \n print(f\"Signature is not valid.\")", "def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)", "def check_sig(filename):\n pipe = Popen([\"gpg\", \"--verify\", filename], stderr=PIPE)\n pipe.stderr.read()\n status = pipe.wait()\n if status != 0:\n raise BadSignature('%s is not properly signed' % filename)", "def _sign_document(self):\n return False", "def verify_signature(self, inputs, signature):\n pass", "def verify_signature(self, inputs, signature):\n pass", "def verify_signature(signed_file_path, output_file_path):\n cmd = [\"gpg\", \"-d\"]\n keyring_path = configuration.get_gpg_public_keyring_path()\n\n # if a keyring is specified in the conf, used it, else use default one\n if keyring_path != \"\":\n cmd += [GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path]\n cmd += [\"--output\", output_file_path, signed_file_path]\n\n # temporary workaround for the omi/gpg bug causing gpg to create a .gpg folder in the wrong home dir\n # only apply the workaround for oms installation\n env = None\n if \"nxOMSAutomationWorkerResource\" in os.path.abspath(__file__):\n env = os.environ.copy()\n env[\"HOME\"] = \"/var/opt/microsoft/omsagent/run\"\n\n proc = subprocessfactory.create_subprocess(cmd=cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n\n if proc.poll() == 0:\n tracer.log_debug_trace(\"Signature is valid.\")\n return True\n\n 
tracer.log_sandbox_job_runbook_signature_validation_failed(stderr)\n return False", "def check_sig(self):\n check_sig(self.path)\n dsc = self.get_dsc()\n if dsc is not None:\n check_sig(dsc)", "def verify_request_signature(req_info: StatusResponse) -> None:\n if not req_info.signature_check(req_info.xmlstr):\n raise ValueError(_(\"Message signature verification failure\"))", "def checksignature(self):\n if(self.name=='ORBIT'): return\n if(self.ctpnum==0): return\n cmd=\"CheckSignature(\"+self.board+\",\"+self.signature+\",\"+self.ctpnum+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print \"input checksignature: \",output\n #self.signatureM=", "def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")", "def test_signature_validation(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertTrue(app.utils.validate_signed_data(\n self._body,\n signature,\n settings.PUBLIC_KEY\n ))", "def verify_signature(self, key, data):\n verify_signature(self, key, data)", "def test_unexpected_error_in_signature(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggySignatureResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while retrieving signature' \\\n ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in signature()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def check(self):\n if self.is_signed():\n data = self._document.read()\n hash_value = data[-self._append_size+1:-1]\n data = data[:-self._append_size]\n\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n current_hash_value = encrypted[-16:]\n\n if current_hash_value != hash_value:\n print(\"Hash values did not matched!\")\n else:\n print(\"Hash values matched!\")\n else:\n print(\"The document is not signed!\")", "def is_signed(self):\n file_size = os.stat(self._file_name).st_size\n self._document.seek(file_size - self._append_size)\n last = self._document.read()\n self._document.seek(0)\n\n if not (chr(last[0]) == self._seperator and chr(last[-1]) == self._seperator):\n return False\n else:\n return True", "def test_create_image_signature(self):\n pass", "def verify():", "def check_fileName(session) -> 'bool':\n c = get_client()\n cursor = c.find({},{\"size\":1, \"_id\":0})\n print(session)\n for document in cursor:\n print(document)\n if hmac.compare_digest(session, document[\"size\"]):\n return True\n print(\"size \", document[\"size\"])\n return False", "def 
verify_apk_signature(self):\n verify.verify_apk_sig(self.apk_path) # raises CryptoVerificationError\n print(' - APK signature is valid')", "def verify(self, signature, body, external_aad, public_key):", "def verify(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = TomlSignatureFormatter().from_string(sign.read())\n\n if signature.verify(SignableBinaryIO(file), key):\n click.echo(\"---verified---\")\n exit(0)\n else:\n click.echo(\"---denied---\")\n exit(1)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except SignatureFormatError:\n click.echo(\"ERROR: Signature is in bad format\")", "def identify_file(self, file):", "def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")", "def test_getSignature(self):\n self.assertTrue(ChangeType().getSignature(0) is not '')", "def verify_signature(\n self, path: Union[bytes, str], digest: bytes, signature: bytes\n ):\n path = _to_bytes_or_null(path)\n ret = lib.Fapi_VerifySignature(\n self._ctx, path, digest, len(digest), signature, len(signature)\n )\n _chkrc(ret)", "def _get_signature(search_results: SearchResults) -> Text:\n # Was previously logic here. Leaving method in case it's needed again\n return COMMENT_SIGNATURE", "def _validate_signature(self):\n signing_string = '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(\n 'Message',\n self._message_encoded,\n 'MessageId',\n self._message_id,\n 'Timestamp',\n self._timestamp,\n 'TopicArn',\n self._topic_arn,\n 'Type',\n self._type)\n\n crt = crypto.load_certificate(crypto.FILETYPE_PEM, self._pem)\n signature = base64.b64decode(self._signature)\n\n try:\n crypto.verify(\n crt,\n signature,\n signing_string.encode('utf-8'),\n 'sha1')\n except:\n self.error = 'Invalid signature.'\n raise ValueError('Invalid signature.')\n\n return True", "def remove_sign(self):\n if self.is_signed():\n file_size = os.stat(self._file_name).st_size\n self._document.truncate(file_size - self._append_size)\n print(\"Sign removed from the document!\")\n else:\n print(\"The document is not signed!\")", "def check_specific_signatures(self):\r\n\r\n test1 = re.search(r'История операций по дебетовой карте за период', self.bank_text, re.IGNORECASE)\r\n # print(f\"{test1=}\")\r\n\r\n if not test1:\r\n raise exceptions.InputFileStructureError(\"Не найдены паттерны, соответствующие выписке\")" ]
[ "0.66871303", "0.6619433", "0.6604733", "0.65458876", "0.6505863", "0.6505863", "0.63899714", "0.63860995", "0.63552636", "0.6281927", "0.61876994", "0.61288184", "0.6126398", "0.6071543", "0.60413754", "0.6026036", "0.60181516", "0.5999449", "0.59889215", "0.5982611", "0.59699494", "0.59397066", "0.5930195", "0.5906953", "0.5901288", "0.5883293", "0.58661187", "0.58624965", "0.5845882", "0.5835772" ]
0.67912775
0
return path for lights file
def lightPath(self): return mfl.mayaFile( self._path + '/lights.ma' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def darkpath(cam):\n return os.path.join(BASEPATH, cam + \"_dark\")", "def path_for(filename):\n if settings.value(Key.Theme) == Themes.Light.value:\n return (IMAGES_PATH / Themes.Light.value / filename).as_posix()\n return (IMAGES_PATH / Themes.Dark.value / filename).as_posix()", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def path(self) -> str:\n return self.src + \"/\"", "def sirsam_target_path(data_sirsam):\n return os.path.join(data_sirsam, 'targets', 'geochem_sites_log.shp')", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def environmentImagesPath():\n # A recursion counter to make sure that the loop ends.\n count = 0\n # Get the path to the Blender executable.\n filePath = os.path.dirname(bpy.app.binary_path)\n # Find the lowest path level which contains Blender.\n while \"blender\" not in os.path.basename(filePath).lower():\n filePath = os.path.dirname(filePath)\n if not filePath or count == 20:\n break\n count += 1\n\n # Search all subpaths for the datafiles folder. Based on this folder\n # the path can be completed.\n for dirPath, dirs, fileList in os.walk(filePath):\n if os.path.basename(dirPath) == \"datafiles\":\n return os.path.join(os.path.join(dirPath, \"studiolights\"), \"world\")", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')", "def flatpath(cam):\n return os.path.join(BASEPATH, cam + \"_flats\")", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def file_path(self):\n return self._obs_file()", "def path(self):\n return self.file_path()", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def file_path(self):\n return self.lib.file_path", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def FilePath(self) -> str:", "def neighbordb_path():\n\n filepath = runtime.default.data_root\n filename = runtime.neighbordb.filename\n return os.path.join(filepath, filename)", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def fpath(self):\n return os.path.join(self.path, self.name)", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def aovsPath(self):\n\t\treturn fl.File( self._path + '/aovs.data' )", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def darkfiles(cam):\n return fullpathlist(darkpath(cam))", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def filepath(self):\n return self.file.path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory" ]
[ "0.82484734", "0.6809375", "0.66981214", "0.6469427", "0.6256557", "0.6256362", "0.62507707", "0.6244421", "0.62019056", "0.61183137", "0.61059153", "0.6080485", "0.6063016", "0.60597664", "0.60492027", "0.603537", "0.6034855", "0.602494", "0.5999086", "0.59892964", "0.59726596", "0.5952461", "0.5952201", "0.5946454", "0.5940829", "0.5936752", "0.59068924", "0.5887162", "0.58870095", "0.58761823" ]
0.820123
1
return the path for the shader file
def shaderPath(self): return mfl.mayaFile( self._path + '/shaders.ma' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFragmentShader(self):\n return self.fshader", "def getCompiled(self):\n if self.isCompiled():\n return self.shader\n else:\n raise Exception(\"el shader no ha sido compilado aun\")", "def dataShader(self):\n\t\treturn self._shader", "def location( self, shader, mode ):\n return shader.getLocation( mode, self.name, uniform=True )", "def printShader(self):\n print self.file", "def get_shader_codes(self):\n vs = VS_TEMPLATE\n fs = FS_TEMPLATE\n \n # Shader headers\n vs_header = self.get_header('vertex')\n fs_header = self.get_header('fragment')\n \n # Varyings\n for varying in self.varyings:\n s1, s2 = get_varying_declarations(varying)\n vs_header += s1\n fs_header += s2\n \n # vs_header += \"\".join(self.vs_headers)\n # fs_header += \"\".join(self.fs_headers)\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_HEADER%\", vs_header)\n fs = fs.replace(\"%FRAGMENT_HEADER%\", fs_header)\n \n # Vertex and fragment main code\n vs_main = self.get_main('vertex')\n fs_main = self.get_main('fragment')\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_MAIN%\", vs_main)\n fs = fs.replace(\"%FRAGMENT_MAIN%\", fs_main)\n \n # frag color or frag data\n if self.fragdata is None:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragColor = out_color;\"\"\")\n else:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragData[%d] = out_color;\"\"\" % self.fragdata)\n \n # Make sure there are no Windows carriage returns\n vs = vs.replace(b\"\\r\\n\", b\"\\n\")\n fs = fs.replace(b\"\\r\\n\", b\"\\n\")\n \n # OLDGLSL does not know the texture function\n if not OLDGLSL:\n fs = fs.replace(\"texture1D(\", \"texture(\" % 2)\n fs = fs.replace(\"texture2D(\", \"texture(\" % 2)\n \n # set default color\n fs = fs.replace('%DEFAULT_COLOR%', str(self.default_color))\n \n # replace GLSL version header\n vs = vs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n fs = fs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n \n # replace GLSL precision header\n vs = vs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n fs = fs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n \n return vs, fs", "def __str__(self):\n if self.fshader is None:\n f = \"not defined\"\n else:\n f = self.fshader.getPath()\n if self.vshader is None:\n v = \"not defined\"\n else:\n v = self.vshader.getPath()\n if self.enabled:\n e = \"enabled\"\n else:\n e = \"disabled\"\n if self.isCompiled():\n c = \"compiled | {0}\".format(e)\n else:\n c = \"not compiled | {0}\".format(e)\n return \"shader: {3}\\nfragment shader: {0}\\nvertex shader: {1}\\nstatus: {2}\".format(f, v, c, self.getName())", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def file_path(self) -> global___Expression:", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def path(self) -> str:\n return self.src + \"/\"", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def loadShader(shaderpath, shadername, vertexFormatList=None, fragmentFormatlist=None):\n fragment = Shader(shaderpath + shadername + 
\".fsh\", FRAGMENT, True, fragmentFormatlist)\n vertex = Shader(shaderpath + shadername + \".vsh\", VERTEX, True, vertexFormatList)\n return ShaderProgram(vertex, fragment, True)", "def glGetShaderSourceARB( baseOperation, obj ):\n length = int(glGetObjectParameterivARB(obj, GL_OBJECT_SHADER_SOURCE_LENGTH_ARB))\n if length > 0:\n source = ctypes.create_string_buffer(length)\n baseOperation(obj, length, None, source)\n return source.value.strip(_NULL_8_BYTE) # null-termination\n return ''", "def getVertexShader(self):\n return self.vshader", "def get_path(self):\n return StaticAsset.get_static_path(self._name)", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def path(self):\n return os.path.join(FLOWJS_PATH, self.filename)", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def instantiate_for_spirv_args(self, testcase):\n shader, self.filename = tempfile.mkstemp(\n dir=testcase.directory, suffix=self.suffix)\n shader_object = os.fdopen(shader, 'w')\n shader_object.write(self.source)\n shader_object.close()\n return self.filename", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)", "def outputPath():\n scenePath = bpy.data.filepath\n # If the scene hasn't been saved yet the path is empty.\n # Returning an empty path prompts the user for saving the scene.\n if not scenePath:\n return\n renderPath = os.path.join(os.path.dirname(scenePath), \"{}_thumbs\".format(NAME))\n return renderPath", "def convert_shaders(self):\n raise NotImplementedError()", "def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def get_asset_path(name):\n return os.path.join(constants.ROOT_DIR, 'assets', name)", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )" ]
[ "0.6595272", "0.6361699", "0.634404", "0.63148177", "0.6163704", "0.614001", "0.610097", "0.6075785", "0.6053845", "0.5924668", "0.58730847", "0.5861786", "0.58363956", "0.58298504", "0.58051777", "0.579176", "0.578321", "0.5744046", "0.5737306", "0.5688931", "0.56700575", "0.56690216", "0.5662235", "0.5617415", "0.561232", "0.56094676", "0.56071514", "0.56030697", "0.56013405", "0.55884683" ]
0.86577046
0
return the path for the aovs file
def aovsPath(self): return fl.File( self._path + '/aovs.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path(self):\n return self._obs_file()", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")", "def file_path(self) -> global___Expression:", "def file_path(self):\n return self.lib.file_path", "def path(self) -> str:\n return self.src + \"/\"", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def path(self):\n return self.file_path()", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def fpath(self):\n return os.path.join(self.path, self.name)", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def full_path(self):\n return os.path.abspath(self.path)", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def FilePath(self) -> str:", "def outpath(self):\n return None", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])", "def apk_path(self):\n return os.path.join(SETTINGS['repo_dir'], '%s.apk' % self.name)", "def get_scanrecpath(self):\n start_key = min(self.obsinfos)\n scanrecname = self.obsinfos[start_key].obsfoldername(\n source_name=self.scanrecparms['pointing'])\n scanrecpath = os.path.join(self.scanpath, scanrecname)\n return scanrecpath", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def output_file_path(self):\n return self.__output_file_path", "def file_path(self) -> Path:\n return self._input_file", "def get_file_save_path(self):\n return self.out", "def filepath(self):\n return self.file.path", "def rospath(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tfname = fname.rstrip(\"/\")\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\tr = fname[:fname.find(mark)+len(mark)-4]\t\n\treturn r", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")" ]
[ "0.7047884", "0.6801188", "0.67156875", "0.6595521", "0.6526361", "0.6481086", "0.64795697", "0.6452638", "0.6423643", "0.6420651", "0.64015955", "0.6397468", "0.63639444", "0.63354146", "0.63254094", "0.630958", "0.6307452", "0.62829566", "0.6233461", "0.6226466", "0.622618", "0.62031794", "0.62010825", "0.6191982", "0.61818427", "0.6180742", "0.6177824", "0.6173967", "0.61672175", "0.61605376" ]
0.87796766
0
return the path for the masterLayer data
def masterPath(self): return fl.File( self._path + '/master.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataPath(self):\n return ''", "def data_path(self):\n raise NotImplementedError", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def path(self):\n return self._data_file", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def _getDatasetPath(self):\n return self.__dataset_path", "def get_pathname(self):\n return self.image_data.path", "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def path(self) :\n return self.m_path", "def local_path(self):\n return self._data.get('local_path')", "def root_path(self):\n return os.path.dirname(self.image.path)", "def root_path(self) -> Path:\n return ARCHIVES_ROOT / self.source_name / self.key", "def get_master_url(self, identifier) -> None:\n # TODO(victorhc): Implement the following method to fetch the cluster\n # master_url from Dataproc.\n return '.'.join([\n self.cluster_metadata.project_id,\n self.cluster_metadata.region,\n self.cluster_metadata.cluster_name\n ])", "def path(self):\n return self.repository_obj.path / self.name", "def path(self):\n return self.path", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]", "def get_target_object_path(data_path: str) -> str:\n path_split = data_path.rsplit('.', 1)\n self_targeting = len(path_split) < 2\n if self_targeting:\n return \"\"\n return path_split[0]", "def path( self ) :\n\n return( self.__path )", "def get_root():\n\n return 'data/simulators/mg1'", "def getPath(self):\n return self.__folder", "def path(self):\n return self._container_dir", "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "def path(self):\n return self.storage.path(self.name)", "def path_name(self):", "def get_root_filename(self):\n pass", "def getPath(self):\n return self.path", "def GetPath(self):\r\n\r\n return self.directory", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def get_data_folder(self, mode='absolute'):\n\n path = Path(f'sub-{self.sub_id}', f'ses-{self.ses_id}', self.modality)\n\n if mode == 'absolute':\n if self.basedir is None:\n raise ValueError('No base directory set.')\n path = self.basedir / path\n\n return path" ]
[ "0.6769875", "0.6692991", "0.66356105", "0.642204", "0.63508654", "0.6319566", "0.63127893", "0.6197516", "0.6166026", "0.61191916", "0.6109279", "0.6093193", "0.60923404", "0.60806435", "0.60315734", "0.6025355", "0.60208064", "0.60151654", "0.6008396", "0.6005537", "0.59993035", "0.5991783", "0.59818435", "0.59721416", "0.5959343", "0.595346", "0.5951984", "0.59437186", "0.59374803", "0.59304136" ]
0.8091495
0
export master layer settings so we can re apply it
def exportMasterLayerSettings(self): master = rlayer.RenderLayer( 'defaultRenderLayer' ) master.makeCurrent() masterData = {} nodes = ['defaultArnoldRenderOptions','defaultResolution','defaultRenderGlobals'] mnNodes =[ mn.Node( n ) for n in nodes ] for n in mnNodes: for a in n.listAttr( se = True, v = True, w = True ): try: masterData[a] = a.v except: continue pickle.dump( masterData, open( self.masterPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def restore_export_preset():\n run_mel_command(\"FBXResetExport\")", "def exportData(self):\n\t\tlays = rlayer.renderlayers()\n\t\tdata = {}\n\t\tfor l in lays:\n\t\t\tif l.name == 'defaultRenderLayer':\n\t\t\t\tcontinue\n\t\t\tdata[l.name] = {'objects':l.objects, # OBJECTS IN LAYER\n\t\t\t\t\t\t\t'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES\n\t\t\t\t\t\t\t'conns' :l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS\n\t\t\t\t\t\t\t'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER\n\t\t\t\t\t\t\t}\n\t\tpickle.dump( data, open( self.dataPath.path, \"wb\" ) )", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def configureMaster(self):\n\t\t\n\t\tfin = open('/opt/google/earth/free/drivers.ini', 'r')\n\t\tfout = open('/etc/X11/ge-drivers.ini', 'w')\n\t\t\n\t\tfor line in fin.readlines():\n\t\t\tfout.write(line)\n\t\t\tif line.find('SETTINGS {') != 0:\n\t\t\t\tcontinue\n\t\t\tfout.write('\\tViewSync/send = true\\n')\n\t\t\tfout.write('\\tViewSync/receive = false\\n')\n\n\t\t\tfout.write('\\tViewSync/hostname = %s\\n' %\n\t\t\t\t self.db.getHostAttr('localhost',\n\t\t\t\t\t\t 'Kickstart_PrivateBroadcast'))\n fout.write('\\tViewSync/port = 21567\\n')\n\t\t\tfout.write('\\n')\n\t\t\tfout.write('\\tViewSync/horizFov = 60\\n')\n fout.write('\\tViewSync/rollOffset = 0\\n')\n fout.write('\\tViewSync/yawOffset = 0\\n')\n\t\t\tfout.write('\\tViewSync/pitchOffset = 0\\n')\n\t\t\tfout.write('\\n')\n\n\n\t\tfin.close()\n\t\tfout.close()\n\n\t\tshutil.copy('/etc/X11/ge-drivers.ini', '/opt/google/earth/free/drivers.ini')", "def ExtractInfoAndCopyMaster(self):\n self.ExtractandWriteInfo()\n self.CreateMasterCopy()\n return \"TurnOffMirror\"", "def save_layer(index, settings) -> Action:\n return {\n \"kind\": SAVE_LAYER,\n \"payload\": {\"index\": index, \"settings\": settings},\n }", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n pawstools.save_cfg(od_load_flags,cfg_file)", "def saveToolSettings(*args, **kwargs)->None:\n pass", "def save_switch_configs(self):", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def export_configurations():\n pass", "def reset_cfg():\n _C.merge_from_other_cfg(_CFG_DEFAULT)", "def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = 
self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def loadLayerSettings(self):\r\n # Get layer attributes\r\n provider = self.layer.dataProvider()\r\n \r\n if not provider.isValid():\r\n logging.getLogger(type(self).__name__).error('invalid layer')\r\n return\r\n \r\n attributes = []\r\n numericAttributes = []\r\n \r\n for field in provider.fields():\r\n attributes.append(field.name())\r\n fieldType = field.type()\r\n if fieldType == QtCore.QVariant.Int or fieldType == QtCore.QVariant.Double:\r\n numericAttributes.append(field.name())\r\n \r\n self.comboBoxLayerAttribute.clear()\r\n self.comboBoxLayerAttribute.addItems(sorted(attributes))\r\n self.comboBoxLayerAttribute.setEnabled(True)\r\n \r\n self.comboBoxStyleCategorizedAttribute.clear()\r\n self.comboBoxStyleCategorizedAttribute.addItems(sorted(attributes))\r\n self.comboBoxStyleCategorizedAttribute.setEnabled(True)\r\n \r\n # Disable graduated style tab if there are no numeric attributes\r\n if numericAttributes:\r\n self.comboBoxStyleGraduatedAttribute.clear()\r\n self.comboBoxStyleGraduatedAttribute.addItems(sorted(numericAttributes))\r\n self.comboBoxStyleGraduatedAttribute.setEnabled(True)\r\n else:\r\n self.tabStyleGraduated.setDisabled(True)\r\n \r\n # Get layer transparency setting\r\n self.sliderLayerTransparency.setValue(self.layer.layerTransparency())\r\n self.spinBoxLayerTransparency.setValue(self.layer.layerTransparency())\r\n \r\n # Get layer symbol fill color\r\n symbols = self.layer.rendererV2().symbols()\r\n self.layerSymbolFillColor = self.styleCategorizedColor = self.styleGraduatedColor = self.styleRuleBasedColor = symbols[0].color()\r\n \r\n # Load layer renderer settings\r\n renderer = self.layer.rendererV2()\r\n \r\n if isinstance(renderer, QgsSingleSymbolRendererV2):\r\n symbols = renderer.symbols()\r\n self.layerSymbolFillColor = symbols[0].color()\r\n self.buttonLayerSymbolFillColor.setStyleSheet('background-color: {0};'.format(self.layerSymbolFillColor.name()))\r\n elif isinstance(renderer, QgsCategorizedSymbolRendererV2):\r\n categories = renderer.categories()\r\n for category in categories:\r\n color = category.symbol().color()\r\n value = str(category.value())\r\n label = category.label()\r\n self.addStyleCategorized(color, value, label)\r\n self.styleCategorizedColor = color\r\n self.buttonStyleCategorizedFillColor.setStyleSheet('background-color: {0};'.format(self.styleCategorizedColor.name()))\r\n attribute = renderer.classAttribute()\r\n self.comboBoxStyleCategorizedAttribute.setCurrentIndex(self.comboBoxStyleCategorizedAttribute.findText(attribute))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Categorized'))\r\n elif isinstance(renderer, QgsGraduatedSymbolRendererV2):\r\n ranges = renderer.ranges()\r\n for range in ranges:\r\n color = range.symbol().color()\r\n lowerValue = range.lowerValue()\r\n upperValue = range.upperValue()\r\n label = range.label()\r\n self.addStyleGraduated(color, lowerValue, upperValue, label)\r\n self.styleGraduatedColor = color\r\n self.buttonStyleGraduatedFillColor.setStyleSheet('background-color: {0};'.format(self.styleGraduatedColor.name()))\r\n attribute = renderer.classAttribute()\r\n 
self.comboBoxStyleGraduatedAttribute.setCurrentIndex(self.comboBoxStyleGraduatedAttribute.findText(attribute))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Graduated'))\r\n elif isinstance(renderer, QgsRuleBasedRendererV2):\r\n rootRule = renderer.rootRule()\r\n rules = rootRule.children()\r\n for aRule in rules:\r\n color = aRule.symbol().color()\r\n rule = aRule.filterExpression()\r\n label = aRule.label()\r\n minScale = aRule.scaleMinDenom()\r\n maxScale = aRule.scaleMaxDenom()\r\n self.addStyleRuleBased(color, rule, minScale, maxScale, label)\r\n self.styleRuleBasedColor = color\r\n self.buttonStyleRuleBasedFillColor.setStyleSheet('background-color: {0};'.format(self.styleRuleBasedColor.name()))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Rule-based'))\r\n \r\n # Get layer label settings\r\n self.p = QgsPalLayerSettings()\r\n self.p.readFromLayer(self.layer)\r\n \r\n if self.p.enabled:\r\n self.checkBoxLayerLabelEnabled.setChecked(True)\r\n self.comboBoxLayerAttribute.setCurrentIndex(self.comboBoxLayerAttribute.findText(self.p.fieldName))\r\n self.spinBoxLabelSize.setValue(self.p.textFont.pointSize())\r\n self.labelColor = self.p.textColor\r\n self.buttonLabelColor.setStyleSheet('background-color: {0};'.format(self.labelColor.name()))", "def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')", "def tempcontrol_preset_save(self):\n with open(\n self.tempcontrol_presets_path\n + \"{}.json\".format(self.tempcontrol_preset_currentFilename),\n \"w\",\n ) as output:\n output.write(json.dumps(self.tempcontrol_conf))", "def configure_as_preset(self, val):\n if val == True:\n if self.active:\n self._preset_save_raw = self.active.ecc_raw\n self._preset_save_dataset = self.active.ecc_dataset\n self._preset_save_dataset_id = self.active.ecc_dataset_id\n self.active.ecc_raw = None\n self.active.ecc_dataset = None\n self.active.ecc_dataset_id = None\n if self.active._panel:\n self._preset_save_filename = self.active._panel._filename\n self.active._panel._filename = \"\"\n else:\n if self.active:\n self.active.ecc_raw = self._preset_save_raw\n self.active.ecc_dataset = self._preset_save_dataset\n self.active.ecc_dataset_id = self._preset_save_dataset_id\n self._preset_save_raw = None\n self._preset_save_dataset = None\n self._preset_save_dataset_id = None\n if self.active._panel:\n self.active._panel._filename = self._preset_save_filename\n self._preset_save_filename = \"\"", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()", "def persist_tools_options(self, *args):\n\n\t\t# Panel-wide classic tools options (they are not Gio actions!)\n\t\tself._tools_gsettings.set_int('last-size', self.get_tool_width())\n\t\tself._persist_color(self.get_left_color(), 'last-left-rgba')\n\t\tself._persist_color(self.get_right_color(), 'last-right-rgba')\n\n\t\t# Tool-wide boolean actions\n\t\tfor action_name in self._boolean_actions_from_gsetting:\n\t\t\tkey_name = self._boolean_actions_from_gsetting[action_name]\n\t\t\tself._persist_boolean(action_name, key_name)\n\n\t\t# Tool-wide \"enum\" actions\n\t\tfor action_name in self._string_actions_from_gsetting:\n\t\t\tkey_name = self._string_actions_from_gsetting[action_name]\n\t\t\tself._persist_string(action_name, key_name)", "def copySettings(self):\n\n networkNode = 
self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True, hd=True)\n\n attrData = []\n for attr in attrs:\n value = cmds.getAttr(networkNode + \".\" + attr)\n attrData.append([attr, value])\n\n # write out attrData to a temp file\n tempDir = cmds.internalVar(userTmpDir=True)\n clipboardFile = os.path.normcase(os.path.join(tempDir, \"ART_clipboard.txt\"))\n\n f = open(clipboardFile, 'w')\n\n # dump the data with json\n json.dump(attrData, f)\n f.close()", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data", "def get_save_data(self):\n data = super().get_save_data()\n data['palette'] = self.palette\n data['levels'] = self.levels\n return data", "def clear_layers_name():\n set_keep['_layers_name_list'] =[]", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def _augment_pipeline_cfg(self):", "def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config", "def save_settings():\n\n dont_save = ['VISIONEGG_CONFIG_FILE',\n 'VISIONEGG_SYSTEM_DIR',\n 'VISIONEGG_USER_DIR',\n ]\n\n if not VisionEgg.config.VISIONEGG_CONFIG_FILE:\n raise RuntimeError(\"No config file in use.\")\n re_setting_finder = re.compile(r\"^\\s?((?:VISIONEGG_[A-Z_]*)|(?:SYNCLYNC_[A-Z_]*))\\s?=\\s?(\\S*)\\s?$\",re.IGNORECASE)\n\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"r\")\n orig_lines = orig_file.readlines()\n\n line_ending = orig_lines[0][-2:]\n if line_ending[0] not in ['\\r','\\n','\\l']:\n line_ending = line_ending[1]\n\n out_file_lines = []\n\n saved_config_vars = []\n\n for line in orig_lines:\n out_line = line # The output is the same as the input unless there's a match\n match = 
re_setting_finder.match(line)\n if match:\n name = match.group(1).upper()\n if name in VisionEgg.config.__dict__.keys():\n if name not in dont_save:\n # Change the output line\n out_line = (\"%s = %s\"%(name,getattr(VisionEgg.config,name,))) + line_ending\n saved_config_vars.append(name)\n out_file_lines.append(out_line)\n\n # Close and reopen orig_file in write mode\n orig_file.close()\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"w\")\n for line in out_file_lines:\n orig_file.write(line)" ]
[ "0.72975564", "0.5729428", "0.56773335", "0.5671888", "0.5636887", "0.5510333", "0.5465108", "0.54372466", "0.54338557", "0.5410532", "0.53921604", "0.53159", "0.52870184", "0.52698493", "0.5225601", "0.5222329", "0.5184534", "0.5180232", "0.5179325", "0.517133", "0.516891", "0.51393145", "0.5114553", "0.5097077", "0.5089205", "0.50671285", "0.504586", "0.5023687", "0.50234926", "0.50170094" ]
0.8537631
0
export data from scene, objects overrides in renderlayers.. etc
def exportData(self):
    lays = rlayer.renderlayers()
    data = {}
    for l in lays:
        if l.name == 'defaultRenderLayer':
            continue
        data[l.name] = {'objects':l.objects,                     # OBJECTS IN LAYER
                        'values' :l.overridesWithValues,         # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES
                        'conns'  :l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS
                        'shader' :l.overridedShader              # OVERRIDE RENDERLAYER SHADER
                        }
    pickle.dump( data, open( self.dataPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create 
target directories if necessary.\n if not os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: 
vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n 
sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def objects_to_bmesh(objs, transform=True):\n\n # CAUTION: Removes/destroys custom layer props\n\n # Creates the mesh used to merge the entire scene\n bm_all = bmesh.new()\n\n # Adds the objects\" meshes to the bmesh\n for obj in objs:\n dprint(\"Preparing object {} for export...\".format(obj.name))\n # Creates a bmesh from the supplied object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Makes sure all layers exist so values don't get lost while exporting\n uv_layer = bm.loops.layers.uv.get(\"UVMap\")\n tex_layer = bm.faces.layers.tex.get(\"UVMap\")\n vc_layer = (bm.loops.layers.color.get(\"Col\") or\n bm.loops.layers.color.new(\"Col\"))\n env_layer = (bm.loops.layers.color.get(\"Env\") or\n bm.loops.layers.color.new(\"Env\"))\n env_alpha_layer = (bm.faces.layers.float.get(\"EnvAlpha\") or\n bm.faces.layers.float.new(\"EnvAlpha\"))\n va_layer = (bm.loops.layers.color.get(\"Alpha\") or\n bm.loops.layers.color.new(\"Alpha\"))\n texnum_layer = bm.faces.layers.int.get(\"Texture Number\")\n type_layer = (bm.faces.layers.int.get(\"Type\") or\n bm.faces.layers.int.new(\"Type\"))\n material_layer = (bm.faces.layers.int.get(\"Material\") or\n bm.faces.layers.int.new(\"Material\"))\n\n # Removes the parent for exporting and applies transformation\n parent = obj.parent\n if parent:\n mat = obj.matrix_world.copy()\n old_mat = obj.matrix_basis.copy()\n obj.parent = None\n obj.matrix_world = mat\n\n spc = obj.matrix_basis\n 
bmesh.ops.scale(\n bm,\n vec=obj.scale,\n space=spc,\n verts=bm.verts\n )\n if transform:\n bmesh.ops.transform(\n bm,\n matrix=Matrix.Translation(obj.location),\n space=spc,\n verts=bm.verts\n )\n bmesh.ops.rotate(\n bm,\n cent=obj.location,\n matrix=obj.rotation_euler.to_matrix(),\n space=spc,\n verts=bm.verts\n )\n\n # Restores the parent relationship\n if parent and not obj.parent:\n obj.parent = parent\n obj.matrix_basis = old_mat\n\n # Converts the transformed bmesh to mesh\n new_mesh = bpy.data.meshes.new(\"ncp_export_temp\")\n bm.to_mesh(new_mesh)\n\n # Adds the transformed mesh to the big bmesh\n bm_all.from_mesh(new_mesh)\n\n # Removes unused meshes\n bpy.data.meshes.remove(new_mesh, do_unlink=True)\n bm.free()\n\n return bm_all", "def afterLoadSceneObject(self):\n\t\tpass", "def save_scene(force=True, **kwargs):\n\n pass", "def dump_objects():\n pass", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def loadMultiple(method, *args):\n\n ### Declaring attributes\n selectedCurve = selectedMesh = None\n minRangeX = minRangeY = minRangeZ = maxRangeX = maxRangeY = maxRangeZ = 0\n selectedObjects = []\n\n ### Query UI values\n # Choise between standin / assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # List of all asset icons on UI\n objectIconsList = cmds.layout(objectScroll, query=True, childArray=True)\n # Amount of copies\n buildingAmount = cmds.intSliderGrp(SpawnObjectsTab.BuildingAmount, query=True, value=True)\n # Deviation from original rotation\n rotationVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomRotation, query=True, value=True)\n # Deviation from original scale\n scaleVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomScale, query=True, value=True)\n\n ### 
Iterate over each asset icon\n for obj in objectIconsList:\n\n # Append to list if the asset is selected\n isSelected = cmds.iconTextCheckBox(obj, query=True, value=True)\n\n if isSelected:\n selectedObjects.append(cmds.iconTextCheckBox(obj, query=True, label=True))\n\n # Exit the function if no asset is selected\n if not selectedObjects:\n return\n \n # Reference to the function that will scatter the copies\n scatteringFunction = None\n\n ### The user chose \"curve\"\n if method == \"curve\":\n \n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnCurve\n\n # Get curve reference\n selectedCurve = cmds.ls(selection=True)\n if not selectedCurve:\n return\n selectedCurve = selectedCurve[0]\n\n ### The user chose \"range\"\n if method == \"range\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnRange\n\n # Query minimum values from floatField\n minValues = cmds.floatFieldGrp(SpawnObjectsTab.MinimumField, query=True, value=True)\n minRangeX, minRangeY, minRangeZ = minValues[0], minValues[1], minValues[2]\n # Query maximum values from floatField\n maxValues = cmds.floatFieldGrp(SpawnObjectsTab.MaximumField, query=True, value=True)\n maxRangeX, maxRangeY, maxRangeZ = maxValues[0], maxValues[1], maxValues[2]\n\n ### The user chose \"mesh\"\n if method == \"mesh\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnMesh\n\n # Get reference of selected object\n selectedMesh = cmds.ls(selection=True)\n if not selectedMesh:\n return\n selectedMesh = selectedMesh[0]\n\n # Create group for the spawned copies \n finalGroup = cmds.group(name=\"CurveAssetGroup\", empty=True)\n cmds.select(clear=True)\n\n ### Iterate over the generated positions of the function with given parameters\n # scatteringFunction is a reference to a function in ObjectScattering.py\n # these functions are generators, they yield a value and we can iterate\n # to get the next value generated.\n for position in scatteringFunction(objectCount=buildingAmount, curve=selectedCurve,\n minX=minRangeX, minY=minRangeY, minZ=minRangeZ, maxX=maxRangeX, maxY=maxRangeY, maxZ=maxRangeZ,\n mesh=selectedMesh):\n \n # Randomly instance an asset from the selectedObjects list\n asset = AssetIcon(random.choice(selectedObjects))\n loadedAssetNode = None\n\n # Create copy based on the mode selected by the user\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n\n # Move this copy to the generated position\n cmds.move(position[0], position[1], position[2], loadedAssetNode, absolute=True)\n\n # If there is a fourth index on the position, that means we have rotation info\n # use that info to rotate the asset.\n # It is used to match an objects rotation to a face normal.\n if len(position) == 4:\n cmds.rotate(position[3][0], position[3][1], position[3][2], loadedAssetNode, absolute=True)\n \n # Add random rotation\n angle = random.uniform(-rotationVariation, rotationVariation)\n cmds.rotate(angle, loadedAssetNode, y=True, relative=True, objectSpace=True)\n\n # Add random scale\n newScale = random.uniform(1, 1+scaleVariation)\n cmds.scale(newScale, newScale, newScale, loadedAssetNode, absolute=True)\n\n #cmds.FreezeTransformations(loadedAssetNode)\n\n # Parent copy to group\n cmds.parent(loadedAssetNode, finalGroup)", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in 
scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] 
= outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def create_scene(self):\n \n self.scene=soya.World()", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n img_size = img.size\n img_size = (400,400)\n\n loader = loadjson\n \n data = loader(txt, self.objectsofinterest,img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float() \n\n if len(points_all) == 0:\n points_all = torch.zeros(1, 10, 2).double()\n \n # self.save == true assumes there is only \n # one object instance in the scene. \n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1,3).float()\n rotations = torch.zeros(1,4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name,'_camera_settings.json')\n with open(path_cam) as data_file: \n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3,3))\n matrix_camera[0,0] = cam['fx']\n matrix_camera[1,1] = cam['fy']\n matrix_camera[0,2] = cam['cx']\n matrix_camera[1,2] = cam['cy']\n matrix_camera[2,2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name,'_object_settings.json')\n with open(path_set) as data_file: \n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy() \n\n \n def Reproject(points,tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0]/2, img.size[1]/2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = 
tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n \n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test: \n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n \n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key,(12, 115, 170),7) \n \n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1,point2],fill=lineColor,width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0]-pointRadius, point[1]-pointRadius, point[0]+pointRadius, point[1]+pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, which_color = 0, color = None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color \n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) #lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n \n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n \n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255,255,255), pointRadius = 3)\n DrawDot(points[1], pointColor=(0,0,0), pointRadius = 3)\n\n # Draw all the found objects. 
\n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n \n return {\n \"img\":img,\n \"translations\":translations,\n \"rot_quaternions\":rotations,\n 'pointsBelief':np.array(points_all[0]),\n 'matrix_camera':matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name':name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap(\n img, \n pointsBelief=pointsBelief,\n nbpoints = 9,\n sigma = self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg),beliefsImg[0].size(1),beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n \n\n # Create affinity maps\n scale = 8\n if min (img.size) / 8.0 != min (img_size)/8.0:\n # print (scale)\n scale = min (img.size)/(min (img_size)/8.0)\n\n affinities = GenerateMapAffinity(img,8,pointsBelief,objects_centroid,scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0]+1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1]+1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0],self.normal[0],self.normal[0]),\n (self.normal[1],self.normal[1],self.normal[1])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n \n img = crop(img,h_crop,w_crop,img_size[1],img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop/8)\n h_crop = int(h_crop/8)\n\n affinities = affinities[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n beliefs = beliefs[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,1,50)],dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,50,1)],dim=2)\n\n return {\n 'img':img, \n \"affinities\":affinities, \n 'beliefs':beliefs,\n }", "def export3DModel(self, fileName, filePath, fileFormat=\".step\", object_list=[], removed_objects=[]):\n if not object_list:\n allObjects = self.modeler.primitives.object_names\n if removed_objects:\n for rem in removed_objects:\n allObjects.remove(rem)\n else:\n if \"Region\" in allObjects:\n allObjects.remove(\"Region\")\n else:\n allObjects = object_list[:]\n\n self.add_info_message(\"Exporting {} objects\".format(len(allObjects)))\n\n stringa = \",\".join(allObjects)\n arg = [\n \"NAME:ExportParameters\",\n \"AllowRegionDependentPartSelectionForPMLCreation:=\",\n True,\n \"AllowRegionSelectionForPMLCreation:=\",\n True,\n \"Selections:=\",\n stringa,\n \"File Name:=\",\n str(filePath) + \"/\" + str(fileName) + str(fileFormat),\n \"Major Version:=\",\n -1,\n \"Minor Version:=\",\n -1,\n ]\n\n self.modeler.oeditor.Export(arg)\n return True", "def show(data_objects, **options):\n if not is_loaded():\n return data_objects\n\n # (else)\n if not hasattr(data_objects, '__iter__'):\n 
data_objects = [data_objects]\n\n # print(data_objects)\n scene = pygeojs.scene(**options)\n scene.createLayer('osm')\n\n if not data_objects:\n print('No data objects')\n return scene\n\n # feature_layer = scene.createLayer('feature')\n feature_layer = None\n\n combined_bounds = None\n # Reverse order so that first item ends on top\n for data_object in reversed(data_objects):\n if data_object._getdatatype() == gaia.types.VECTOR:\n # print('Adding vector object')\n # Special handling for vector datasets:\n # First, make a copy of the geopandas frame\n df = geopandas.GeoDataFrame.copy(data_object.get_data())\n\n # Convert to lon-lat if needed\n epsg = data_object.get_epsg()\n if epsg and str(epsg) != '4326':\n print('Converting crs')\n df[df.geometry.name] = df.geometry.to_crs(epsg='4326')\n\n # Strip any z coordinates (force to z = 1)\n df.geometry = df.geometry.scale(zfact=0.0).translate(zoff=1.0)\n # df.to_file('/home/john/temp/df.pandas')\n # print(df)\n # print(df.geometry)\n\n # Calculate bounds\n geopandas_bounds = df.geometry.total_bounds\n xmin, ymin, xmax, ymax = geopandas_bounds\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Add map feature\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n # Use __geo_interface__ to get the geojson\n feature_layer.readGeoJSON(df.__geo_interface__)\n # print(df.__geo_interface__)\n else:\n # Get bounds, in order to compute overall bounds\n meta = data_object.get_metadata()\n # print('meta: {}'.format(meta))\n # print(meta)\n raster_bounds = meta.get('bounds').get('coordinates')[0]\n # print(meta_bounds)\n assert raster_bounds, 'data_object missing bounds'\n\n # meta bounds inconsistent between sources, so compute brute force\n xvals, yvals = zip(*raster_bounds)\n xmin, xmax = min(xvals), max(xvals)\n ymin, ymax = min(yvals), max(yvals)\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Bounds format is [xmin, ymin, xmax, ymax]\n bounds = [\n meta_bounds[0][0], meta_bounds[0][1],\n meta_bounds[2][0], meta_bounds[2][1]\n ]\n\n # print(bounds)\n if combined_bounds is None:\n combined_bounds = bounds\n else:\n combined_bounds[0] = min(combined_bounds[0], bounds[0])\n combined_bounds[1] = min(combined_bounds[1], bounds[1])\n combined_bounds[2] = max(combined_bounds[2], bounds[2])\n combined_bounds[3] = max(combined_bounds[3], bounds[3])\n\n # print('options:', options)\n rep = options.get('representation')\n if rep == 'outline':\n # Create polygon object\n rect = [\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n ]\n geojs_polygon = geojson.Polygon([rect])\n properties = {\n 'fillColor': '#fff',\n 'fillOpacity': 0.1,\n 'stroke': True,\n 'strokeColor': '#333',\n 'strokeWidth': 2\n }\n geojson_feature = geojson.Feature(\n geometry=geojs_polygon, properties=properties)\n geojson_collection = geojson.FeatureCollection([geojson_feature])\n # print(geojson_collection)\n\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n feature_layer.createFeature(\n 'geojson', geojson_collection, **options)\n\n elif data_object.__class__.__name__ == 'GirderDataObject':\n if data_object._getdatatype() == 'raster':\n # Use large-image display\n # Todo - verify that it is installed\n tiles_url = data_object._get_tiles_url()\n # print('tiles_url', tiles_url)\n opacity = 1.0\n if hasattr(data_object, 'opacity'):\n opacity = data_object.opacity\n scene.createLayer(\n 
'osm', url=tiles_url, keepLower=False, opacity=opacity)\n else:\n raise GaiaException(\n 'Cannot display GirderDataObject with data type {}'.format(\n data_object._getdatatype()))\n\n elif data_object._getdatatype() == gaia.types.VECTOR:\n pass # vector objects handled above\n else:\n msg = 'Cannot display dataobject, type {}'.format(\n data_object.__class__.__name__)\n raise GaiaException(msg)\n\n # Send custom message to (javascript) client to set zoom & center\n rpc = {'method': 'set_zoom_and_center', 'params': combined_bounds}\n scene.send(rpc)\n return scene", "def export_world(file, world, scene, global_matrix, tab_write):\n render = scene.pov\n agnosticrender = scene.render\n camera = scene.camera\n # matrix = global_matrix @ camera.matrix_world # view dependant for later use NOT USED\n if not world:\n return\n\n # These lines added to get sky gradient (visible with PNG output)\n\n # For simple flat background:\n if not world.pov.use_sky_blend:\n # No alpha with Sky option:\n if render.alpha_mode == \"SKY\" and not agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 0>}\\n\" % (world.pov.horizon_color[:])\n )\n\n elif render.alpha_mode == \"STRAIGHT\" or agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 1>}\\n\" % (world.pov.horizon_color[:])\n )\n else:\n # Non fully transparent background could premultiply alpha and avoid\n # anti-aliasing display issue\n tab_write(\n file,\n \"background {rgbft<%.3g, %.3g, %.3g, %.3g, 0>}\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n\n world_tex_count = 0\n # For Background image textures\n for t in world.pov_texture_slots: # risk to write several sky_spheres but maybe ok.\n if t:\n tex = bpy.data.textures[t.texture]\n if tex.type is not None:\n world_tex_count += 1\n # XXX No enable checkbox for world textures yet (report it?)\n # if t and tex.type == 'IMAGE' and t.use:\n if tex.type == \"IMAGE\":\n image_filename = path_image(tex.image)\n if tex.image.filepath != image_filename:\n tex.image.filepath = image_filename\n if image_filename != \"\" and t.use_map_blend:\n textures_blend = image_filename\n # colvalue = t.default_value\n t_blend = t\n\n # Commented below was an idea to make the Background image oriented as camera\n # taken here:\n # http://news.pov.org/pov.newusers/thread/%[email protected]%3E/\n # Replace 4/3 by the ratio of each image found by some custom or existing\n # function\n # mapping_blend = (\" translate <%.4g,%.4g,%.4g> rotate z*degrees\" \\\n # \"(atan((camLocation - camLookAt).x/(camLocation - \" \\\n # \"camLookAt).y)) rotate x*degrees(atan((camLocation - \" \\\n # \"camLookAt).y/(camLocation - camLookAt).z)) rotate y*\" \\\n # \"degrees(atan((camLocation - camLookAt).z/(camLocation - \" \\\n # \"camLookAt).x)) scale <%.4g,%.4g,%.4g>b\" % \\\n # (t_blend.offset.x / 10 , t_blend.offset.y / 10 ,\n # t_blend.offset.z / 10, t_blend.scale.x ,\n # t_blend.scale.y , t_blend.scale.z))\n # using camera rotation valuesdirectly from blender seems much easier\n if t_blend.texture_coords == \"ANGMAP\":\n mapping_blend = \"\"\n else:\n # POV-Ray \"scale\" is not a number of repetitions factor, but its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # UV scale is 0.5,0.5 in blender and 0,0 in POV\n # Further Scale by 2 and translate by -1 are\n # required for the sky_sphere not to repeat\n\n mapping_blend = (\n 
\"scale 2 scale <%.4g,%.4g,%.4g> translate -1 \"\n \"translate <%.4g,%.4g,%.4g> rotate<0,0,0> \"\n % (\n (1.0 / t_blend.scale.x),\n (1.0 / t_blend.scale.y),\n (1.0 / t_blend.scale.z),\n 0.5 - (0.5 / t_blend.scale.x) - t_blend.offset.x,\n 0.5 - (0.5 / t_blend.scale.y) - t_blend.offset.y,\n t_blend.offset.z,\n )\n )\n\n # The initial position and rotation of the pov camera is probably creating\n # the rotation offset should look into it someday but at least background\n # won't rotate with the camera now.\n # Putting the map on a plane would not introduce the skysphere distortion and\n # allow for better image scale matching but also some waay to chose depth and\n # size of the plane relative to camera.\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n tab_write(\n file,\n 'image_map{%s \"%s\" %s}\\n'\n % (image_format(textures_blend), textures_blend, img_map_bg(t_blend)),\n )\n tab_write(file, \"}\\n\")\n tab_write(file, \"%s\\n\" % mapping_blend)\n # The following layered pigment opacifies to black over the texture for\n # transmit below 1 or otherwise adds to itself\n tab_write(file, \"pigment {rgb 0 transmit %s}\\n\" % tex.intensity)\n tab_write(file, \"}\\n\")\n # tab_write(file, \"scale 2\\n\")\n # tab_write(file, \"translate -1\\n\")\n\n # For only Background gradient\n\n if world_tex_count == 0 and world.pov.use_sky_blend:\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n # maybe Should follow the advice of POV doc about replacing gradient\n # for skysphere..5.5\n tab_write(file, \"gradient y\\n\")\n tab_write(file, \"color_map {\\n\")\n\n if render.alpha_mode == \"TRANSPARENT\":\n tab_write(\n file,\n \"[0.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n tab_write(\n file,\n \"[1.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.zenith_color[0],\n world.pov.zenith_color[1],\n world.pov.zenith_color[2],\n render.alpha_filter,\n ),\n )\n if agnosticrender.film_transparent or render.alpha_mode == \"STRAIGHT\":\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.horizon_color[:]))\n # aa premult not solved with transmit 1\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.zenith_color[:]))\n else:\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.horizon_color[:]))\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.zenith_color[:]))\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n # Sky_sphere alpha (transmit) is not translating into image alpha the same\n # way as 'background'\n\n # if world.pov.light_settings.use_indirect_light:\n # scene.pov.radio_enable=1\n\n # Maybe change the above to a function copyInternalRenderer settings when\n # user pushes a button, then:\n # scene.pov.radio_enable = world.pov.light_settings.use_indirect_light\n # and other such translations but maybe this would not be allowed either?\n\n # -----------------------------------------------------------------------------\n\n mist = world.mist_settings\n\n if mist.use_mist:\n tab_write(file, \"fog {\\n\")\n if mist.falloff == \"LINEAR\":\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) * 0.368))\n elif mist.falloff in [\"QUADRATIC\", \"INVERSE_QUADRATIC\"]: # n**2 or squrt(n)?\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) ** 2 * 0.368))\n tab_write(\n file,\n \"color 
rgbt<%.3g, %.3g, %.3g, %.3g>\\n\"\n % (*world.pov.horizon_color, (1.0 - mist.intensity)),\n )\n # tab_write(file, \"fog_offset %.6f\\n\" % mist.start) #create a pov property to prepend\n # tab_write(file, \"fog_alt %.6f\\n\" % mist.height) #XXX right?\n # tab_write(file, \"turbulence 0.2\\n\")\n # tab_write(file, \"turb_depth 0.3\\n\")\n tab_write(file, \"fog_type 1\\n\") # type2 for height\n tab_write(file, \"}\\n\")\n if scene.pov.media_enable:\n tab_write(file, \"media {\\n\")\n tab_write(\n file,\n \"scattering { %d, rgb %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (\n int(scene.pov.media_scattering_type),\n scene.pov.media_diffusion_scale,\n *(scene.pov.media_diffusion_color[:]),\n ),\n )\n if scene.pov.media_scattering_type == \"5\":\n tab_write(file, \"eccentricity %.3g\\n\" % scene.pov.media_eccentricity)\n tab_write(file, \"}\\n\")\n tab_write(\n file,\n \"absorption %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (scene.pov.media_absorption_scale, *(scene.pov.media_absorption_color[:])),\n )\n tab_write(file, \"\\n\")\n tab_write(file, \"samples %.d\\n\" % scene.pov.media_samples)\n tab_write(file, \"}\\n\")", "def get_objects_data(self):\n pass", "def create_scene_obs(name, dimension, is_mesh, mesh_file, orientation, z_offset):\n obs_dict = {}\n obs_dict['name'] = name #string\n obs_dict['dim'] = dimension\n obs_dict['is_mesh'] = is_mesh\n obs_dict['mesh_file'] = mesh_file\n obs_dict['orientation'] = orientation\n obs_dict['z_offset'] = z_offset\n return obs_dict", "def exports():", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)" ]
[ "0.63344496", "0.62675095", "0.6227879", "0.6224553", "0.6189659", "0.6154672", "0.6018009", "0.59942466", "0.5959543", "0.5952904", "0.591055", "0.5901034", "0.588326", "0.5835366", "0.58250904", "0.5816034", "0.57584465", "0.57536954", "0.5716126", "0.57042253", "0.5703749", "0.56667525", "0.56217647", "0.56033295", "0.5602407", "0.5595913", "0.55618834", "0.5528604", "0.551677", "0.5508003" ]
0.73334426
0
export lights from scene
def exportLights(self): #TODO! REMOVE CONSTRAINS lights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 ) mc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' ) litsToExport = [] for li in lights: finalLi = li.split( '|' ) if len(finalLi) == 1: litsToExport.append( finalLi[0] ) else: litsToExport.append( finalLi[1] ) if litsToExport: mc.select( litsToExport, r=1, ne=1 ) mc.file( self.lightPath.path, op="v=0", typ="mayaAscii", pr=1, es=1 ) #export Light Linking self.exportLightLinking()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def flicker_lights(self):\n print 'Lights Set'", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def lights(self):\n return list(self.GetLights())", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, 
text=\"Ogre3D (.scene and .mesh)\")\n return op", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def lights(self) -> List[dict]:\n return 
self.items_by_domain(\"light\")", "def import_scene(file_path):\n\n pass", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "def turnLightingSystemOn():\n dislin.light('ON')", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n 
bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: 
vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def setup_scene_for_rgb_render(scene, outdir):\n # Use node rendering for python control\n scene.use_nodes = True\n tree = scene.node_tree\n links = tree.links\n\n # Make sure there are no existing nodes\n for node in tree.nodes:\n tree.nodes.remove(node)\n\n # Set up a renderlayer and plug it into our remapping layer\n inp = tree.nodes.new('CompositorNodeRLayers')\n\n if (bpy.app.version[1] >= 70): # Don't apply color transformation -- changed in Blender 2.70\n scene.view_settings.view_transform = 'Raw'\n scene.sequencer_colorspace_settings.name = 'Non-Color'\n\n # Save it out\n if outdir:\n out = tree.nodes.new('CompositorNodeOutputFile')\n ident = str(uu.uuid4())\n out.file_slots[0].path = ident\n out.base_path = outdir\n # out.format.color_mode = 'BW'\n # out.format.color_depth = settings.DEPTH_BITS_PER_CHANNEL\n out.format.color_mode = 'RGB'\n out.format.color_depth = settings.COLOR_BITS_PER_CHANNEL\n out.format.file_format = settings.PREFERRED_IMG_EXT.upper()\n links.new(inp.outputs[0], out.inputs[0])\n ext = 
utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]\n temp_filename = \"{0}0001.{1}\".format(ident, ext)\n return os.path.join(outdir, temp_filename)\n else:\n out = tree.nodes.new('CompositorNodeComposite')\n links.new(inp.outputs[0], out.inputs[0])\n return None", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = 
mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def deleteAllModelsFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('manual-seg_*') != {}:\n nodes = slicer.util.getNodes('manual-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('obturator-seg_*') != {}:\n nodes = slicer.util.getNodes('obturator-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n #while slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode') !={}:\n # nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n # for node in nodes.values():\n # slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('template slice position*') != {}:\n nodes = slicer.util.getNodes('template slice position*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n sYellow.SetSliceVisible(0)\n reformatLogic = slicer.vtkSlicerReformatLogic()\n reformatLogic.SetSliceNormal(sYellow,1,0,0)\n tempFidNodes = slicer.mrmlScene.GetNodesByName('Temp')\n for i in range(tempFidNodes.GetNumberOfItems()):\n node = tempFidNodes.GetItemAsObject(i)\n if node:\n slicer.mrmlScene.RemoveNode(node)\n sYellow.Modified()" ]
[ "0.70548475", "0.6534689", "0.6220881", "0.6124191", "0.6103418", "0.5953071", "0.59508586", "0.592486", "0.5756125", "0.5698417", "0.5659417", "0.56495863", "0.55890954", "0.55873734", "0.5572616", "0.55421257", "0.54985136", "0.54800266", "0.54774386", "0.54722345", "0.54558796", "0.54512435", "0.5450689", "0.5436893", "0.5420839", "0.53971976", "0.5364416", "0.5355828", "0.5346665", "0.5330729" ]
0.7741908
0
export all the lightlinking in the scene
def exportLightLinking(self): lights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a] allShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))] litLinks = {} for l in lights: lightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0) litLinks[l] = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT pickle.dump( litLinks, open( self.lightLinkPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, 
samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def export_blend_connections():\n selection_list = pm.ls(tr=1, sl=1, l=1)\n\n dialog_return = pm.fileDialog2(cap=\"Save As\", fm=0, ff='Text Files(*.txt)')\n\n filename = dialog_return[0]\n print(filename)\n\n print(\"\\n\\nFiles written:\\n--------------------------------------------\\n\")\n\n with open(filename, 'w') as fileId:\n for i in range(0, len(selection_list)):\n shapes = pm.listRelatives(selection_list[i], s=True, f=True)\n\n main_shape = \"\"\n for j in range(0, len(shapes)):\n if pm.getAttr(shapes[j] + '.intermediateObject') == 0:\n main_shape = shapes\n break\n if main_shape == \"\":\n main_shape = shapes[0]\n\n con = pm.listConnections(main_shape, t=\"blendShape\", c=1, s=1, p=1)\n\n cmd = \"connectAttr -f %s.worldMesh[0] %s;\" % (\n ''.join(map(str, main_shape)),\n ''.join(map(str, con[0].name()))\n )\n print (cmd + \"\\n\")\n fileId.write(\"%s\\n\" % cmd)\n\n print(\"\\n------------------------------------------------------\\n\")\n print(\"filename: %s ...done\\n\" % filename)", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( 
\"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = 
\"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, 
nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n 
lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def _on_lowres_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_proxy()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = 
scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def link_residues(self) -> None:\n ...", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def exportData(self):\n\t\tlays = rlayer.renderlayers()\n\t\tdata = {}\n\t\tfor l in lays:\n\t\t\tif l.name == 'defaultRenderLayer':\n\t\t\t\tcontinue\n\t\t\tdata[l.name] = {'objects':l.objects, # OBJECTS IN LAYER\n\t\t\t\t\t\t\t'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES\n\t\t\t\t\t\t\t'conns' 
:l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS\n\t\t\t\t\t\t\t'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER\n\t\t\t\t\t\t\t}\n\t\tpickle.dump( data, open( self.dataPath.path, \"wb\" ) )", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def flicker_lights(self):\n print 'Lights Set'", "def viewAll(self):\n self._sceneviewer.viewAll()", "def load_morph_links():\n dtu_path = os.path.abspath(Definitions.EXPORT_DIR + \"\\FIG\\FIG0\")\n dtu_loader = DtuLoader.DtuLoader(dtu_path)\n morph_links = dtu_loader.get_morph_links_dict()\n return morph_links", "def lights(self):\n return list(self.GetLights())", "def batch_export_ortho():\r\n global path_to_project\r\n \r\n for path in path_to_project:\r\n export_filename = os.path.basename(path['ProjectPath']).replace('.psz','.tif')\r\n export_path = os.path.join(export_folder,export_filename)\r\n try:\r\n project = PhotoScan.app.document\r\n project.open(path['ProjectPath'])\r\n \r\n dx, dy = mosaic.get_resolution(path['Flight_id'], path['Field'], path['Camera'])\r\n \r\n if dx is not None and dy is not None:\r\n status = project.activeChunk.exportOrthophoto(\r\n export_path, format=\"tif\", color_correction=False, blending='average', dx=dx, dy=dy,\r\n projection=project.activeChunk.projection)\r\n else:\r\n status = project.activeChunk.exportOrthophoto(export_path, format=\"tif\", color_correction=False, blending='average',projection=project.activeChunk.projection)\r\n except Exception as e:\r\n print(e)\r\n if status is True:\r\n print(\"Perfect\")\r\n app = PhotoScan.Application()\r\n app.quit()", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = 
r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def _add_links_from_mergers(self):\n for i, node_name in enumerate(self.node_list):\n self.builder.addDirectedLink(node_name, self, islot=i)", "def export(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net') #+ name)\n export_path = os.path.join(self.configuration['export_path'], 'exported_net_{}.pth'.format(name))\n batch_fixed = self.input[:,1,:,:,:]\n batch_moving = self.input[:,2,:,:,:]\n traced_script_module = torch.jit.trace(net, (batch_moving, batch_fixed))\n traced_script_module.save(export_path)", "def render(self):\n # Remove existing fresnel geometries from the scene\n for geometry in self._geometries:\n geometry.remove()\n\n # Clear the list of fresnel geometries\n self._geometries = []\n\n # Add fresnel scene geometries from plato scene primitives\n for prim in self._primitives:\n geometry = prim.render(self._fresnel_scene)\n self._geometries.append(geometry)\n\n # Set up the camera\n camera_up = rowan.rotate(rowan.conjugate(self.rotation), [0, 1, 0])\n camera_position = rowan.rotate(rowan.conjugate(self.rotation), -self.translation)\n camera_look_at = camera_position + rowan.rotate(rowan.conjugate(self.rotation), [0, 0, -1])\n camera_height = self.size[1]/self.zoom\n try:\n orthographic_camera = fresnel.camera.Orthographic\n except AttributeError:\n # Support fresnel < 0.13.0\n orthographic_camera = fresnel.camera.orthographic\n self._fresnel_scene.camera = orthographic_camera(\n position=camera_position,\n look_at=camera_look_at,\n up=camera_up,\n height=camera_height)\n\n # Set up lights\n lights = []\n if 'ambient_light' in self.enabled_features:\n config = self.get_feature_config('ambient_light')\n magnitude = config.get('value', 0.25)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=(0, 0, 1),\n color=(magnitude, magnitude, magnitude),\n theta=np.pi))\n if 'directional_light' in self.enabled_features:\n config = self.get_feature_config('directional_light')\n directions = config.get('value', (.25, .5, -1))\n directions = np.atleast_2d(directions).astype(np.float32)\n for direction in directions:\n magnitude = np.linalg.norm(direction)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=-direction,\n color=(magnitude, magnitude, magnitude),\n theta=0.7))\n if len(lights) > 0:\n self._fresnel_scene.lights = lights\n\n # Set up tracer\n if 'pathtracer' in self.enabled_features:\n # Use path tracer if enabled\n config = self.get_feature_config('pathtracer')\n tracer = self._path_tracer\n 
samples = config.get('samples', 64)\n def render_function(scene, **kwargs):\n return tracer.sample(scene, samples, **kwargs)\n else:\n # Use preview tracer by default\n tracer = self._preview_tracer\n tracer.anti_alias = 'antialiasing' in self.enabled_features\n render_function = tracer.render\n\n self._output = render_function(self._fresnel_scene)" ]
[ "0.780936", "0.6795471", "0.5878799", "0.5863147", "0.5783751", "0.57724273", "0.5699046", "0.5667319", "0.5579326", "0.5527161", "0.5456355", "0.54548293", "0.5452703", "0.5451087", "0.54337513", "0.5404285", "0.5384086", "0.5363154", "0.53607535", "0.53221077", "0.53212357", "0.52984256", "0.5261833", "0.5256882", "0.5253868", "0.5237455", "0.52366996", "0.52057", "0.5196203", "0.51946366" ]
0.8114935
0
export aovs from scene
def exportAovs(self):
		aovs = mc.ls( typ = 'aiAOV' )
		aovData = {}
		for a in aovs:
			aovData[a] = {}
			aovData[a]['enabled'] = mc.getAttr( a + '.enabled' )
			aovData[a]['name'] = mc.getAttr( a + '.name' )
			aovData[a]['type'] = mc.getAttr( a + '.type' )
		pickle.dump( aovData, open( self.aovsPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def create_scene(self):\n \n self.scene=soya.World()", "def import_scene(file_path):\n\n pass", "def exports():", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def save_scene(force=True, **kwargs):\n\n pass", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def importAovs(self):\n\t\tLayersInfo = pickle.load( open( self.aovsPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tfor ao in LayersInfo.keys():\n\t\t\taov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )\n\t\tmc.refresh( su = 0 )", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = 
\"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n 
nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ 
scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def open_scene(file_path, save=True):\n\n pass", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create target directories if necessary.\n if not os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def exportAssetAssembly(name, rigTopNode, meshTopNode, path, postScript=None):\n if pm.ls(rigTopNode):\n rigTopNode = pm.PyNode(rigTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check your \"\n \"scene\".format(rigTopNode))\n return\n\n if pm.ls(meshTopNode):\n meshTopNode = pm.PyNode(meshTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check \"\n \"your scene\".format(meshTopNode))\n return\n # check the folder and script\n # if the target name exist abort and request another name\n\n deformer_jnts = rigTopNode.rigGroups[3].connections()[0].members()\n if not deformer_jnts:\n pm.displayError(\n \"{} is empty. 
The tool can't find any joint\".format(meshTopNode))\n\n # export connections and cut joint connections\n file_path = os.path.join(path, name + \".jmm\")\n dm_nodes = exportConnections(source=deformer_jnts,\n filePath=file_path,\n disc=True)\n\n # cut al possible remaining connection and adjust hierarchy\n # joint or visibility\n jnt_org = pm.PyNode(\"jnt_org\")\n pm.disconnectAttr(rigTopNode.jnt_vis, jnt_org.visibility)\n\n # restructure model\n model = pm.createNode(\"transform\",\n n=\"model\",\n p=None,\n ss=True)\n pm.addAttr(model, ln=\"rigGroups\", at='message', m=1)\n pm.parent(meshTopNode, jnt_org, model)\n\n # disconnect jnt set\n sets = rigTopNode.listConnections(type=\"objectSet\")\n\n deformersGrp = None\n for oSet in sets:\n if \"deformers_grp\" in oSet.name():\n deformersGrp = oSet\n\n if deformersGrp:\n for cnx in deformersGrp.message.listConnections(p=True):\n pm.disconnectAttr(deformersGrp.message, cnx)\n pm.connectAttr(deformersGrp.message, model.attr(\"rigGroups[0]\"))\n\n # disconnect bindPoses\n dg_poses = rigTopNode.message.listConnections(type=\"dagPose\", p=True)\n for dgp in dg_poses:\n if dgp.node().name().startswith(\"bindPose\"):\n pm.disconnectAttr(rigTopNode.message, dgp)\n\n # post script\n if postScript:\n try:\n exec(compile(open(postScript, \"rb\").read(), postScript, 'exec'))\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n pm.displayError(message)\n cont = pm.confirmBox(\"FAIL: Script Fail\",\n \"Do you want to export anyway?\" + \"\\n\\n\"\n + message + \"\\n\\n\" + traceback.format_exc(),\n \"Continue\", \"Cancel\")\n if not cont:\n pm.undo()\n return\n\n # export rig model\n pm.select(dm_nodes, r=True)\n pm.select(rigTopNode, add=True)\n file_path = os.path.join(path, name + \"_rig.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)\n\n # export mesh and joints\n pm.select(model, r=True)\n file_path = os.path.join(path, name + \"_model.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)", "def scene_name():\n\n pass", "def __init__(self, scene: Scene):\n self.scene = scene", "def __init__(self, scene: Scene):\n super(SceneGUI, self).__init__()\n\n self.scene = scene # save instance of Scene class to this object\n if scene.photons.size == 0:\n raise(Exception, \"no data stored in scene\")\n\n # QImage require data to be 32 bit aligned. 
Thus, we need to make sure out_size is even\n out_size = (round(scene.n_rows * 150/scene.n_cols)*2, 300)\n self.image = imresize(scene.srgb, out_size, interp='nearest')\n\n # set status bar\n self.statusBar().showMessage(\"Ready\")\n\n # set menu bar\n menu_bar = self.menuBar()\n menu_file = menu_bar.addMenu(\"&File\")\n menu_plot = menu_bar.addMenu(\"&Plot\")\n\n # add load scene to file menu\n load_scene = QtGui.QAction(\"Load Scene\", self)\n load_scene.setStatusTip(\"Load scene from file\")\n load_scene.triggered.connect(self.menu_load_scene)\n menu_file.addAction(load_scene)\n\n # add save scene to file menu\n save_scene = QtGui.QAction(\"Save Scene\", self)\n save_scene.setStatusTip(\"Save scene to file\")\n save_scene.setShortcut(\"Ctrl+S\")\n save_scene.triggered.connect(self.menu_save_scene)\n menu_file.addAction(save_scene)\n\n # add illuminant energy to plot menu\n plot_il_energy = QtGui.QAction(\"Illuminant (Energy)\", self)\n plot_il_energy.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_energy.triggered.connect(lambda: self.scene.plot(\"illuminant energy\"))\n menu_plot.addAction(plot_il_energy)\n\n # add illuminant photons to plot menu\n plot_il_quanta = QtGui.QAction(\"Illuminant (Photons)\", self)\n plot_il_quanta.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_quanta.triggered.connect(lambda: self.scene.plot(\"illuminant photons\"))\n menu_plot.addAction(plot_il_quanta)\n\n # set up left panel\n left_panel = self.init_image_panel()\n\n # set up right panel\n right_panel = self.init_control_panel()\n\n splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n splitter.addWidget(left_panel)\n splitter.addWidget(right_panel)\n\n QtGui.QApplication.setStyle(QtGui.QStyleFactory().create('Cleanlooks'))\n\n widget = QtGui.QWidget()\n hbox = QtGui.QHBoxLayout(widget)\n hbox.addWidget(splitter)\n\n self.setCentralWidget(widget)\n\n # set size and put window to center of the screen\n self.resize(600, 400)\n qr = self.frameGeometry()\n qr.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n # set title and show\n self.setWindowTitle(\"Scene GUI: \" + scene.name)\n self.show()", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def my_handler(scene):\n cube_winkel_x = degrees(bpy.data.objects['Cube'].rotation_euler.x)\n cube_winkel_y = degrees(bpy.data.objects['Cube'].rotation_euler.y)\n\n\n # Aktionen auf den servos:\n cube_servo.turnAngle(cube_winkel_x)\n kiefer_servo.turnAngle(cube_winkel_y)", "def getScene():\n #print \"servers direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, 
head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def export_onnx():\r\n model = DivideBy255()\r\n X = torch.randn(1, 3, 256, 256, dtype=torch.float)\r\n onnx_name = \"DivideBy255.onnx\"\r\n\r\n print(f\"Generating {onnx_name}\")\r\n torch.onnx.export(\r\n model,\r\n (X),\r\n onnx_name,\r\n opset_version=10,\r\n do_constant_folding=True,\r\n # verbose=True,\r\n # input_names=['Identity_1', 'Identity'],\r\n output_names=['input_1']\r\n )", "def rdmb_povray_save_q(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([0, 0, -10],\n 'color', [1.0, 1.0, 1.0], 'parallel', 'shadowless')\n light1 = LightSource([-10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light2 = LightSource([10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light3 = LightSource([0, -10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light4 = LightSource([0, 10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for v, uc in zip(vs, ucs)]\n\n objects = [light, light1, light2, light3, light4, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")" ]
[ "0.6807009", "0.61583364", "0.60997474", "0.6089782", "0.5898155", "0.5882427", "0.5869277", "0.58672017", "0.5798506", "0.56990576", "0.5693678", "0.56637156", "0.5612492", "0.5589226", "0.553898", "0.5532267", "0.55292517", "0.55200547", "0.5514314", "0.5509473", "0.54984117", "0.5487525", "0.5467995", "0.54168653", "0.5416319", "0.5412217", "0.5410536", "0.54101694", "0.54058725", "0.5381438" ]
0.6433381
1
import lights in scene
def importLights(self, asset = '', searchAndReplace = ['',''] ):
		if self.lightPath.exists:
			self.lightPath.imp()
			if self.lightLinkPath.exists:
				self.importLightLinking( asset, searchAndReplace )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def flicker_lights(self):\n print 'Lights Set'", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def import_scene(file_path):\n\n pass", "def exportLights(self):\n\t\t#TODO! 
REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def turnLightingSystemOn():\n dislin.light('ON')", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def testLightImport(self):\n\n archive = IArchive(\"light1.abc\")\n emptyLightObj = ILight(archive.getTop(), \"emptyLight\" )\n lightObj = ILight(archive.getTop(), \"myLight\" )\n\n self.assertFalse(emptyLightObj.getSchema().getArbGeomParams().valid())\n self.assertFalse(emptyLightObj.getSchema().getUserProperties().valid())\n self.assertEqual(lightObj.getSchema().getArbGeomParams().getNumProperties(), 1)\n self.assertEqual(lightObj.getSchema().getUserProperties().getNumProperties(), 1)\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 0 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], 0.666666666666667 )\n self.assertAlmostEqual( window['bottom'], -0.666666666666667 )\n self.assertAlmostEqual( window['left'], -1.0 )\n self.assertAlmostEqual( window['right'], 1.0 )\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 1 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], -0.35 )\n self.assertAlmostEqual( window['bottom'], 0.75 )\n self.assertAlmostEqual( window['left'], 0.1 )\n self.assertAlmostEqual( window['right'], 0.5 )\n\n self.assertFalse(lightObj.getSchema().getCameraSchema().getChildBoundsProperty().valid())", "def enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render 
import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def initialize_lights(self):\n\t\tfor light in OUTPUT.LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.set_bit(light, 0)\n\t\tfor order in self.orderQueue.yield_orders(exclude=(None,)):\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def lights(self):\n return list(self.GetLights())", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 
1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)", "def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]", "def __init__(self, LightFun):\n self.setParameters()\n self.Light = LightFun", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def importLightLinking(self, asset 
= '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def init_gl(self):\n\n # default background color is white-ish\n background = [.99, .99, .99, 1.0]\n # if user passed a background color use it\n if 'background' in self.kwargs:\n try:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.kwargs['background'])\n # convert to 0.0 - 1.0 float\n background = background.astype(np.float64) / 255.0\n except BaseException:\n log.error('background color wrong!',\n exc_info=True)\n # apply the background color\n gl.glClearColor(*background)\n\n max_depth = (np.abs(self.scene.bounds).max(axis=1) ** 2).sum() ** .5\n max_depth = np.clip(max_depth, 500.00, np.inf)\n gl.glDepthRange(0.0, max_depth)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_LIGHTING)\n gl.glEnable(gl.GL_LIGHT0)\n gl.glEnable(gl.GL_LIGHT1)\n\n # put the light at one corner of the scenes AABB\n gl.glLightfv(gl.GL_LIGHT0,\n gl.GL_POSITION,\n rendering.vector_to_gl(np.append(self.scene.bounds[1], 0)))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_SPECULAR,\n rendering.vector_to_gl(.5, .5, 1, 1))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,\n rendering.vector_to_gl(1, 1, 1, .75))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,\n rendering.vector_to_gl(.1, .1, .1, .2))\n\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(.5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n\n gl.glLineWidth(1.5)\n gl.glPointSize(4)" ]
[ "0.7125975", "0.6562322", "0.65274394", "0.6518946", "0.6397606", "0.63027537", "0.62996095", "0.6106556", "0.6032271", "0.6012362", "0.59544396", "0.5917386", "0.59018606", "0.58937645", "0.58840954", "0.58633345", "0.5855364", "0.577707", "0.57750875", "0.5726249", "0.56992894", "0.56819457", "0.5663281", "0.56577843", "0.56570166", "0.56344575", "0.5629733", "0.5618883", "0.56015676", "0.55990416" ]
0.65937054
1
import light linking to lights
def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):
		LayersInfo = pickle.load( open( self.lightLinkPath.path, "rb") )
		mc.refresh( su = 1 )
		if not asset == '':
			LayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )
		for l in LayersInfo.keys():
			objsToBreakLink = []
			for link in LayersInfo[l]:
				if mc.objExists( link ):
					objsToBreakLink.append( link )
			mc.lightlink( b = True, light = l, o = objsToBreakLink )
		mc.refresh( su = 0 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "async def Turn_On_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": \"green\",\n }", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: 
{0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)", "def flicker_lights(self):\n print 'Lights Set'", "def addLight(self, id):\r\n\t\t\r\n\t\tnewLight = Light(id)\r\n\t\tself.lights[id] = newLight", "async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n }", "def turnLightingSystemOn():\n dislin.light('ON')", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def testLightImport(self):\n\n archive = IArchive(\"light1.abc\")\n emptyLightObj = ILight(archive.getTop(), \"emptyLight\" )\n lightObj = ILight(archive.getTop(), \"myLight\" )\n\n self.assertFalse(emptyLightObj.getSchema().getArbGeomParams().valid())\n self.assertFalse(emptyLightObj.getSchema().getUserProperties().valid())\n self.assertEqual(lightObj.getSchema().getArbGeomParams().getNumProperties(), 1)\n 
self.assertEqual(lightObj.getSchema().getUserProperties().getNumProperties(), 1)\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 0 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], 0.666666666666667 )\n self.assertAlmostEqual( window['bottom'], -0.666666666666667 )\n self.assertAlmostEqual( window['left'], -1.0 )\n self.assertAlmostEqual( window['right'], 1.0 )\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 1 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], -0.35 )\n self.assertAlmostEqual( window['bottom'], 0.75 )\n self.assertAlmostEqual( window['left'], 0.1 )\n self.assertAlmostEqual( window['right'], 0.5 )\n\n self.assertFalse(lightObj.getSchema().getCameraSchema().getChildBoundsProperty().valid())", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def place_headlamp_light():\n\n lx = 1.0\n ly = light_height\n lz = 2.0\n #light_position = [lx, ly, lz, 1.0]\n light_position = [0.0, 0.0, 0.0, 1]\n light_ambient = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_diffuse = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_specular = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n # glViewport(0, 0, win_width, win_height)\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # gluPerspective(40.0, float(win_width) / float(win_height), 0.01, 100.0)\n #\n # glMatrixMode(GL_MODELVIEW)\n # glLoadIdentity()\n # glPushMatrix()\n glLightfv(GL_LIGHT4, GL_POSITION, light_position)\n\n\n\n #glLightfv(GL_LIGHT4, GL_POSITION, (GLfloat * 4)(0.0, 0.0, 0.0, 1))\n glLightfv(GL_LIGHT4, GL_AMBIENT, light_ambient)\n glLightfv(GL_LIGHT4, GL_DIFFUSE, light_diffuse)\n glLightfv(GL_LIGHT4, GL_SPECULAR, light_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! 
Otherwise disabled\n # glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n # glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n # glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n glLightf(GL_LIGHT4, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT4, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT4, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if headlamp_is_on:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 30.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT4, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT4)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, brightness, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [1, 1, 0.4, 0.5]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [0.1, 0.1, 0.1, 1]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, self.diffuse_light)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def set_light_on(self):\r\n self._light = \"ON\"", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, 
\"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def set_light_on(self):\n self._light = \"ON\"" ]
[ "0.7286741", "0.6968058", "0.667978", "0.6516438", "0.6463352", "0.6461872", "0.6437315", "0.6400645", "0.63854384", "0.63444823", "0.6283221", "0.62622285", "0.6234883", "0.61583436", "0.61548394", "0.614277", "0.6137061", "0.61298877", "0.61078626", "0.61018175", "0.6089106", "0.6081094", "0.6052358", "0.60355777", "0.59865034", "0.5972309", "0.5947632", "0.59467125", "0.5939675", "0.5898696" ]
0.70111126
1
filter light linking data for the specific asset
def filterLightLinksData(self, LayersInfo , asset, sAr = ['',''] ):
    lightData = [(a.replace( sAr[0], sAr[1] ),LayersInfo[a].replace( sAr[0], sAr[1] )) for a in LayersInfo.keys() if asset in a]
    return dict( lightData )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def filter(self, filters):", "def get_filters(self):", "def filterToLight( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n lit = int(255*HSL[2]) # convert to 0-255 range\n bmp.pixels[h][w] = (lit,lit,lit)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def apply_filter(self, image):\n pass", "def filter(self, drawable):\n pass", "def broadbandfilter(self):\n _, = self.broadbandfilters\n return _", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return 
df_filtered", "def process_asset_data(data):\n buffered_assets = []\n\n for asset in data:\n asset_geom = shape(asset['geometry'])\n buffered_geom = asset_geom.buffer(100)\n\n asset['buffer'] = buffered_geom\n buffered_assets.append(asset)\n\n output = []\n assets_seen = set()\n\n for asset in tqdm(buffered_assets):\n if asset['properties']['Opref'] in assets_seen:\n continue\n assets_seen.add(asset['properties']['Opref'])\n touching_assets = []\n for other_asset in buffered_assets:\n if asset['buffer'].intersects(other_asset['buffer']):\n touching_assets.append(other_asset)\n assets_seen.add(other_asset['properties']['Opref'])\n\n dissolved_shape = cascaded_union([a['buffer'] for a in touching_assets])\n final_centroid = dissolved_shape.centroid\n output.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [final_centroid.coords[0][0], final_centroid.coords[0][1]],\n },\n 'properties':{\n 'name': asset['properties']['name'],\n }\n })\n\n return output", "def specific_asset(self, asset: str) -> dict:\n \n specific_asset_url = self.network + bf_assets_url + asset\n\n response = query_blockfrost(specific_asset_url, self.api_key, self.proxies)\n \n return response", "def __apply_filters(url, dataset_code):\n if '?' not in url:\n url += '?'\n else:\n url += '&'\n for key in dataset_code.FILTERS:\n if isinstance(dataset_code.FILTERS[key], list):\n for value in dataset_code.FILTERS[key]:\n url += key + '=' + str(value) + '&'\n else:\n url += key + '=' + str(dataset_code.FILTERS[key]) + '&'\n url = url[0:-1]\n return url", "def reference_filters(self, version, options):\n pass", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def crossWalkGeoBlacklight(data):\n\n dataJsonObj=deep_get(data,\"xml.fgdc\",[])\n if len (dataJsonObj)>0:\n dataJsonObj=deep_get(dataJsonObj[0],\"data\",{})\n else:\n dataJsonObj={}\n layername=os.path.splitext(os.path.basename(data['file']))[0]\n geoserver_layername = data['geoserverStoreName']\n gblight = assignMetaDataComponents(dataJsonObj,layername,geoserver_layername,data[\"resource_type\"])\n gblight['solr_geom']=data['bounds']\n data['geoblacklightschema']=gblight\n return data", "async def filter(self, **kwargs):\n\n pass", "def planes_with_light_profiles(tracer):\n # NOTE: Find all planes with light profiles\n # NOTE:\n # # image = tracer.galaxies[1].profile_image_from_grid(grid=grid)\n # # plt.figure()\n # # plt.imshow(image.in_2d)\n # # plt.show()\n #\n # # asd = list(map(lambda plane: plane.has_light_profile, tracer.planes))\n # # print(asd)\n # #print(tracer.planes)\n #\n # #print(tracer.has_light_profile)\n # #print(list(map(lambda plane: plane.has_light_profile, tracer.planes)))\n # #print(tracer.galaxies_with_light_profile)\n #\n # #print(tracer.planes[1].galaxies_with_light_profile)\n #\n # galaxies = tracer.planes[1].galaxies_with_light_profile\n # galaxy = galaxies[0]\n #\n # galaxy_light_profiles = galaxy.light_profiles\n #\n # image_0 = galaxy_light_profiles[0].profile_image_from_grid(grid=grid)\n # image_0_in_2d = image_0.in_2d\n #\n # image_1 = galaxy_light_profiles[1].profile_image_from_grid(grid=grid)\n # image_1_in_2d = image_1.in_2d", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif 
self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def filter(self, *args, **kwargs):", "async def add(self, ctx, *, link):\r\n try: # compatability with older versions\r\n self.adkillr[ctx.message.server.id]['filters'].append(link)\r\n except KeyError:\r\n self.adkillr[ctx.message.server.id]['filters'] = [link]\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)\r\n await self.bot.say(\"Filter added.\")", "async def low_pass(\n client,\n event,\n smoothing: P('float', 'smoothing', min_value = 0.0, max_value = 5.0),\n):\n player = get_player_or_abort(client, event)\n \n filter = LowPass(smoothing)\n player.add_filter(filter)\n await player.apply_filters()\n \n return create_filter_added_embed(filter)", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return filtered, ordered", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = 
Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def pwgrwlfilter(self):\n return None" ]
[ "0.5837777", "0.5805665", "0.56016874", "0.53269315", "0.53257084", "0.52495986", "0.52477026", "0.5235335", "0.51835656", "0.51263887", "0.5116933", "0.51134336", "0.50554407", "0.505449", "0.5049599", "0.5036466", "0.5035692", "0.50177574", "0.50123763", "0.49872875", "0.4979257", "0.4975004", "0.4927894", "0.4924816", "0.49214503", "0.49190053", "0.49179408", "0.49038744", "0.49023634", "0.48989546" ]
0.7130668
0
import aovs into scene
def importAovs(self):
    LayersInfo = pickle.load( open( self.aovsPath.path, "rb") )
    mc.refresh( su = 1 )
    for ao in LayersInfo.keys():
        aov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )
    mc.refresh( su = 0 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_scene(file_path):\n\n pass", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n # cmds.file(absSceneFile, i=True)\n nuke.nodePaste(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(210, msg)\n return -1, msg", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def importExternal(*args):\n goTo = pi.currentProject\n impFile = cmds.fileDialog2(fm=1, dir = goTo)[0]\n if impFile:\n cmds.file(impFile, i=True)", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def open_scene(file_path, save=True):\n\n pass", "def create_scene(self):\n \n self.scene=soya.World()", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = 
slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: 
quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, 
corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def import_game_graph(self):\n self._import_win()\n self._import_loose()", "def main():\r\n \r\n world = WorldModel()\r\n #uncomment these lines and comment out the next 2 if you want to use the\r\n #full Baxter model\r\n #print \"Loading full Baxter model (be patient, this will take a minute)...\"\r\n #world.loadElement(os.path.join(model_dir,\"baxter.rob\"))\r\n print \"Loading simplified Baxter model...\"\r\n world.loadElement(os.path.join(model_dir,\"baxter_col.rob\"))\r\n print \"Loading Kiva pod model...\"\r\n world.loadElement(os.path.join(model_dir,\"kiva_pod/model.obj\"))\r\n print \"Loading plane model...\"\r\n world.loadElement(os.path.join(model_dir,\"plane.env\"))\r\n \r\n #shift the Baxter up a bit (95cm)\r\n Rbase,tbase = world.robot(0).getLink(0).getParentTransform()\r\n world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))\r\n \r\n #translate pod to be in front of the robot, and rotate the pod by 90 degrees \r\n Trel = (so3.rotation((0,0,1),math.pi/2),[1.1,0,0])\r\n T = world.rigidObject(0).getTransform()\r\n world.rigidObject(0).setTransform(*se3.mul(Trel,T))\r\n \r\n #run the visualizer\r\n visualizer = MyGLViewer(world)\r\n visualizer.run()", "def chooseOpenFile(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n filter=\"Meshes (*.stl)\")\n if fname[0] == '':\n return\n name = fname[0][:-4].split('/')[-1]\n self.files[name] = AmpObject(fname[0], 'limb')\n amp = self.files[name]\n amp.addActor()\n amp.tform = vtk.vtkTransform()\n amp.tform.PostMultiply()\n amp.actor.SetUserTransform(amp.tform)\n# amp.centre()\n self.fileManager.addRow(name, amp)\n self.display()\n self.filesDrop.append(name)\n if hasattr(self, 'alCont'):\n self.alCont.getNames()\n if hasattr(self, 'regCont'):\n self.regCont.getNames()", "def do_poortego_import(self, arg, opt):\n poortego_import(self.my_interface, arg, opt)", "def __init__(self):\r\n self.label = \"OVL to Feature\"\r\n self.description = \"OVL to Feature converts an OVL file from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def reference_scene(file_path, **kwargs):\n\n pass", "def WriteImport(self, filename, logname, 
outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n self.__importLogFiles.append(self.__logFiles[-1])\r\n \r\n command = (\"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"NewScene, false\\n\")\r\n if (FUtils.GetExtension(filename) == \"dae\"):\r\n command = (command + \r\n \"set myIProp = CreateImportFTKOptions()\\n\" +\r\n \"myIProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n filename.replace(\"\\\\\", \"\\\\\\\\\") +\"\\\"\\n\" +\r\n \"myIProp.Parameters(\\\"Verbose\\\").Value = True\\n\")\r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__IMPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myIProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + value + \"\\n\")\r\n command = command + \"ImportFTK myIProp.Name \\n\"\r\n elif (FUtils.GetExtension(filename) == \"scn\"):\r\n command = (command +\r\n \"OpenScene \\\"\" + filename.replace(\"\\\\\",\"\\\\\\\\\") + \"\\\"\\n\")\r\n else: \r\n return\r\n \r\n self.__currentImportProperName = FUtils.GetProperFilename(filename)\r\n basename = self.__currentImportProperName + \".scn\"\r\n\r\n# self.__script.write(\r\n# command +\r\n# \"SearchAndReplacePath \\\"All\\\", \\\"\" + FXsi.__REPLACE_PATH + \r\n# \"\\\", \\\"\" + \r\n# os.path.dirname(filename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n# \"\\\", True\\n\" +\r\n# \"SaveSceneAs \\\"\" + \r\n# os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n# \"\\\"\\n\"\r\n# )\r\n \r\n self.__script.write(\r\n command +\r\n \"SaveSceneAs \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n \"\\\"\\n\"\r\n )\r\n \r\n self.__testCount = self.__testCount + 1\r\n \r\n return [basename,]", "def test_visuThreeD1(self):\n\n visu_logic = slicer.modules.visuThreeDWidget.logic\n #visu_logic.set_user_table(self.user_table)\n #visu_logic.set_user_file('/work/maria5/EBDS_CIVILITY/DataShare/TestMatricesForVisualization/AAL78/PerNodeMetrics/Conte_EigenVectorCentrality_4Yr_AAL78Regions.csv')\n #visu_logic.set_user_file('/Users/Wieke/Documents/visuThreeD/neo-0042-4year_AvgSym_normFull.csv')\n # visu_logic.create_node_actors()\n # visu_logic.create_line_actors()\n # visu_logic.update()\n #visu_logic.set_node_range()", "def import_(self, version):\n #nuke.nodePaste(version.absolute_full_path)\n return True", "def run():\n from cgl.plugins.blender.tasks.rig import parent_mdl_to_rig\n parent_mdl_to_rig()", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def connectMasterScene():\n try:\n nuke.toNode('Viewer1').setInput(0, nuke.toNode('MASTER_SCENE'))\n except:\n print 'no master scene found!'", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def main():\n obj = UnityFilesystem()\n obj.perform_module_operation()" ]
[ "0.7213218", "0.63318026", "0.59511834", "0.5773653", "0.5763489", "0.5640468", "0.5608729", "0.5585865", "0.55745476", "0.5565132", "0.5552705", "0.5474964", "0.5472537", "0.5469946", "0.5382948", "0.5356013", "0.5355673", "0.5343927", "0.5336113", "0.53249675", "0.5310616", "0.53012156", "0.52845025", "0.5273526", "0.5262926", "0.52469516", "0.52268344", "0.5213845", "0.52136564", "0.52130127" ]
0.7212844
1
import shaders into scene
def importShaders(self):
    if self.shaderPath.exists:
        self.shaderPath.imp()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n glCompileShader(vertex_shader)\n result = glGetShaderiv(vertex_shader, GL_COMPILE_STATUS)\n if result:\n print('Vertex shader compilation successful.')\n else:\n print('Vertex shader compilation FAILED:')\n print(glGetShaderInfoLog(vertex_shader))\n sys.exit(-1)\n\n fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fragment_shader, open('shaders/fs-phong-interp.c','r').read())\n glCompileShader(fragment_shader)\n result = glGetShaderiv(fragment_shader, GL_COMPILE_STATUS)\n if result:\n print('Fragment shader compilation successful.')\n else:\n print('Fragment shader compilation FAILED:')\n print(glGetShaderInfoLog(fragment_shader))\n sys.exit(-1)\n\n shaders = glCreateProgram()\n glAttachShader(shaders,vertex_shader)\n glAttachShader(shaders,fragment_shader)\n glLinkProgram(shaders)", "def bs_importShaders(shaderPath, jsonPath):\n # import shaders.\n bs_mayaFile.bs_importFile(shaderPath)\n # read shader data from json file.\n with open(jsonPath) as json_data:\n shaderData = json.load(json_data)\n print shaderData\n # apply shaders.\n for each in shaderData.keys():\n # for x in shaderData[each]:\n # pm.select(shaderData[each][x],r=True)\n pm.select(shaderData[each], r=True)\n pm.windows.hyperShade(a=each)\n bs_qui.bs_displayMessage('success', 'shader import success.')\n return True", "def convert_shaders(self):\n raise NotImplementedError()", "def importShaders(self, namespace=':'):\n self.logger.info(\"Import Shaders\")\n\n if self.data['abcShadersAttr']:\n\n abcfile = self.data['abcShadersAttr']\n \n # shotgun query for maya file\n mayafile = find_shader_package_from_shader_file(file_path=abcfile, file_type='ma')\n if mayafile != {}:\n mayafile = mayafile['ma']\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n localfile = abcfile.replace('.abc', '.ma')\n if os.path.isfile(localfile):\n mayafile = localfile\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n\n if os.path.isfile(mayafile):\n try: \n imported_shaders = cmds.file(mayafile, i=True, returnNewNodes=True, renameAll=True, mergeNamespacesOnClash=True, namespace=namespace)\n self.setAttr(\"abcShaders\", \"\")\n self.logger.debug(\"Imported under %s namespace\" % namespace)\n\n # reset selection back to alembicHolder\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Imported : %s\" % self.data['abcShadersAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Json Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.abcShadersAttr\" % self.data['shapeNode'])\n return False", "def loadShader(shaderpath, shadername, vertexFormatList=None, fragmentFormatlist=None):\n fragment = Shader(shaderpath + shadername + \".fsh\", FRAGMENT, True, fragmentFormatlist)\n vertex = Shader(shaderpath + shadername + \".vsh\", VERTEX, True, vertexFormatList)\n return ShaderProgram(vertex, fragment, True)", "def use(self):\r\n opengles.glUseProgram(self.program)", "def _reload_shader(self):\n self.render_pipeline.reload_shaders()\n\n self.render_pipeline.set_effect(self.terrain.get_node(), 
\"effects/terrain.yaml\", {\n \"render_gbuffer\": True,\n \"render_shadows\": False,\n\n })\n\n self.render_pipeline.set_effect(self.terrain_shadow.get_node(), \"effects/terrain_shadow.yaml\", {\n \"render_gbuffer\": False,\n \"render_shadows\": True,\n }, 5000)", "def _load_opengl(self):\r\n pass", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def import_scene(file_path):\n\n pass", "def link_shaders(*shaders):\n program = gl.glCreateProgram()\n for shader in shaders:\n gl.glAttachShader(program, shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n 
objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def set_shaders(self, vert, frag):\n self._linked = False\n # Create temporary shader objects\n vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)\n # For both vertex and fragment 
shader: set source, compile, check\n for code, handle, type_ in [(vert, vert_handle, 'vertex'), \n (frag, frag_handle, 'fragment')]:\n gl.glShaderSource(handle, code)\n gl.glCompileShader(handle)\n status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS)\n if not status:\n errors = gl.glGetShaderInfoLog(handle)\n errormsg = self._get_error(code, errors, 4)\n raise RuntimeError(\"Shader compilation error in %s:\\n%s\" % \n (type_ + ' shader', errormsg))\n # Attach shaders\n gl.glAttachShader(self._handle, vert_handle)\n gl.glAttachShader(self._handle, frag_handle)\n # Link the program and check\n gl.glLinkProgram(self._handle)\n if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS):\n raise RuntimeError('Program linking error:\\n%s'\n % gl.glGetProgramInfoLog(self._handle))\n # Now we can remove the shaders. We no longer need them and it\n # frees up precious GPU memory:\n # http://gamedev.stackexchange.com/questions/47910\n gl.glDetachShader(self._handle, vert_handle)\n gl.glDetachShader(self._handle, frag_handle)\n gl.glDeleteShader(vert_handle)\n gl.glDeleteShader(frag_handle)\n # Now we know what variables will be used by the program\n self._unset_variables = self._get_active_attributes_and_uniforms()\n self._handles = {}\n self._known_invalid = set()\n self._linked = True", "def transfer_shaders(source, target):\n if isinstance(source, pm.nt.Transform):\n source_shape = source.getShape()\n else:\n source_shape = source\n\n if isinstance(target, pm.nt.Transform):\n target_shape = target.getShape()\n else:\n target_shape = target\n\n # get the shadingEngines\n shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)\n\n data_storage = []\n\n # get the assigned faces\n for shading_engine in shading_engines:\n faces = pm.sets(shading_engine, q=1)\n for faceGroup in faces:\n str_face = str(faceGroup)\n # replace the objectName\n new_face = \\\n str_face.replace(source_shape.name(), target_shape.name())\n data_storage.append((shading_engine.name(), new_face))\n\n for data in data_storage:\n shading_engine = data[0]\n new_face = data[1]\n pm.select(new_face)\n # now assign the newFaces to the set\n pm.sets(shading_engine, fe=1)", "def _on_unload_scene_shaders(self):\n\n artellapipe.ShadersMgr().unload_shaders()", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n 
GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n sys.exit(1)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = 
load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n 
sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n #print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def addShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def setShader(self, *args):\n return _osgAnimation.RigTransformHardware_setShader(self, *args)", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def convert_shaders(convert, shaders):\n \n # New version of the shaders\n out = []\n \n if convert == 'es2':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n has_prec_float = False\n has_prec_int = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n if line.startswith('#version'):\n has_version = True\n continue\n if line.startswith('precision '):\n has_prec_float = has_prec_float or 'float' in line\n has_prec_int 
= has_prec_int or 'int' in line\n lines.append(line.rstrip())\n # Write\n # BUG: fails on WebGL (Chrome)\n # if True:\n # lines.insert(has_version, '#line 0')\n if not has_prec_float:\n lines.insert(has_version, 'precision highp float;')\n if not has_prec_int:\n lines.insert(has_version, 'precision highp int;')\n # BUG: fails on WebGL (Chrome)\n # if not has_version:\n # lines.insert(has_version, '#version 100')\n out.append('\\n'.join(lines))\n \n elif convert == 'desktop':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n has_version = has_version or line.startswith('#version')\n if line.startswith('precision '):\n line = ''\n for prec in (' highp ', ' mediump ', ' lowp '):\n line = line.replace(prec, ' ')\n lines.append(line.rstrip())\n # Write\n if not has_version:\n lines.insert(0, '#version 120\\n')\n out.append('\\n'.join(lines))\n \n else:\n raise ValueError('Cannot convert shaders to %r.' % convert)\n \n return tuple(out)", "def add_vertex_main(self, *args, **kwargs):\n kwargs['shader'] = 'vertex'\n self.add_main(*args, **kwargs)", "def init(filename):\n global trackball, flashlight, vertex_buffer, normal_buffer, color_buffer, colors, vertices, normals\n\n # initialize quaternions for the light and trackball\n flashlight = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n trackball = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n\n # read the .OBJ file into VBOs\n scene.read(filename)\n vertices,normals,colors = scene.compile()\n \n vertex_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(vertices)*4, \n (c_float*len(vertices))(*vertices), GL_STATIC_DRAW)\n\n normal_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(normals)*4, \n (c_float*len(normals))(*normals), GL_STATIC_DRAW)\n\n color_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n\n\n # set up the object shaders\n init_shaders()\n\n glEnable (GL_DEPTH_TEST)", "def compileShaders(self):\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.activeShader = None\n\n fslgl.glmesh_funcs.compileShaders(self)" ]
[ "0.7801303", "0.74852675", "0.7035145", "0.69157875", "0.68511426", "0.68024266", "0.67894316", "0.65204686", "0.64844674", "0.64807636", "0.6422864", "0.6374863", "0.6357216", "0.6319503", "0.61505246", "0.61424196", "0.6106217", "0.60896313", "0.60892934", "0.60849136", "0.6044291", "0.604092", "0.60254896", "0.6008178", "0.5987104", "0.5962349", "0.5936018", "0.5915529", "0.5906484", "0.58891714" ]
0.7719651
1
import data from file asset = Only import for the asset that you want searchAndReplace = Change any part of the objects name to another word
def importData( self, asset = '', searchAndReplace = ['',''] ): pickleData = pickle.load( open( self.dataPath.path, "rb" ) ) layers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l] for l in layers: if not searchAndReplace [0]== '' or not searchAndReplace[1] == '': l.filterMe( asset, searchAndReplace ) l.create() l.addObjects() l.makeOverrides() l.makeOverrideConnections() l.makeShaderOverride()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace(name, newobject):", "def renameAssetObjects(self):\n\t\tfor i,o in enumerate( self.objects ):\n\t\t\tmn.Node( o ).name = self.name + '%i'%i", "def loadAssetTypeSpecialCaseFromFile(file):\n\tstringToTuple = compose(\n\t\ttuple\n\t , partial(map, lambda s: s.strip())\n\t , lambda s: s.split(',')\n\t)\n\n\n\tupdatePosition = lambda position: mergeDict(\n\t\tposition\n\t , { 'Portfolio': str(int(position['Portfolio'])) \\\n\t \t\t\t\t\tif isinstance(position['Portfolio'], float) \\\n\t \t\t\t\t\telse position['Portfolio']\n\t \t, 'AssetType': stringToTuple(position['AssetType'])\n\t \t}\n\t)\n\n\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda p: (p['ID'], p))\n\t , partial(map, updatePosition)\n\t , getRawPositions\n\t , fileToLines\n\t , partial(join, getDataDirectory())\n\t)(file)", "def _update_object_content(name, input):\n content = input._content\n\n hrefs = re.compile(r'<\\s*[^\\>]*href\\s*=\\s*([\"\\'])(.*?)\\1')\n srcs = re.compile(r'<\\s*[^\\>]*src\\s*=\\s*([\"\\'])(.*?)\\1')\n\n matches = hrefs.findall(content)\n matches.extend(srcs.findall(content))\n relative_paths = []\n for found in matches:\n found = found[1]\n if found not in relative_paths:\n relative_paths.append(found)\n\n for relative_path in relative_paths:\n if not \"://\" in relative_path: # we don't want to rewrite protocols\n dest_path = os.sep.join((get_relative_path(name), \"static\",\n relative_path))\n content = content.replace(relative_path, dest_path)\n\n return content", "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def replace_includes(self, file_name):\n\n indexBegin = 0\n indexEnd = 0\n text = self.dir_helper.read_file(file_name)\n while indexBegin != -1:\n indexBegin = text.find('\\input{', indexBegin+1)\n indexEnd = text.find('}', indexBegin+1)\n text_to_replace = text[indexBegin:indexEnd+1]\n if indexBegin != -1:\n # print 'text_to_replace : ' + text_to_replace\n new_path = self.construct_path(text_to_replace)\n new_text = self.replace_includes(file_name = new_path)\n text = text.replace(text_to_replace, new_text)\n\n return text", "def _mangle_petsc_intersphinx():\n\n if 'LOC' in os.environ and os.path.isfile(os.path.join(os.environ['LOC'],'objects.inv')):\n base_doc_url = os.environ['LOC']\n url=f\"file://\" + os.path.join(base_doc_url,'objects.inv')\n else:\n website = intersphinx_mapping['petsc'][0].partition('/release/')[0]\n branch = get_doc_branch()\n base_doc_url = f\"{website}/{branch}/\"\n url=f\"{base_doc_url}objects.inv\"\n print(\"Using PETSC inventory from \"+url)\n inventory = sphobjinv.Inventory(url=url)\n print(inventory)\n\n for obj in inventory.objects:\n if obj.name.startswith(\"manualpages\"):\n obj.name = \"petsc.\" + \"/\".join(obj.name.split(\"/\")[2:])\n obj.role = \"class\"\n obj.domain = \"py\"\n\n new_inventory_filename = \"petsc_objects.inv\"\n sphobjinv.writebytes(\n new_inventory_filename,\n sphobjinv.compress(inventory.data_file(contract=True))\n )\n intersphinx_mapping['petsc'] = (base_doc_url, new_inventory_filename)", "def _map_source(source):\n for 
pattern, replacement in \\\n settings.REFINERY_FILE_SOURCE_MAP.iteritems():\n translated_source = re.sub(pattern, replacement, source)\n if translated_source != source:\n return translated_source\n return source", "def updateCountryNames(self):\n try:\n with open('countryNameMapping.json', 'r') as file:\n name_mapping = json.loads(file.read())\n except:\n sys.exit('countryNameMapping.json file is unavailable in current directory.')\n \n for key, value in name_mapping.items():\n self.covid_df.replace(key, value, inplace=True)\n \n try:\n with open('countryNameISO2.json', 'r') as file:\n self.name_iso2_mapping = json.loads(file.read())\n except:\n print('countryNameISO2.json file is unavailable in current directory, creating file...')\n self.writeCountryCodeFile()\n print('Re-importing required JSONs...')\n self.updateCountryNames()", "def replace_sandesh_obj_name(self, obj, file):\n obj_class_name = obj.__class__.__name__\n if hasattr(obj, 'sreq_class'):\n try:\n subprocess.call(\"sed -i 's/\" + obj_class_name + \"/\" +\n obj.sreq_class + \"/g' \" + file, shell=True)\n except Exception as e:\n self.logger.error(\n \"Failed to replace sandesh obj name = \" +\n obj_class_name)\n self.logger.error(e)", "def read_and_clean_files(clueweb_file, ann_file, data_dir, ann_dir):\n annotation_input = fileinput.FileInput(os.path.join(ann_dir, ann_file), openhook=fileinput.hook_compressed)\n annotation_list = []\n for line in annotation_input:\n\tannotation_list.append(Annotation.parse_annotation(line))\n\n warc_path = os.path.join(data_dir, clueweb_file)\n warc_file = warc.open(warc_path)\n print \"Replacing entity mentions for \", clueweb_file, \":\", ann_file, \"...\"\n start = time.time()\n warc_entry = WarcEntry(warc_path, warc_file, annotation_list)\n cleaned_records = warc_entry.replace_entity_mentions()\n end = time.time()\n print \"Time used: \", end - start\n warc_file.close()\n return cleaned_records", "def parse_file_replace(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n full_data = fisier.read()\n fisier.close()\n\n try:\n fisier = open(path, \"w+\")\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n\n data = \"\"\n for line in full_data:\n data += line\n\n if args.ignore_case:\n pattern = re.compile(re.escape(args.pattern), re.IGNORECASE)\n pattern.sub(args.pattern, data)\n else:\n data = data.replace(args.pattern, args.replace)\n\n fisier.write(data)\n fisier.close()", "def reverse_update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"J\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"J\")[:1000]:\n source.name = (\n f\"ASKAP_{deg2hms(source.wavg_ra, precision=2)}\"\n f\"{deg2dms(source.wavg_dec, precision=2)}\"\n ).replace(\":\", \"\")\n source.save()", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', 
simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n 
capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', 
args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def replace(self, filter, asset_dict): # client_dict provides the uuid\n mongo_core = MainDb.get_core_db_instance()\n replace_result = mongo_core.get_assets().find_one_and_replace(\n {\"uuid\": asset_dict[\"uuid\"]}, asset_dict, upsert=True, return_document=ReturnDocument.AFTER)\n if replace_result[\"uuid\"] == asset_dict[\"uuid\"]:\n return True, \"MongoAsset replaced\"\n else:\n return False, \"Failed to replace asset\"", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"ASKAP_\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"ASKAP_\")[:1000]:\n source.name = (\n f\"J{deg2hms(source.wavg_ra, precision=1)}\"\n f\"{deg2dms(source.wavg_dec, precision=0)}\"\n ).replace(\":\", \"\")\n source.save()", "def preprocessing_objects(img_data, hierarchy_mapping, object_file_name='objects.p'):\n\n object_path_token = \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(object_file_name))\n\n # Check if pickles are already created\n objects_path = FilesManager().get_file_path(object_path_token)\n\n if os.path.isfile(objects_path):\n Logger().log('File is already exist {0}'.format(objects_path))\n objects = FilesManager().load_file(object_path_token)\n return objects\n\n # Bad urls which should be sorted out\n bad_urls = get_bad_urls()\n\n # Get the whole objects from entities\n objects_lst = []\n correct_labels = hierarchy_mapping.keys()\n idx = 0\n for img in img_data:\n\n # Get the url image\n url = img.image.url\n\n # Sorting bad urls\n if url in bad_urls:\n continue\n\n # Get the objects per image\n objects = img.objects\n for object in objects:\n\n # Get the lable of object\n label = object.names[0]\n\n # Check if it is a correct label\n if label not in correct_labels:\n continue\n\n new_object_mapping = ObjectMapping(object.id, object.x, object.y, object.width, object.height, object.names,\n object.synsets, url)\n # Append the new objectMapping to objects_lst\n objects_lst.append(new_object_mapping)\n\n idx += 1\n Logger().log(\"Finished img: {}\".format(idx))\n\n # Pickle objects_lst\n objects_array = np.array(objects_lst)\n # Save the objects files to the disk\n FilesManager().save_file(object_path_token, objects_array)\n return objects_array", "def replaceStringInFile():\n sel = nuke.selectedNodes()\n pane = nuke.Panel('replace string in file knob')\n pane.addSingleLineInput('replace this', '')\n pane.addSingleLineInput('by this', '')\n val = pane.show()\n\n if val and sel:\n for node in sel:\n try:\n str1 = pane.value('replace this')\n str2 = pane.value('by this')\n file = str(node['file'].value())\n newfile = file.replace(str1, str2)\n node['file'].setValue(newfile)\n print 'replacing string in', node.name()\n except:\n print 'failed on', node.name()", "def __replaceFiles(self):\n self.ui.showReplaceFilesDialog(self.textForFind())", "def 
removez_all(self,name):\n\t\tnew_name = string.replace(name,' ', '.')\n\t\tnew_name = self.remove_uploader(new_name)\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\n\t\t#new_name = string.replace(name,'\\&.', '.') BUG\n\t\t\n\t\tnew_name = string.replace(new_name,'-', '.')\n\t\tnew_name = string.replace(new_name,'_', '.')\t\t\n\t\tnew_name = string.replace(new_name,'(', '')\n\t\tnew_name = string.replace(new_name,')', '')\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'X264', 'x264')\n\t\tnew_name = string.replace(new_name,'XVID', 'XviD')\n\t\tnew_name = string.replace(new_name,'TRUEHD', 'TrueHD')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'Multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTI', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTiF', 'MULTi')\n\t\tnew_name = string.replace(new_name,'VO.VF','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VOSTFR','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VO+ST','MULTi')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'TRUE.HD', 'TRUEHD')\n\t\tnew_name = string.replace(new_name,'blueray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BluraY', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'(Bluray-rip)', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray Rip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRIP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BD', 'BluRay')\n\t\tnew_name = string.replace(new_name,'HD-DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HD.DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDVD', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDDVD', 'HDRiP')\t\t\t\t\n\t\tnew_name = string.replace(new_name,'DVDrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVDriP','DVDRiP')\n\t\tnew_name = string.replace(new_name,'dvdrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVD5','DVDRiP')\n\t\tnew_name = string.replace(new_name,'.DVD.','DVDRiP')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'.DD.5.1','DD5.1')\n\t\tnew_name = string.replace(new_name,'6.Canaux','5.1')\t\n\t\tnew_name = string.replace(new_name,'dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'Dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'DtS', 'DTS')\n\t\tnew_name = string.replace(new_name,'DTS.DTS','DTS')\n\t\tnew_name = string.replace(new_name,'DTSHD.','DTS.')\n\t\tnew_name = string.replace(new_name,'.HD.','.')\n\t\t\n\t\tnew_name = string.replace(new_name,'hdma', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD.MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'.MA.', '.HDMA.')\n\t\tnew_name = string.replace(new_name,'ac3','AC3')\n\t\tnew_name = string.replace(new_name,'Ac3','AC3')\n\t\tnew_name = string.replace(new_name,'AC.3.','AC3.')\n\t\t\n\t\tnew_name = 
string.replace(new_name,'HD.HRA','HRA') #High resolution audio\n\t\t#new_name = string.replace(new_name,'.HRA.', '.')\n\t\t\n\t\tnew_name = string.replace(new_name,'.fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.Fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.FR.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'french', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'French', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'VF.', 'FRENCH.')\n\t\tnew_name = string.replace(new_name,'VFF', 'TRUEFRENCH')\t\t\n\t\tnew_name = string.replace(new_name,'truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'Truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFRENCH', 'TRUEFRENCH')\n\t\t\n\t\tnew_name = string.replace(new_name,'VF', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'.PAL.', '.')\n\t\tnew_name = string.replace(new_name,'HD1080', '1080p')\n\t\tnew_name = string.replace(new_name,'1080P', '1080p')\n\t\tnew_name = string.replace(new_name,'720P', '720p')\n\t\t\n\t\tnew_name = string.replace(new_name,'VERSION.LONGUE','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Version.Longue','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Cut', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Edition', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Director\\'s.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'Directors.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'DC', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'D/C', 'DIRECTOR.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'Remastered','REMASTERED')\n\t\tnew_name = string.replace(new_name,'Theatrical.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Theatricul.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Sunshine.Edition','SUNSHINE.EDITION')\n\t\tnew_name = string.replace(new_name,'Revisited.The.Final.Cut','REVISITED.FiNAL.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'LIMITED','LiMiTED')\n\t\t\n\t\tnew_name = string.replace(new_name,'iNT','iNTERNAL')\n\t\tnew_name = string.replace(new_name,'JKF.3D', 'JFK3D')\n\t\tnew_name = string.replace(new_name,'GAIA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'Gaïa', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAÏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAϏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAiA', 'GAÏA')\n\t\t\n\t\tnew_name = string.replace(new_name,'dxva', 'DXVA') #<harwdare decode\n\t\tnew_name = string.replace(new_name,'rip','')\n\t\tnew_name = string.replace(new_name,'Rip','')\n\t\tnew_name = string.replace(new_name,'Ripp','')\n\t\tnew_name = string.replace(new_name,'.mkv.mkv', '.mkv')\n\t\t#new_name = string.replace(new_name,'..', '.')\t#USELESS\n\t\treturn self.refactor_line(new_name)", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def replace_parts(file, file_out, replacements):\n # Read in original file\n with open(file, \"r\") as f:\n lines = f.readlines()\n\n # Replace lines in file\n for i, line in enumerate(lines[:]):\n # Replace file name and tag\n for key, val in 
replacements.items():\n if key in line:\n lines[i] = line.replace(str(key), str(val))\n\n with open(file_out, \"w\") as f:\n f.writelines(lines)", "def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn", "def appendSkinFile(appendFileName, skinPartSearchAndReplace):\n\trsSkinLines = []\n\n# TODO:\n#\tfile_lines = fileReadLines(appendFileName, source=\"MyMetrixLite\")\n\tskFile = open(appendFileName, \"r\")\n\tfile_lines = skFile.readlines()\n\tskFile.close()\n\n\tfor skinLine in file_lines:\n\t\tfor item in skinPartSearchAndReplace:\n\t\t\tskinLine = skinLine.replace(item[0], item[1])\n\t\trsSkinLines.append(skinLine)\n\n\treturn rsSkinLines", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def process(self,line):\n\n pattern_str = f\"src=.?[\\s\\\"].*?[\\s\\\"]\"\n p = re.compile(pattern_str)\n for m in p.finditer(line):\n\n file = m.group(0).split(\"src=\")[1][1:-1]\n if file.startswith(\"http\"):\n continue\n\n new_file = self._copy_file(file)\n\n re.sub(file,new_file,line)\n\n return line", "def replace(file,original_text,replacement_text):\n with open(file, \"rt\") as fin:\n with open(str(file+\"temp\"), \"wt\") as fout:\n for line in fin:\n fout.write(line.replace(original_text,replacement_text))\n os.rename(str(file+\"temp\"),file)\n return", "def rewrite_importlib_resources(pkg_files, new_root):\n for file in pkg_files.glob('*.py'):\n text = file.read_text().replace('importlib_resources.abc', '.abc')\n text = text.replace('zipp', '..zipp')\n file.write_text(text)", "def patch(text):\n if match := re.search(r'\\d+__\\d+__\\d+', text):\n tag = match.group(0)\n if tag not in ocds_tags:\n if ocds_version or not use_development_version:\n text = text.replace(tag, ocds_tag)\n else:\n text = text.replace(ocds_schema_base_url + tag, development_base_url)\n return text" ]
[ "0.61271197", "0.5805997", "0.5651167", "0.5605872", "0.5587787", "0.5498569", "0.54969203", "0.53437096", "0.53432715", "0.5332981", "0.532139", "0.53159696", "0.5253007", "0.52161473", "0.52138823", "0.51546794", "0.5153983", "0.5098517", "0.5057885", "0.5048195", "0.5045116", "0.5001853", "0.4977753", "0.49767575", "0.49727008", "0.49699062", "0.4966435", "0.49655357", "0.49627945", "0.4955837" ]
0.67480993
0
import master settings from data file
def importMasterSettings(self): pickleData = pickle.load( open( self.masterPath.path, "rb" ) ) master = rlayer.RenderLayer( 'defaultRenderLayer' ) master.makeCurrent() for a in pickleData.keys(): try: a.v = pickleData[a] except: continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_settings(self, config):\n user = config.get(self.bot.client.host, 'master')\n # Replace old master\n if ':master' in self.users and user != self.users[':master']:\n self.users[self.users[':master']]['rank'] = 'none'\n if not user in self.users:\n self.users[user] = User('master', datetime.datetime.now(), None,\n hash_password(user, user))\n if not ':master' in self.users or self.users[':master'] != user:\n self.users[':master'] = user\n self.users[':new_master'] = True", "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()", "def loadSettings(self, filename='short_240.settings'):\n global master_run_no\n self.settingsFilename = filename\n # print 'self.settingsFilename = ', self.settingsFilename\n if os.path.exists(filename):\n stream = open(filename, 'r')\n else:\n stream = open(master_lattice_location+filename, 'r')\n self.settings = yaml.load(stream, Loader=yaml.UnsafeLoader)\n self.globalSettings = self.settings['global']\n master_run_no = self.globalSettings['run_no'] if 'run_no' in self.globalSettings else 1\n self.fileSettings = self.settings['files']\n elements = self.settings['elements']\n self.groups = self.settings['groups'] if 'groups' in self.settings and self.settings['groups'] is not None else {}\n stream.close()\n\n # for name, elem in list(self.groups.items()):\n # group = globals()[elem['type']](name, self.elementObjects, **elem)\n # self.groupObjects[name] = group\n\n for name, elem in list(elements.items()):\n self.read_Element(name, elem)\n\n # for name, lattice in list(self.fileSettings.items()):\n # self.read_Lattice(name, lattice)", "def load_measurement_settings_file():\n\n # First update the settings that the state machine is up to date\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n fileDialog = QFileDialog()\n file = fileDialog.getOpenFileName()\n\n if file[0]:\n file = open(str(file[0]), \"r\")\n dict = yaml.load(file)\n file.close()\n\n # l.info(\"Loaded new measurement settings file: \" + str(file[0]))\n self.variables.default_values_dict[\"settings\"].update(\n dict\n ) # Updates the values of the dict, it either updates the values or adds them if not incluced\n self.variables.ui_plugins[\"Settings_window\"].configure_settings()", "def load_data_conf(self):\n data_file = select_file(os.getcwd())\n if data_file is not None:\n self.load_tab(data_file)\n else:\n msg_window('please select valid data config file')", "def load(self):\n if not path.isfile(self.SETTINGS_FILE):\n return\n data = load_json_from_disk(self.SETTINGS_FILE)\n for (key, value) in data.items():\n self.__dict__[key] = value", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n 
setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def load_settings(self):\n\n self.std = settings.settings", "def load(filename):\n conf = CommonConfig.get()\n conf.update(toml.load(filename))\n return conf", "def load_settings(self, outfile='settings.p'):\n settings = pickle.load(open(path,'rb'))\n self.__dict__.update(settings)", "def set_master_table(filepath):\n my_globals['master_table_path'] = filepath\n my_globals['master_table_data'] = None", "def load_from_file(self):\n if not os.path.exists(self.settings_file):\n return\n \n with open(self.settings_file, 'rb') as settings_file:\n try:\n options = json.load(settings_file)\n \n if self._settings_coordinate(options):\n self.options = options\n except:\n self.load_default()", "def import_config(self):\n # Get the config file\n import config\n\n # Get all keys from keyvalue pairs in the config file\n settingsFromConfigFile = [x for x in dir(config) if not x.startswith('__')]\n\n # Convert config file into dict\n for key in settingsFromConfigFile:\n value = getattr(config, key)\n self.config[key] = value\n\n # Settings validation: specify keys which are valid settings\n # If there are rows in the config file which are not listed here, an\n # error will be raised\n validSettings = {\n 'data_dir',\n 'running_data_dir',\n 'unison_log_dir',\n 'unisonctrl_log_dir',\n 'log_file',\n 'make_root_directories_if_not_found',\n 'sync_hierarchy_rules',\n 'unison_local_root',\n 'unison_remote_root',\n 'unison_path',\n 'global_unison_config_options',\n 'unison_remote_ssh_conn',\n 'unison_remote_ssh_keyfile',\n 'unison_local_hostname',\n 'unison_home_dir',\n 'unison_user',\n 'webhooks',\n 'rotate_logs',\n }\n\n # If a setting contains a directory path, add it's key here and it will\n # be sanatized (whitespace and trailing whitespaces stripped)\n settingPathsToSanitize = {\n 'data_dir',\n 'unison_home_dir',\n 'running_data_dir',\n 'unison_log_dir',\n 'unisonctrl_log_dir',\n }\n\n # Values here are used as config values unless overridden in the\n # config.py file\n defaultSettings = {\n 'data_dir': '/tmp/unisonctrl',\n 'log_file': '/dev/null',\n 'make_root_directories_if_not_found': True,\n 'unison_path': '/usr/bin/unison', # Default ubuntu path for unison\n 'unison_remote_ssh_keyfile': \"\",\n 'unison_local_hostname': platform.node(),\n 'running_data_dir': self.config['data_dir'] + os.sep + \"running-sync-instance-information\",\n 'unison_log_dir': self.config['data_dir'] + os.sep + \"unison-logs\",\n 'unisonctrl_log_dir': self.config['data_dir'] + os.sep + \"unisonctrl-logs\",\n 'unison_user': getpass.getuser(),\n 'rotate_logs': \"time\",\n }\n\n # TODO: Implement allowedSettings, which force settings to be\n # in a given list of options\n\n # Apply default settings to fill gaps between explicitly set ones\n for key in defaultSettings:\n if (key not in self.config):\n self.config[key] = defaultSettings[key]\n\n # Ensure all required keys are specified\n for key in validSettings:\n if (key not in self.config):\n raise LookupError(\"Required config entry '\" + key + \"' not specified\")\n\n # Ensure no additional keys are specified\n for key in self.config:\n if (key not in validSettings):\n raise LookupError(\"Unknown config entry: '\" + key + \"'\")\n\n # Sanatize directory paths\n for key in settingPathsToSanitize:\n self.config[key] = self.sanatize_path(self.config[key])\n\n # If you reach here, configuration was read and imported without error\n\n return True", "def 
load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def load(self,fileName,doSave=True):\n #--Load masters\n modFileNames = self.keys()\n for master,size in self[fileName].tes3.masters:\n if master in modFileNames and master != fileName:\n self.load(master,False)\n #--Load self\n mwIniFile.load(fileName,doSave)", "def apply_config(filename):\n with open(filename) as config_file:\n config = json.load(config_file)\n for setting, value in config.items():\n CoreConfig.__dict__[setting] = value", "def merge_into_settings(self, settings):\n if not self._meta_dict:\n self._load_from_file()\n\n settings.chat_name = self._meta_dict[DumpMetadata.CHAT_NAME]\n settings.last_message_id = self._meta_dict[DumpMetadata.LAST_MESSAGE_ID]\n settings.exporter = self._meta_dict[DumpMetadata.EXPORTER]", "def import_db(import_file):\n import_data(import_file)", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 
'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def load(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n if os.path.exists( settings_path ):\n self.fileList = simplejson.loads( open( settings_path, 'r' ).read() )\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n if os.path.exists( settings_path ):\n self.userList = simplejson.loads( open( settings_path, 'r' ).read() )", "def import_settings(path_to_settings=None):\n\n file_path = 'settings.json' if path_to_settings is None else path_to_settings\n\n if not os.path.isfile(file_path):\n # settings file doesn't exist\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), 'settings.json')\n\n with open(file_path) as in_file:\n data = json.load(in_file)\n settings = Settings()\n\n # required attributes, fail if missing\n try:\n settings.input_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['input_folder'], data['input_file'])\n settings.output_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['output_folder'], data['output_file'])\n settings.default_timezone = data['default_timezone']\n settings.output_timezone = data['output_timezone']\n settings.custom_column_headers = data.get('custom_column_headers', [])\n settings.app_id = data['app_id']\n except KeyError as e:\n print(\"Key not found in {}: \".format(file_path) + str(e))\n sys.exit(1)\n\n return settings", "def importVarious(context):\n #VAMOS A PONERLA EN EL CONFIG\n USERNAME='PPM'\n 
username=USERNAME\n title='cenditel.ppm'\n # Ordinarily, GenericSetup handlers check for the existence of XML files.\n # Here, we are not parsing an XML file, but we use this text file as a \n # flag to check that we actually meant for this import step to be run.\n # The file is found in profiles/default.\n\n if context.readDataFile('cenditel.ppm.txt') is None:\n return\n\n portal = context.getSite()\n obj = SetupEnviron() \n logger = obj.getLogger(\"cenditel.ppm\")\n MakeDefaultUser(context, username, title)\n #TODO configureCMFNotification(portal,logger) ", "def load_settings(self):\n # Set the default settings. In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def load(initial=False):\n log.info(\"Loading settings file\")\n try:\n if initial and _initialCache: # If we are initial and we already have cached load, don't go to file\n data = _initialCache\n else:\n with open(SETTINGS_FILE) as file:\n data = json.load(file)\n if initial: # Store this for later\n _initialCache.update(data)\n except FileNotFoundError:\n log.warning(\"No log file found to load! At '{}'\".format(SETTINGS_FILE))\n return\n except json.JSONDecodeError:\n log.error(\"Settings file was corrupt! Cannot load settings\")\n return\n else:\n for name in _names: # Make sure we update in place and don't make new values\n _names[name].clear()\n _names[name].update(data[name])\n\n for id in _names[name].info: # Check to make sure we have all values at least at a default\n if id not in _names[name]:\n _names[name][id] = _names[name].info[id][\"default\"]\n\n # Legacy updating code for updating 1.2.2.0 to newer versions\n if \"voiceChannel\" in _names[\"discord\"] and type(_names[\"discord\"][\"voiceChannel\"]) == str:\n log.warning(\"Updating save file to newest version!\")\n _names[\"discord\"][\"voiceChannel\"] = int(_names[\"discord\"][\"voiceChannel\"])\n save() # Save these changes", "def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass", "def und_add_setting(udb_file, configuration_path_file):\n subprocess.call(f\"und import {configuration_path_file} {udb_file}\")", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def load_from_conf(self):\r\n raise NotImplementedError" ]
[ "0.62687576", "0.6112625", "0.60982084", "0.599278", "0.59767", "0.5931371", "0.5877606", "0.58080935", "0.5805709", "0.58034456", "0.57821137", "0.57619953", "0.57555234", "0.5754049", "0.5751691", "0.57266325", "0.5715067", "0.5707333", "0.5681978", "0.56536126", "0.5650209", "0.56176126", "0.5614506", "0.5605488", "0.5593285", "0.55932117", "0.5577377", "0.5576538", "0.55555993", "0.55518323" ]
0.7781248
0
return the overrides in the layer
def dataOverrides(self): return self._overrides
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overrides(self) -> ConfigNodePropertyArray:\n return self._overrides", "def default_binding_overrides(self):\n return self.__default_binding_overrides", "def ComponentOverrides(self):\n return tuple(self._json_obj.get('component_overrides', {}).items())", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def overrides(self) -> tuple[dict[str, Any], dict[str, Any]]:\n settings = {}\n if self.actions:\n settings = self.actions.overrides\n if self.validations:\n settings |= self.validations.overrides\n\n filter_settings = {}\n if self.extra_fields:\n filter_settings = self.extra_fields.model_dump(exclude_unset=True)\n\n return settings, filter_settings", "def test_get_overrides(self):\n # FormOverrideMixIn.get_overrides\n pass", "def get_overrides(conn):\n with conn.cursor(cursor_factory=RealDictCursor) as cur:\n cur.execute(sql_overrides)\n return cur.fetchall()", "def withOverrides(overrides):", "def filter_contiguity_overrides(self):\n return self.filter_nodes('//ContiguityOverrides/ContiguityOverride')", "def get_overrides(token_fields_base, token_fields_from_args):\n overrides = []\n for key_raw, _ in token_fields_from_args.items():\n keys = key_raw.split('.')\n base_ref = token_fields_base\n try:\n for key in keys:\n base_ref = base_ref[key]\n # no KeyError means that the token_fields_base has an existing value corresponding with the arg\n overrides.append(key_raw)\n except KeyError:\n pass\n return overrides", "def get_overrides(self, app, name, namespace):\n try:\n return self._list(self._path(app) +\n '?name=' + name +\n '&namespace=' + namespace)[0]\n except IndexError:\n return None", "def shrinkage_overrides(self):\n return self._shrinkage_overrides", "def get_overrides_columns(self):\n\n if hasattr(self, '_overrides'):\n return list(self._overrides.columns)\n return []", "def _resolve_overrides(self):\r\n if not self.override_targets:\r\n return self._pre_override_dependencies\r\n\r\n result = OrderedSet()\r\n\r\n # resolve overrides and fetch all of their \"artifact-providing\" dependencies\r\n excludes = set()\r\n for override_target in self.override_targets:\r\n # add pre_override deps of the target as exclusions\r\n for resolved in override_target.resolve():\r\n excludes.update(self._excludes(resolved))\r\n # prepend the target as a new target\r\n result.add(override_target)\r\n\r\n # add excludes for each artifact\r\n for direct_dep in self._pre_override_dependencies:\r\n # add relevant excludes to jar dependencies\r\n for jar_dep in self._jar_dependencies(direct_dep):\r\n for exclude in excludes:\r\n jar_dep.exclude(exclude.org, exclude.name)\r\n result.add(direct_dep)\r\n\r\n return result", "def _get_layers(self) :\n \n return self._layers", "def conditional_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]]]:\n return pulumi.get(self, \"conditional_overrides\")", "def add_over(self, override: ItemConfig) -> None:\n self.all_conf = lazy_conf.concat(self.all_conf, override.all_conf)\n\n for vers_id, styles in override.versions.items():\n our_styles = self.versions.setdefault(vers_id, {})\n for sty_id, style in styles.items():\n if sty_id not in our_styles:\n our_styles[sty_id] = style\n else:\n our_styles[sty_id] = lazy_conf.concat(our_styles[sty_id], style)", "def layers(self):\n return self['layers']", "def override_paramset(self, override_str):\n\n paramset = ParamSet()\n if not override_str:\n return paramset\n\n override = eval(override_str, {}, {})\n if not override:\n 
return paramset\n\n for override_name in override:\n # The override can have a node_name/parm format which allows for point\n # instance overrides to override parms in a network.\n\n cached_override = self.override_cache.get(override_name, None)\n if cached_override is not None:\n # Hint to just skip\n if cached_override == -1:\n continue\n if isinstance(cached_override, PBRTParam):\n # textures which can't be overriden\n paramset.add(cached_override)\n continue\n pbrt_name, pbrt_type, tuple_names = cached_override\n if tuple_names:\n value = [override[x] for x in tuple_names]\n else:\n value = override[override_name]\n pbrt_param = PBRTParam(pbrt_type, pbrt_name, value)\n paramset.add(pbrt_param)\n continue\n\n override_match = self.override_pat.match(override_name)\n spectrum_type = override_match.group(\"spectrum\")\n parm_name = override_match.group(\"parm\")\n override_node = override_match.group(\"node\")\n if override_node is not None and override_node != self.name:\n self.override_cache[override_name] = -1\n continue\n\n # There can be two style of \"overrides\" one is a straight parm override\n # which is similar to what Houdini does. The other style of override is\n # for the spectrum type parms. Since spectrum parms can be of different\n # types and the Material Overrides only support \"rgb\" we are limited\n # in the types of spectrum overrides we can do. To work around this we'll\n # support a different style, override_parm:spectrum_type. If the parm name\n # ends in one of the \"rgb/color\" types then we'll handle it differently.\n # TODO add a comment as to what the value would look like\n\n # NOTE: The material SOP will use a parm style dictionary if there\n # parm name matches exactly\n # ie) if there is a color parm you will get\n # {'colorb':0.372511,'colorg':0.642467,'colorr':0.632117,}\n # But if the parm name doesn't match (which we are allowing\n # for you will get something like this -\n # {'colora':(0.632117,0.642467,0.372511),}\n\n # Once we have a parm name, we need to determine what \"style\" it is.\n # Whether its a hou.ParmTuple or hou.Parm style.\n tuple_names = tuple()\n parm_tuple = self.node.parmTuple(parm_name)\n if parm_tuple is None:\n # We couldn't find a tuple of that name, so let's try a parm\n parm = self.node.parm(parm_name)\n if parm is None:\n # Nope, not valid either, let's move along\n self.override_cache[override_name] = -1\n continue\n # if its a parm but not a parmtuple it must be a split.\n parm_tuple = parm.tuple()\n # we need to \"combine\" these and process them all at once and\n # then skip any other occurances. The skipping is handled by\n # the overall caching mechanism. 
self.override_cache\n tuple_names = tuple([x.name() for x in parm_tuple])\n\n # This is for wrangling parm names of texture nodes due to having a\n # signature parm.\n pbrt_parm_name = self.pbrt_parm_name(parm_tuple.name())\n\n if spectrum_type is None and tuple_names:\n # This is a \"traditional\" override, no spectrum or node name prefix\n value = [override[x] for x in tuple_names]\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, value\n )\n elif spectrum_type in (\"spectrum\", \"xyz\", \"blackbody\"):\n pbrt_param = PBRTParam(\n spectrum_type, pbrt_parm_name, override[override_name]\n )\n elif not tuple_names:\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, override[override_name]\n )\n else:\n raise ValueError(\"Unable to wrangle override name: %s\" % override_name)\n\n paramset.add(pbrt_param)\n\n # From here to the end of the loop is to allow for caching\n\n if pbrt_param.type == \"texture\":\n self.override_cache[override_name] = pbrt_param\n continue\n\n # we are making an assumption a split parm will never be a spectrum\n # or have a node prefix. The Material SOP doesn't allow for it as well.\n for name in tuple_names:\n # The -1 means \"continue\"\n self.override_cache[name] = -1\n # Sanity check\n if tuple_names and override_name not in tuple_names:\n raise ValueError(\n \"Override name: %s, not valid for a parmTuple\" % override_name\n )\n # override_name must match one of the tuple_names\n self.override_cache[override_name] = (\n pbrt_param.name,\n pbrt_param.param_type,\n tuple_names,\n )\n return paramset", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def GetDefaultLayerProperties():\r\n pass", "def tweaks(self) -> None:\n pass", "def layer_offsets(self):\n ...", "def get_patches(self):\n self.get_source_patch_masks()\n self.get_target_patch_masks()\n self.get_source_patches()", "def apply_replacements(self, env, **kw):\n ovr = self.replacements.apply(env)\n kw = self.replacements.apply(kw, True)\n return (env.Override(ovr), kw)", "def _overrides(self, tense, overrides, attr_name,persons=None): \n if not hasattr(self, attr_name):\n self_overrides = [ None ] * len(Tense)\n setattr(self, attr_name, self_overrides) \n else:\n self_overrides = getattr(self, attr_name)\n \n if tense in Tense.Person_Agnostic():\n if isinstance(overrides, str) or self_overrides[tense] is None:\n self_overrides[tense] = [ overrides ]\n else:\n self_overrides[tense].append(overrides)\n return\n \n if persons is None:\n _persons = Person\n elif isinstance(persons, int):\n _persons = [ persons ]\n elif isinstance(persons, list):\n _persons = persons\n else:\n self.__raise(\"persons must be None, integer or list of integers\", tense)\n \n if self_overrides[tense] is None:\n self_overrides[tense] = [None] * len(Person)\n \n if isinstance(overrides, str) or inspect.isfunction(overrides) or inspect.ismethod(overrides): \n for person in _persons:\n if isinstance(overrides, str) or self_overrides[tense][person] is None:\n # if a hard replacement (string), previous overrides are discarded because they will be replaced.\n # or this is the first override\n self_overrides[tense][person] = [overrides]\n else:\n self_overrides[tense][person].append(overrides) \n \n elif isinstance(overrides, list):\n for person, override in enumerate(overrides):\n if override is not None:\n if isinstance(override, str) or 
self_overrides[tense][person] is None:\n # if a hard replacement (string), previous overrides are discarded because they will be replaced.\n # or this is the first override\n self_overrides[tense][person] = [override]\n else:\n self_overrides[tense][person].append(override)", "def control_plane_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ControlPlaneUpgradeOverride']]]]]:\n return pulumi.get(self, \"control_plane_overrides\")", "def _state_overridden(self, want, have):\n # overridden behavior is the same as replaced except for scope.\n cmds = []\n for i in have:\n obj_in_want = search_obj_in_list(i[\"name\"], want, \"name\")\n if obj_in_want:\n if i != obj_in_want:\n v4_cmds = self._v4_cmds(\n obj_in_want.pop(\"ipv4\", []),\n i.pop(\"ipv4\", []),\n state=\"overridden\",\n )\n replaced_cmds = self._state_replaced(obj_in_want, [i])\n replaced_cmds.extend(v4_cmds)\n self.cmd_order_fixup(replaced_cmds, obj_in_want[\"name\"])\n cmds.extend(replaced_cmds)\n else:\n deleted_cmds = self.generate_delete_commands(i)\n self.cmd_order_fixup(deleted_cmds, i[\"name\"])\n cmds.extend(deleted_cmds)\n\n for i in want:\n if [item for item in have if i[\"name\"] == item[\"name\"]]:\n continue\n cmds.extend(self.add_commands(i, name=i[\"name\"]))\n\n return cmds", "def overrides(self, overrides: ConfigNodePropertyArray):\n\n self._overrides = overrides", "def get_testing_overrides() -> Dict[Callable, Callable]:\n # Every function in the PyTorch API that can be overriden needs an entry\n # in this dict.\n #\n # Optimally we would use inspect to get the function signature and define\n # the lambda function procedurally but that is blocked by generating\n # function signatures for native kernels that can be consumed by inspect.\n # See Issue #28233.\n ret = {}\n ret.update(get_tensor_overrides())\n ret.update(get_torch_overrides())\n ret.update(get_nn_functional_overrides())\n return ret" ]
[ "0.6976614", "0.6493725", "0.6390087", "0.6375561", "0.6312835", "0.6241755", "0.60139596", "0.59983903", "0.5893521", "0.5878425", "0.5835658", "0.5816514", "0.5811458", "0.58073103", "0.5771545", "0.5764795", "0.57407266", "0.5684022", "0.5561158", "0.5544201", "0.55372936", "0.55325943", "0.549828", "0.54922616", "0.5461393", "0.5453749", "0.5451989", "0.54368544", "0.54365575", "0.5431538" ]
0.72761494
0
When the user posts the find_org_to_create_account form, redirect to that page
def find_org_to_create_account(request):
    if request.method != 'POST' or not request.POST.get('organization_slug'):
        return HttpResponseRedirect(reverse('home'))
    else:
        org_slug = request.POST.get('organization_slug')
        return HttpResponseRedirect(reverse('create_org_account', args=[org_slug]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def oauth_start_flow():\n # Have to do authentication!\n rest.default_user_authentication()\n\n account_type = flask.request.args.get('type')\n if account_type is None:\n flask.abort(400)\n\n cls = ACCOUNT_TYPES.get(account_type, None)\n if cls is None:\n flask.about(400)\n\n key = str(uuid.uuid4())\n instance = cls(id=key)\n instance.put()\n\n return flask.redirect(instance.AUTH_URL %\n {'client_id': instance.CLIENT_ID,\n 'state': key})", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def org_organisation_create_onaccept(form):\n\n db = current.db\n s3db = current.s3db\n ftable = s3db.pr_forum\n\n # Lookup the Reserves Forum\n forum = db(ftable.name == \"Reserves\").select(ftable.pe_id,\n limitby = (0, 1)\n ).first()\n try:\n reserves_pe_id = forum.pe_id\n except AttributeError:\n current.log.error(\"Unable to link Org Forum to Reserves Forum: Forum not Found\")\n return\n\n form_vars_get = form.vars.get\n organisation_id = form_vars_get(\"id\")\n\n # Lookup the Organisation\n otable = s3db.org_organisation\n org = db(otable.id == organisation_id).select(otable.pe_id,\n limitby = (0, 1)\n ).first()\n org_pe_id = org.pe_id\n\n # Create Forum\n record = {\"organisation_id\": organisation_id,\n \"name\": \"%s Reserves\" % form_vars_get(\"name\"),\n }\n forum_id = ftable.insert(**record)\n record[\"id\"] = forum_id\n s3db.update_super(ftable, record)\n forum_pe_id = record[\"pe_id\"]\n\n # Add the Hierarchy links\n s3db.pr_add_affiliation(org_pe_id, forum_pe_id, role=\"Realm Hierarchy\")\n s3db.pr_add_affiliation(reserves_pe_id, forum_pe_id, role=\"Realm Hierarchy\")", "def org_view(org_id):\n org_detail = None\n try:\n org_detail = Organisation.query.filter_by(id=org_id).first()\n\n except IndexError:\n pass\n\n if org_detail is not None:\n return render_template('organisations/org_view.html', org_detail=org_detail, org=org_detail)\n\n\n elif org_detail == None:\n return redirect(url_for('main.create_org'))\n\n else:\n abort(404)", "def form_valid(self, form):\n form.instance.auth_user = self.request.user\n form.instance.group = self.get_local_group()\n\n super(CreateApplicationView, self).form_valid(form)\n\n return redirect(self.success_url + '?id=' + str(self.object.pk))", "def post(self, request, *args, **kwargs):\n #set request form as new form\n form = SignUpForm(request.POST)\n #validation; if the form is invalid, return empty form again\n if not form.is_valid():\n return render(request, 'accounts/signup.html', {'form': form})\n\n #save form info into user database\n user_info_save = form.save(commit=False)\n 
user_info_save.set_password(form.cleaned_data['password'])\n user_info_save.is_manager = self.kwargs.get('is_manager')\n user_info_save.save()\n\n # #login; save the user data and update the database\n # auth_login(request, user_info_save)\n\n # redirect to shift index\n now = datetime.today()\n kwargs['month'] = now.month\n kwargs['year'] = now.year\n kwargs['day'] = now.day\n return redirect('accounts:login')", "def account_profile(request):\n get_or_creat(request)\n return redirect(\"/\")", "def form_valid(self, form):\n redirect_url = self.accept_auth_request(form.get_user())\n return HttpResponseRedirect(redirect_url)", "def post(self, request, *args, **kwargs):\n\n if request.user.is_authenticated:\n return redirect(reverse_lazy('feed_view'))\n\n return super(SignUpView, self).post(request, *args, **kwargs)", "def award_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n award_form = AwardForm()\n return render_to_response('award_form.html', {'form': award_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n award_form = AwardForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if award_form.is_valid():\n af = award_form.save(commit=False)\n af.company = company\n af.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_account():\n form = CreateAccountForm(request.form)\n form.set_site_choices()\n\n if not form.validate():\n return create_account_form(form)\n\n screen_name = form.screen_name.data.strip()\n first_names = form.first_names.data.strip()\n last_name = form.last_name.data.strip()\n email_address = form.email_address.data.lower()\n password = form.password.data\n site_id = form.site_id.data\n\n if site_id:\n site = site_service.get_site(site_id)\n else:\n site = None\n\n if user_service.is_screen_name_already_assigned(screen_name):\n flash_error(gettext('This username cannot be used.'))\n return create_account_form(form)\n\n if user_service.is_email_address_already_assigned(email_address):\n flash_error(gettext('This email address cannot be used.'))\n return create_account_form(form)\n\n initiator_id = g.user.id\n\n try:\n user, event = user_creation_service.create_basic_user(\n screen_name,\n email_address,\n password,\n first_names=first_names,\n last_name=last_name,\n creator_id=initiator_id,\n )\n except user_creation_service.UserCreationFailed:\n flash_error(\n gettext(\n 'User \"%(screen_name)s\" could not be created.',\n screen_name=screen_name,\n )\n )\n return create_account_form(form)\n\n flash_success(\n gettext(\n 'User \"%(screen_name)s\" has been created.',\n screen_name=user.screen_name,\n )\n )\n\n if site:\n user_creation_service.request_email_address_confirmation(\n user, email_address, site_id\n )\n flash_success(\n gettext('An email has been sent to the corresponding address.'),\n icon='email',\n )\n\n user_signals.account_created.send(None, event=event)\n\n return redirect_to('.view', user_id=user.id)", "def create_account(request, role):\n context = {}\n if request.method == 
\"POST\":\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm(request.POST)\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm(request.POST)\n\n if(form.is_valid()):\n createNewUser(form)\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Account has been created for {username}!\")\n return redirect('login')\n else:\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm()\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm()\n else:\n context['error'] = \"URL does not exist. Please return to home and try again\"\n return render(request, 'classroom_main/create_account.html', context)\n\n context[\"type\"] = role\n context['title'] = \"Sign up to the Online Coding Classroom\"\n context['form'] = form\n\n return render(request, 'classroom_main/create_account.html', context)", "def funding_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n funding_form = FundingForm()\n return render_to_response('funding_form.html', {'form': funding_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n funding_form = FundingForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if funding_form.is_valid():\n of = funding_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def form_valid(self, form):\n login(self.request, form.get_user())\n return redirect('profile', id=form.get_user().id)", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def post(self):\n cont = self.request_string('continue', default=\"/\")\n self.redirect(users.create_login_url(cont))", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def customer_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request 
is GET presents empty form\n if request.method == 'GET':\n\n customer_form = CustomerForm()\n return render_to_response('customer_form.html', {'form': customer_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n customer_form = CustomerForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if customer_form.is_valid():\n of = customer_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def test_successful_registration_redirects_to_right_place(self):\n response = self.register_bob(follow=True)\n self.assertTrue(\n response.redirect_chain[0][0] == '/registration/register/complete/')", "def create_account():\n\n return render_template('account.html')", "def acquisition_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n acquisition_form = AcquisitionForm()\n return render_to_response('acquisition_form.html', {'form': acquisition_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n acquisition_form = AcquisitionForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if acquisition_form.is_valid():\n aqf = acquisition_form.save(commit=False)\n aqf.company = company\n aqf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def user_signup():\n\n if request.method == \"GET\":\n return render_template(\"signup_form.html\")\n\n # post request logic starts here\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n if email_is_valid(email):\n\n flash(\"It looks like you are already signed up for Readerboard! 
Try signing in instead.\")\n return redirect(\"/signin\")\n\n else:\n\n new_user = User()\n db.session.add(new_user)\n db.session.commit()\n new_acct = Account(user_id=new_user.user_id, email=email, password=password)\n db.session.add(new_acct)\n\n db.session.commit()\n session['acct'] = new_acct.acct_id\n\n return redirect(\"/auth/goodreads\")", "def create_user():\n if request.method == 'POST':\n PLAN.create_user(request.form['fname'],\n request.form['lname'],\n request.form['username'],\n request.form['password'],\n request.form['email'])\n return redirect(url_for('index'))\n return render_template('newuser.html')", "def post(self) :\n self.redirect('/admin')", "def login_success(request):\n if not hasattr(request.user, 'profile'):\n return redirect('index')\n else:\n return redirect('registration_process')", "def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n if is_application_owner(self.request.user, application) and (\n application.questionnaire.status != 'complete'\n ) and app_complete is not None and (\n app_complete.authorized_email is not None\n ) and app_complete.questionnaire.completed_by_candidate and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach authorized email & questionnaire to application\"\"\"\n application.authorized_email = app_complete.authorized_email\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n raise Http404(_(\"No application found matching the query\"))", "def _signup(request, eamap, retfun=None):\r\n # save this for use by student.views.create_account\r\n request.session['ExternalAuthMap'] = eamap\r\n\r\n if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP', ''):\r\n # do signin immediately, by calling create_account, instead of asking\r\n # student to fill in form. 
MIT students already have information filed.\r\n username = eamap.external_email.split('@', 1)[0]\r\n username = username.replace('.', '_')\r\n post_vars = dict(username=username,\r\n honor_code=u'true',\r\n terms_of_service=u'true')\r\n log.info('doing immediate signup for %s, params=%s', username, post_vars)\r\n student.views.create_account(request, post_vars)\r\n # should check return content for successful completion before\r\n if retfun is not None:\r\n return retfun()\r\n else:\r\n return redirect('/')\r\n\r\n # default conjoin name, no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly\r\n # but this only affects username, not fullname\r\n username = re.sub(r'\\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE)\r\n\r\n context = {'has_extauth_info': True,\r\n 'show_signup_immediately': True,\r\n 'extauth_domain': eamap.external_domain,\r\n 'extauth_id': eamap.external_id,\r\n 'extauth_email': eamap.external_email,\r\n 'extauth_username': username,\r\n 'extauth_name': eamap.external_name,\r\n 'ask_for_tos': True,\r\n }\r\n\r\n # Some openEdX instances can't have terms of service for shib users, like\r\n # according to Stanford's Office of General Counsel\r\n uses_shibboleth = (settings.FEATURES.get('AUTH_USE_SHIB') and\r\n eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX))\r\n if uses_shibboleth and settings.FEATURES.get('SHIB_DISABLE_TOS'):\r\n context['ask_for_tos'] = False\r\n\r\n # detect if full name is blank and ask for it from user\r\n context['ask_for_fullname'] = eamap.external_name.strip() == ''\r\n\r\n # validate provided mail and if it's not valid ask the user\r\n try:\r\n validate_email(eamap.external_email)\r\n context['ask_for_email'] = False\r\n except ValidationError:\r\n context['ask_for_email'] = True\r\n\r\n log.info('EXTAUTH: Doing signup for %s', eamap.external_id)\r\n\r\n return student.views.register_user(request, extra_context=context)", "def home_page():\n return redirect('/register')", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())" ]
[ "0.603463", "0.6004999", "0.5971245", "0.5959396", "0.5919654", "0.58651173", "0.58611304", "0.58471966", "0.582282", "0.57899594", "0.578932", "0.57571214", "0.57511485", "0.574679", "0.57035786", "0.56987613", "0.56865185", "0.5668221", "0.5650241", "0.5633905", "0.5633754", "0.56261593", "0.56210124", "0.558143", "0.5575769", "0.5558815", "0.55578035", "0.55500686", "0.55401987", "0.55401987" ]
0.81982374
0
df is a function of x_i, y_i, beta
def sgd_step(df, alpha, prev_beta, xy_i):
    x_i, y_i = xy_i
    gradient = df(x_i, y_i, prev_beta)
    return [beta_j + alpha * df_j
            for beta_j, df_j in zip(prev_beta, gradient)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_beta(self,df,tick,ind):\n cov = get_cov(df,tick,ind)\n var = df[ind].var()\n beta = cov / var\n return beta", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def smale_beta(f, x0, df, args=()):\n _args = (x0,) + args\n beta = numpy.abs(f(*_args) / df[0](*_args))\n return beta", "def fun(_, y):\n return np.array([-self.r * self.beta * y[2] * y[0] / self.N,\n self.r * self.beta * y[2] * y[0] / self.N - self.sigma * y[1],\n self.sigma * y[1] - self.gamma * y[2],\n self.gamma * y[2]])", "def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])", "def df(x):\n raise NotImplementedError", "def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c", "def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)", "def from_dataframe(df):\n X = sm.add_constant(np.array(df['x']))\n y = np.array(df['y']).reshape(-1,1)\n return y, X", "def _fit(self, df):\n return df", "def evaluate_df(self, df):\n ## Check invariant; model inputs must be subset of df columns\n var_diff = set(self.var).difference(set(df.columns))\n if len(var_diff) != 0:\n raise ValueError(\n \"Model inputs not a subset of given columns;\\n\"\n + \"missing var = {}\".format(var_diff)\n )\n\n df_tmp = df.copy().drop(self.out, axis=1, errors=\"ignore\")\n ## Evaluate each function\n for func in self.functions:\n ## Concatenate to make intermediate results available\n df_tmp = concat((df_tmp, func.eval(df_tmp)), axis=1)\n\n return df_tmp[self.out]", "def df_model(self):\n return self.Kernel.df(self.xdata)", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def eval(self, df):\n df_res = self.func(df)\n return df_res[self.out]", "def objective(beta, lambdat, X, y):\n return 1/len(y) * (np.sum(\n (np.maximum(0, 1-((y[:, np.newaxis]*X).dot(beta)))**2)))\\\n + lambdat * np.linalg.norm(beta)**2", "def SGD_beta(X, y, eta=1e-4, gamma=0.01):\n\n\t# Stochastic Gradient Descent, shuffle?\n\tbeta = np.random.randn(len(X[0]), 1)\n\tn = len(X)\n\tM = 10 #0.05*n \t # Size of each minibatch, should be smaller than n\n\tm = int(n/M) \t # Number of minibatches\n\tn_epochs = 500 \t\t # Nmber of epochs\n\n\tacc = np.zeros(n_epochs+1)\n\tepoch_list = np.zeros(n_epochs+1)\n\n\t#z_i = np.zeros(m)\n\t#model_i = np.zeros(m)\n\t#y_i = np.zeros(m)\n\n\tfor epoch in range(1,n_epochs+1):\n\t\tfor i in range(m):\n\n\t\t\trandom_index = 
np.random.randint(m) #Pick the k-th minibatch at random\n\t\t\txi = X[random_index:random_index+1]\n\t\t\tyi = y[random_index:random_index+1]\n\n\t\t\t#Compute the gradient using the data in minibatch Bk\n\t\t\tgrad_beta_C = beta_gradients(xi, yi, beta)\n\t\t\tbeta -= eta - gamma * grad_beta_C\n\n\t\t\t#y_i[i] = yi\n\t\t\t#z_i[i] = xi@beta\n\t\t\t#model_i[i] = logistic_function(z_i[i])\n\n\t\t#acc[epoch] = accuracy(model_i, y_i)\n\t\t#epoch_list[epoch] = epoch\n\n\treturn beta", "def solve_beta_mnt(X, Y, pos=False, learning_rate=0.01, stop_criteria=10**-4):\n n = len(Y)\n p = X.shape[1]\n iso_order = np.arange(p)\n \n # initialize\n beta_prev = np.ones(p)\n beta = np.random.normal(size = X.shape[1])\n \n # gradient descent\n i = 0.0 # iteration number\n while sum((beta-beta_prev)**2)**0.5 > stop_criteria:\n i += 1\n# print(sum((beta-beta_prev)**2)**0.5) # used for debug\n \n # calculate gradient\n beta_grad = -2/n * (X.T@Y - X.T@X@beta)\n # update beta_prev\n beta_prev = beta\n # update beta with projection\n beta = beta - (1/i) * learning_rate * beta_grad\n beta = IsotonicRegression().fit_transform(iso_order, beta)\n # if pos == True, assign zero to negative coordinates\n if pos: beta = np.where(beta > 0, beta, 0)\n# print(sum((beta-beta_prev)**2)**0.5) # used for testing\n return beta", "def bayes_cov_col(Y,X,cols,lm):\n\n #EM iterateit\n Yhat=pd.DataFrame(lm.predict(X))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n SSE_all=np.square(Y.subtract(Yhat))\n X_adjust=X.copy()\n\n\n df_SSE = []\n df_logit = []\n\n for curcov in cols:\n\n curcells=X[X[curcov]>0].index\n\n if len(curcells)>2:\n\n X_notcur=X.copy()\n X_notcur[curcov]=[0]*len(X_notcur)\n\n X_sub=X_notcur.loc[curcells]\n\n Y_sub=Y.loc[curcells]\n\n GENE_var=2.0*Y_sub.var(axis=0)\n vargenes=GENE_var[GENE_var>0].index\n\n Yhat_notcur=pd.DataFrame(lm.predict(X_sub))\n Yhat_notcur.index=Y_sub.index\n Yhat_notcur.columns=Y_sub.columns\n\n SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))\n SSE=SSE_all.loc[curcells].subtract(SSE_notcur)\n SSE_sum=SSE.sum(axis=1)\n\n SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)\n logitify=np.divide(1.0,1.0+np.exp(SSE_transform))#sum))\n\n df_SSE.append(SSE_sum)\n df_logit.append(logitify)\n\n X_adjust[curcov].loc[curcells]=logitify\n\n return X_adjust", "def beta(self, index):\n index_change = index.close.pct_change()\n beta = self.pct_change.cov(index_change) / index_change.var()\n return beta", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def df(x):\n\n # coefficients\n A = 728.0\n B = 0.317\n C = 0.486\n D = -8.99 * 1.6\n\n # function\n dfx = 2 * D / x**3 + A / B**2 * math.exp(- x / B) - 42 * C / x**8\n\n return dfx", "def get_dynamic_bias_from_df(self, x: pd.Series,\n country_df: pd.DataFrame) -> np.ndarray:", "def calc_beta(fx, dfx):\n assert fx.ndim == 1 and fx.shape == dfx.shape\n n = fx.size\n f_bar = fx.mean()\n ratio = (dfx**2).sum() / ((fx - f_bar)**2).sum() * (n-1) / float(n)\n beta = sqrt(((fx - f_bar)**2).sum() / (n-1) * exp(-ratio))\n return beta", "def construct_df(t,y):\n\n df = np.zeros((3,3))\n\n df[0][0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0][1] = 77.27*(1.0 -y(0) )\n df[0][2] = 0.0;\n df[1][0] = -1.0/77.27;\n df[1][1] = (-1.0/77.27)*(1.0+y(0))\n df[1][2] = 1.0/77.27\n df[2][0] = 0.161\n df[2][1] = 0.0\n df[2][2] = -0.161\n\n return df", "def gradientDescent(f, df, x, 
niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def df(self):\n return (self.x-1.0)*(self.y-1.0)", "def ml_df(df, parameters, t_size, model = DecisionTreeRegressor()):\n ndf = df[parameters]\n x = ndf.loc[:, ndf.columns != 'T_exp']\n y = ndf['T_exp']\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=t_size)\n model = model\n p = PolynomialFeatures(degree = 2)\n X_poly = p.fit_transform(x_train)\n X_poly_test = p.fit_transform(x_test)\n model.fit(X_poly,y_train)\n y_train_pred = model.predict(X_poly)\n y_test_pred = model.predict(X_poly_test)\n result = pd.DataFrame()\n result['T_exp'] = y_test\n result['T_prd'] = y_test_pred\n result['ratio'] = result['T_exp']/result['T_prd']\n return result" ]
[ "0.68780404", "0.6571103", "0.65438884", "0.6472203", "0.62649596", "0.622851", "0.6134441", "0.6110367", "0.6021685", "0.5859265", "0.5851953", "0.5832189", "0.57926506", "0.5776251", "0.5776251", "0.5743775", "0.56781954", "0.56520283", "0.5651162", "0.5608268", "0.5604049", "0.5603729", "0.5601341", "0.5599801", "0.5573712", "0.5558541", "0.55469805", "0.5507145", "0.5475143", "0.54740906" ]
0.66248494
1
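(Editorial illustration, not part of the dataset rows above or below.) A minimal sketch of how the sgd_step document could be driven to fit y ≈ beta[0] + beta[1] * x; the helper neg_squared_error_gradient is hypothetical and returns the negative gradient of the squared error, so that adding alpha * df_j to each coordinate moves beta downhill.

def neg_squared_error_gradient(x_i, y_i, beta):
    # Hypothetical df: negative gradient of (y_i - (beta[0] + beta[1] * x_i)) ** 2,
    # chosen so that "beta_j + alpha * df_j" performs gradient descent.
    error = y_i - (beta[0] + beta[1] * x_i)
    return [2 * error, 2 * error * x_i]

data = [(x, 3 + 2 * x) for x in range(5)]   # points lying exactly on y = 3 + 2x
beta = [0.0, 0.0]
for _ in range(500):
    for xy_i in data:
        beta = sgd_step(neg_squared_error_gradient, 0.01, beta, xy_i)
# beta converges toward [3, 2]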
Get the color of the mask at position. Using 2 bits as a color.
def get_color(mask: int, position: int):
    return (mask >> (position << 1)) & 3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask_color(self):\n return self._mask_color", "def set_color(mask: int, position: int, color: int):\n return mask | (color << (position << 1))", "def get_color(self, point):\n \n d = point - self._origin\n dist = int(d.dot(d) ** 0.5) % 2\n if dist == 0:\n return self.c1.dup()\n else:\n return self.c2.dup()", "def get_color(self, _pos):\n return self.__framebuffer[_pos]", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def getPixelColor(self, n):\n\t\treturn self.leds[n]", "def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]", "def get_color(self, point):\n return self._color.dup()", "def getColor(self):\n return self._l[2]", "def get_color(self, coord):\n return self.board[coord[0], coord[1]]", "def decode_target_colorful(cls, mask):\n return cls.cmap[mask]", "def decode_target_colorful(cls, mask):\n return cls.cmap[mask]", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def show_red_mask(img, mask):\n img_ = img\n mask_ = np.bool_(mask)\n red = img_[:, :, 0]\n green = img_[:, :, 1]\n blue = img_[:, :, 2]\n red[mask_] = 255\n green[mask_] = 0\n blue[mask_] = 0\n return img_", "def get_colour(self, x, y):\n if x >= self.width or y >= self.height:\n return (0, 0, 0)\n\n return self.env_img.get_at((int(x), int(y))).normalize()[0:3]", "def getPixelColor(self, n):\n self._logger.debug(\"getPixelColor\")", "def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def color_motion_mask(mask, color=None):\n if color is None:\n color = (220, 20, 60)\n h, w = mask.shape\n ext_mask = np.stack([mask, mask, mask], -1).astype(np.uint8)\n color = np.ones_like(ext_mask) * color\n index = np.ones_like(ext_mask) * 1.0\n final_mask = np.where(ext_mask == index, color, ext_mask).astype(np.uint8)\n return final_mask", "def colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def mask2rgb(mask: NDArray[Int]) -> ndarray:\n color_label_dict = {0: (0, 0, 0),\n 1: (128, 0, 0),\n 2: (0, 128, 0),\n 3: (128, 128, 0),\n 4: (0, 0, 128),\n 5: (128, 0, 128),\n 6: (0, 128, 128),\n 7: (128, 128, 128)}\n\n maskRGB = np.empty((mask.shape[1], mask.shape[2], 3))\n mask = np.squeeze(mask)\n for key in 
color_label_dict.keys():\n pixel_value = color_label_dict[key]\n maskRGB[mask == key] = pixel_value\n\n return maskRGB.astype(np.uint8)", "def get_red(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_red()", "def mask(self):\n return ((2**(self.width) - 1) << self.lsb)", "def get_color(in_val, min_val=0, max_val=100):\n width = max_val - min_val\n unit = width / len(continuum)\n return continuum[min(int(in_val / unit), 19)]", "def get_at(\n self,\n pos: Tuple2NumberType,\n ignore_alpha: bool = False\n ) -> Union[Tuple3IntType, Tuple4IntType, 'pygame.Color']:\n assert_vector(pos, 2)\n color = self._surface.get_at(pos)\n if ignore_alpha:\n return color[0], color[1], color[2]\n return color" ]
[ "0.6762996", "0.6617752", "0.6400042", "0.6352335", "0.6179756", "0.607116", "0.60303086", "0.5943762", "0.59284455", "0.58960706", "0.58665735", "0.58665735", "0.58594924", "0.5858215", "0.58338976", "0.57897294", "0.5776139", "0.5772793", "0.5763152", "0.5751781", "0.57433766", "0.5725651", "0.5725651", "0.5725651", "0.5725651", "0.5704427", "0.5688157", "0.5670523", "0.5668946", "0.56093186" ]
0.8632774
0
Set the color of the mask at position. Using 2 bits as a color.
def set_color(mask: int, position: int, color: int):
    return mask | (color << (position << 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(mask: int, position: int):\n return (mask >> (position << 1)) & 3", "def SetMaskColour(*args, **kwargs):\n return _gdi_.Bitmap_SetMaskColour(*args, **kwargs)", "def setColorIndex(idx):\n dislin.setclr(idx)", "def set_pixel(framebuf, x, y, color):\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) >> 2\n pixel = framebuf.buf[index]\n\n shift = (x & 0b11) << 1\n mask = 0b11 << shift\n color = (color & 0b11) << shift\n\n framebuf.buf[index] = color | (pixel & (~mask))", "def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )", "def set_pixel(self, x, y, v):\n self.buf[y][x] = v & 0x07", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 3\n if isinstance(color, tuple):\n framebuf.buf[index : index + 3] = bytes(color)\n else:\n framebuf.buf[index : index + 3] = bytes(\n ((color >> 16) & 255, (color >> 8) & 255, color & 255)\n )", "def _set_color_mode(self, mode):\n self._write(ST7789_COLMOD, bytes([mode & 0x77]))", "def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)", "def setPixelColor(self, n, color):\n self._logger.debug(\"setPixelColor\")", "def SetMask(*args, **kwargs):\n return _gdi_.Bitmap_SetMask(*args, **kwargs)", "def set_mask(self, h, k, value):\n self.mask[(self.h==h)&(self.k==k)] = value", "def set_pixel(self, framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 2\n framebuf.buf[index : index + 2] = self.color_to_rgb565(color)", "def set_pixel(self, pos, color):\n if pos[0] >= 0 and pos[0] < self.width and pos[1] >= 0 and pos[1] < self.height:\n # Ensure that the y axis increases upwards\n inv_y = self.height - 1 - pos[1]\n pos = (inv_y * self.width * 3) + (pos[0] * 3)\n self.data[pos + 0] = color[0]\n self.data[pos + 1] = color[1]\n self.data[pos + 2] = color[2]", "def set_pixel(self, x, y, value):\r\n \r\n # Rotation and mirroring\r\n a = x\r\n x = y\r\n y = 7-a\r\n \r\n # From the baseclass\r\n if x < 0 or x > 7 or y < 0 or y > 7:\r\n # Ignore out of bounds pixels.\r\n return\r\n # Set green LED based on 1st bit in value.\r\n self.set_led(y * 16 + x, 1 if value & Display.COLOR_GREEN > 0 else 0)\r\n # Set red LED based on 2nd bit in value.\r\n self.set_led(y * 16 + x + 8, 1 if value & Display.COLOR_RED > 0 else 0)", "def setPixelColor(self, n, color):\n\t\t#print \"pxl %s = %s\" % (n, color)\n\t\tif isinstance(n, slice):\n\t\t\tself.leds[n] = [color]*len(self.leds[n])\n\t\telse:\n\t\t\tif n >= 0 or n <= self.size:\n\t\t\t\tself.leds[n] = color\n\t\t#pprint(self.leds)", "def set_pixel(image, pt, color):\n\timage[pt[0], pt[1]] = color", "def set_mask(self, mask):\n self.mask = mask", "def __setitem__(self, pos, value):\n\t\t#pprint(pos)\n\t\t#pprint(self.leds.__getitem__(pos))\n\t\t# Handle if a slice of positions are passed in by setting the appropriate\n\t\t# LED data values to the provided values.\n\t\tself.setPixelColor(pos, value)", "def setPixel (self, x, y, colour):\r\n self.image [y][x] = colour", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + 
int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def set_blue(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2] = value", "def set_at(self, pos: Tuple2NumberType, color: ColorInputType) -> 'BaseImage':\n assert_vector(pos, 2)\n self._surface.set_at(pos, assert_color(color))\n return self", "def put_color(self, _pos, _color):\n assert(((len(_pos) == 2) and (len(_color) == self.__resolution[2])) or\n ((len(_pos) == 3) and (len(_color) == 1)))\n self.__framebuffer[_pos] = _color", "def set_green(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3+ 1] = value", "def setMask(self, mask):\n self.mask = mask", "def set_general_position_red(self, position):\n\n self._general_position_red = position", "def set_bitmask(self, value):\r\n self.__bitmask__ = value | 0xFF00" ]
[ "0.67938983", "0.67375386", "0.6516931", "0.6429108", "0.64179796", "0.6417907", "0.63744414", "0.6285334", "0.61303943", "0.6114359", "0.61034447", "0.6082238", "0.6080526", "0.6059235", "0.60527635", "0.6043658", "0.60351825", "0.59847206", "0.5964086", "0.5936446", "0.59183013", "0.59039086", "0.5898309", "0.58022964", "0.5801325", "0.5767835", "0.5766899", "0.57531756", "0.57263404", "0.57239443" ]
0.8528009
0
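(Editorial illustration, not part of the dataset rows.) A small round-trip check of the two mask helpers above — get_color and set_color — showing that each position occupies two bits of the integer mask.

mask = 0
mask = set_color(mask, 0, 3)        # mask == 0b000011 == 3
mask = set_color(mask, 2, 1)        # mask == 0b010011 == 19
assert get_color(mask, 0) == 3
assert get_color(mask, 2) == 1
assert get_color(mask, 1) == 0      # untouched position reads as color 0
# Caveat: set_color only ORs bits in, so it assumes the two bits at
# `position` are still zero; it cannot overwrite a previously set color.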
Create a new ir.Set instance with given attributes. Absolutely all ir.Set instances must be created using this constructor.
def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:
    ir_set = irast.Set(**kwargs)
    ctx.all_sets.append(ir_set)
    return ir_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **attributes):\n self.set(**attributes)", "def __init__(self,\n *,\n attributes: List['Attribute'] = None) -> None:\n self.attributes = attributes", "def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)", "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def __init__(self, values=None):\n\n self.dict = {} # each instance of Set has its own dict property\n # which is what we'll use to track memnerships\n if values is not None:\n for value in values:\n self.add(value)", "def __init__(self,s={}) -> None:\n\n self.set=list()", "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n path_id: typing.Optional[irast.PathId]=None,\n stype: typing.Optional[s_types.Type]=None,\n ctx: context.ContextLevel) -> irast.Set:\n if path_id is None:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n if stype is None:\n stype = ir_set.stype\n result = new_set(\n path_id=path_id,\n path_scope_id=ir_set.path_scope_id,\n stype=stype,\n expr=ir_set.expr,\n ctx=ctx\n )\n result.rptr = ir_set.rptr\n return result", "def set_attributes(self, attributes):\n self.attributes = attributes", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def __init__(self):\n self.EntireSet = []", "def set_hdf5_attributes(dset, attributes):\n for key in attributes.iterkeys():\n dset.attrs[key] = attributes[key]\n\n return dset", "def __init__(self):\n self.set = set()", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, *args):\n _snap.TIntSet_swiginit(self, _snap.new_TIntSet(*args))", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def __init__(self, name: unicode, set: ghidra.util.graph.KeyIndexableSet):\n ...", "def __init__(self, attribute_names):\r\n self.attribute_names = attribute_names\r\n self.tree = None", "def __init__(self, name: str, attributes: List[Attribute], description: str = \"\"):\n self.name: str = name\n self.attributes = sorted(\n attributes, key=lambda x: x.name\n ) # type: List[Attribute]\n self._check_validity()\n self.attributes_by_name = {a.name: a for a in self.attributes}\n self.description = description", "def __init__(self, **attrs):\n \n # set given attributes\n for name, value in attrs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\"Attribute not found! 
--> %s\" % name)", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def create_set(self, setname='new_set', based_on='data file', included=None,\n excluded=None, strings='keep', arrays='masks', replace=None,\n overwrite=False):\n meta = self._meta\n sets = meta['sets']\n # prove setname\n if not isinstance(setname, str):\n raise TypeError(\"'setname' must be a str.\")\n if setname in sets and not overwrite:\n raise KeyError(\"{} is already in `meta['sets'].`\".format(setname))\n # prove based_on\n if not based_on in sets:\n raise KeyError(\"based_on set '{}' is not in meta['sets'].\".format(based_on))\n # prove included\n if not included: included = [var.split('@')[-1] for var in sets[based_on]['items']]\n\n # prove replace\n if not replace: replace = {}\n elif not isinstance(replace, dict):\n raise TypeError(\"'replace' must be a dict.\")\n else:\n for var in list(replace.keys()) + list(replace.values()):\n if var not in included:\n raise KeyError(\"{} is not in 'included'\".format(var))\n\n # prove arrays\n if not arrays in ['masks', 'columns']:\n raise ValueError (\n \"'arrays' must be either 'masks' or 'columns'.\")\n # filter set and create new set\n fset = filtered_set(meta=meta,\n based_on=based_on,\n masks=True if arrays == 'masks' else False,\n included=included,\n excluded=excluded,\n strings=strings)\n\n # if arrays=='both':\n # new_items = []\n # items = fset['items']\n # for item in items:\n # new_items.append(item)\n # if item.split('@')[0]=='masks':\n # for i in meta['masks'][item.split('@')[-1]]['items']:\n # new_items.append(i['source'])\n # fset['items'] = new_items\n\n if replace:\n new_items = fset['items']\n for k, v in list(replace.items()):\n for x, item in enumerate(new_items):\n if v == item.split('@')[-1]: posv, move = x, item\n if k == item.split('@')[-1]: posk = x\n new_items[posk] = move\n new_items.pop(posv)\n fset['items'] = new_items\n\n add = {setname: fset}\n sets.update(add)\n\n return None", "def __init__(self):\n self.ds = set()\n self.keys = []", "def __init__(self, elements):\n self.elements = set()\n for el in elements:\n if not isinstance(el, Element):\n el = Element(el)\n self.elements.add(el)", "def __init__(self, tag=None, attributes=(), header=None, column_number=None):\n if tag:\n tag = tag.lower()\n self.tag = tag\n self.header = header\n self.column_number = column_number\n self.attributes = set([a.lower() for a in attributes])\n self.attribute_list = [a.lower() for a in attributes] # to preserve order", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet" ]
[ "0.7283344", "0.6427488", "0.64081055", "0.621952", "0.620814", "0.6190213", "0.6113432", "0.6045844", "0.600017", "0.600017", "0.59091234", "0.59091234", "0.59091234", "0.58970034", "0.584854", "0.5807153", "0.5795997", "0.5795997", "0.579595", "0.57853985", "0.57681274", "0.5765502", "0.5747845", "0.5729245", "0.5681843", "0.5653271", "0.56460977", "0.56094784", "0.56030023", "0.5585468" ]
0.7015722
1
Create a new ir.Set from another ir.Set. The new Set inherits source Set's scope, schema item, expression, and, if preserve_scope_ns is set, path_id. If preserve_scope_ns is False, the new Set's path_id will be namespaced with the currently active scope namespace.
def new_set_from_set(
        ir_set: irast.Set, *,
        preserve_scope_ns: bool=False,
        path_id: typing.Optional[irast.PathId]=None,
        stype: typing.Optional[s_types.Type]=None,
        ctx: context.ContextLevel) -> irast.Set:
    if path_id is None:
        path_id = ir_set.path_id
    if not preserve_scope_ns:
        path_id = path_id.merge_namespace(ctx.path_id_namespace)
    if stype is None:
        stype = ir_set.stype
    result = new_set(
        path_id=path_id,
        path_scope_id=ir_set.path_scope_id,
        stype=stype,
        expr=ir_set.expr,
        ctx=ctx
    )
    result.rptr = ir_set.rptr
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def create_set(self, setname='new_set', based_on='data file', included=None,\n excluded=None, strings='keep', arrays='masks', replace=None,\n overwrite=False):\n meta = self._meta\n sets = meta['sets']\n # prove setname\n if not isinstance(setname, str):\n raise TypeError(\"'setname' must be a str.\")\n if setname in sets and not overwrite:\n raise KeyError(\"{} is already in `meta['sets'].`\".format(setname))\n # prove based_on\n if not based_on in sets:\n raise KeyError(\"based_on set '{}' is not in meta['sets'].\".format(based_on))\n # prove included\n if not included: included = [var.split('@')[-1] for var in sets[based_on]['items']]\n\n # prove replace\n if not replace: replace = {}\n elif not isinstance(replace, dict):\n raise TypeError(\"'replace' must be a dict.\")\n else:\n for var in list(replace.keys()) + list(replace.values()):\n if var not in included:\n raise KeyError(\"{} is not in 'included'\".format(var))\n\n # prove arrays\n if not arrays in ['masks', 'columns']:\n raise ValueError (\n \"'arrays' must be either 'masks' or 'columns'.\")\n # filter set and create new set\n fset = filtered_set(meta=meta,\n based_on=based_on,\n masks=True if arrays == 'masks' else False,\n included=included,\n excluded=excluded,\n strings=strings)\n\n # if arrays=='both':\n # new_items = []\n # items = fset['items']\n # for item in items:\n # new_items.append(item)\n # if item.split('@')[0]=='masks':\n # for i in meta['masks'][item.split('@')[-1]]['items']:\n # new_items.append(i['source'])\n # fset['items'] = new_items\n\n if replace:\n new_items = fset['items']\n for k, v in list(replace.items()):\n for x, item in enumerate(new_items):\n if v == item.split('@')[-1]: posv, move = x, item\n if k == item.split('@')[-1]: posk = x\n new_items[posk] = move\n new_items.pop(posv)\n fset['items'] = new_items\n\n add = {setname: fset}\n sets.update(add)\n\n return None", "def extend_path(\n source_set: irast.Set,\n ptrcls: s_pointers.Pointer,\n direction: PtrDir=PtrDir.Outbound,\n target: typing.Optional[s_nodes.Node]=None, *,\n ignore_computable: bool=False,\n force_computable: bool=False,\n unnest_fence: bool=False,\n same_computable_scope: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n\n if ptrcls.is_link_property(ctx.env.schema):\n src_path_id = source_set.path_id.ptr_path()\n else:\n if direction != s_pointers.PointerDirection.Inbound:\n source = ptrcls.get_near_endpoint(ctx.env.schema, direction)\n if not source_set.stype.issubclass(ctx.env.schema, source):\n # Polymorphic link reference\n source_set = class_indirection_set(\n source_set, source, optional=True, ctx=ctx)\n\n src_path_id = source_set.path_id\n\n if target is None:\n target = ptrcls.get_far_endpoint(ctx.env.schema, direction)\n path_id = src_path_id.extend(ptrcls, direction, target,\n ns=ctx.path_id_namespace,\n schema=ctx.env.schema)\n\n target_set = new_set(stype=target, path_id=path_id, ctx=ctx)\n\n ptr = irast.Pointer(\n source=source_set,\n target=target_set,\n ptrcls=ptrcls,\n direction=direction\n )\n\n target_set.rptr = ptr\n\n if (not ignore_computable and _is_computable_ptr(\n ptrcls, force_computable=force_computable, ctx=ctx)):\n target_set = computable_ptr_set(\n ptr, unnest_fence=unnest_fence,\n same_computable_scope=same_computable_scope, ctx=ctx)\n\n return target_set", "def copy(self):\n products_by_target = defaultdict(OrderedSet)\n for key, value in 
self._products_by_target.items():\n products_by_target[key] = OrderedSet(value)\n return UnionProducts(products_by_target=products_by_target)", "def copy(self):\n r = SubsSet()\n r.rewrites = self.rewrites.copy()\n for expr, var in self.items():\n r[expr] = var\n return r", "def copySet(_session, _set_src, _set_dst, _segment):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _set_src,\n sc.SC_ARC,\n 0), True)\n \n while not it.is_over():\n# s_el = it.value(2)\n# _idtf = _session.get_idtf(s_el)\n# el = s_el\n# if isSystemId(_idtf):\n# el = _session.create_el(_segment, _session.get_type(s_el))\n createPair(_session, _segment, _set_dst, it.value(2), _session.get_type(it.value(1)))\n it.next()", "def fixSets(namespace):\n\ttry:\n\t\tset\n\texcept:\n\t\timport sets\n\t\tnamespace[\"set\"] = sets.Set\n\t\tnamespace[\"frozenset\"] = sets.ImmutableSet", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def make_set(node):\n node.parent = node\n node.rank = 0", "def copy(self):\n copy = GeneSet(dict())\n\n copy.gene_sets = deepcopy(self.gene_sets)\n copy.gene_set_names = deepcopy(self.gene_set_names)\n copy.gene_set_size = deepcopy(self.gene_set_size)\n copy.interactors = deepcopy(self.interactors)\n copy.n_curated = deepcopy(self.n_curated)\n copy.n_interactors = deepcopy(self.n_interactors)\n\n return copy", "def copy(self):\n return self.__class__(*self.sets)", "def from_sets(cls, set1, set2, universe_size=None):\n if not isinstance(set1, Set):\n set1 = set(set1)\n if not isinstance(set2, Set):\n set2 = set(set2)\n TP = len(set1 & set2)\n FP = len(set2) - TP\n FN = len(set1) - TP\n if universe_size is None:\n TN = 0\n else:\n TN = universe_size - TP - FP - FN\n if TN < 0:\n raise ValueError(\n \"universe_size must be at least as large as set union\")\n return cls(TP, FN, FP, TN)", "def copy(self) -> 'RangeSet':\n return RangeSet(self)", "def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)", "def from_node(cls, variable):\n return cls(variable.name, variable.container is ast.ContainerTypes.Set)", "def make_network_set(name, networkUris=[]):\n\n return {\n 'name': name,\n 'type': 'network-set',\n 'nativeNetworkUri': None,\n 'networkUris': networkUris[:],\n 'connectionTemplateUri': None}", "def subset(self, variables=None, from_set=None, inplace=False):\n if not (variables or from_set) or (variables and from_set):\n err = \"Must pass either 'variables' or 'from_set'!\"\n raise ValueError(err)\n subset_ds = self.clone() if not inplace else self\n sets = subset_ds._meta['sets']\n if variables:\n from_set = 'subset'\n subset_ds.create_set(setname='subset', included=variables)\n else:\n if not from_set in sets:\n err = \"'{}' not found in meta 'sets' collection!\"\n raise KeyError(err.format(from_set))\n variables = [v.split('@')[-1] for v in sets[from_set]['items']]\n all_vars = subset_ds.columns() + subset_ds.masks()\n for var in all_vars:\n if not var in variables:\n if not self._is_array_item(var): subset_ds.drop(var)\n sets['data file']['items'] = sets[from_set]['items']\n del subset_ds._meta['sets'][from_set]\n\n if not inplace:\n return subset_ds\n else:\n return None", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def cast_value_to_set(self, name: str, value: Iterable) -> Set:\n return set(self.get_object_from_name(elem, name) for elem in value)", "def set(self) -> set:\n return set(self)", 
"def copySetFrom (self, other):\n\n if other.hasLocalTimeString():\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n self._myLocalTimeStringRequested=other._myLocalTimeStringRequested\n \n if other.hasUtcTimeString():\n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n self._myUtcTimeStringRequested=other._myUtcTimeStringRequested\n \n if other.hasDaylightSavingTime():\n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n self._myDaylightSavingTimeRequested=other._myDaylightSavingTimeRequested\n \n if other.hasEpoch():\n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n self._myEpochRequested=other._myEpochRequested\n \n if other.hasUtcOffsetMinutes():\n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes\n self._myUtcOffsetMinutesRequested=other._myUtcOffsetMinutesRequested", "def to_id_set_entity(self) -> dict:\n id_set_entity = self.dict()\n id_set_entity[\"file_path\"] = str(self.path)\n id_set_entity[\"pack\"] = self.in_pack.object_id # type: ignore[union-attr]\n return id_set_entity", "def __eq__(self, values):\n set_values = [set_value(val) for val in values]\n self.filter.filter_domain.set.elements.extend(set_values)\n return self", "def copy(self):\n return set(self)", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def __and__(self, rs):\n revs = {}\n for r in self._revs.keys():\n if r in rs:\n revs[r] = 1\n return RevisionSet(revs)", "def get_id_set(id_set_path: str) -> dict:\n if id_set_path:\n id_set = open_id_set_file(id_set_path)\n else:\n id_set, _, _ = IDSetCreator(print_logs=False).create_id_set()\n return id_set", "def copy(self):\n return IntervalSet(self)", "def clone(self):\n return _libsbml.QualPkgNamespaces_clone(self)", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s" ]
[ "0.6440217", "0.58299524", "0.56834817", "0.53686655", "0.5344023", "0.5315017", "0.52834666", "0.5264932", "0.5153018", "0.50790906", "0.5055263", "0.5012567", "0.49638426", "0.49290437", "0.49098164", "0.4880802", "0.48652387", "0.4844571", "0.48155996", "0.48035938", "0.47817656", "0.477516", "0.47622675", "0.47521996", "0.4746269", "0.47249737", "0.46983075", "0.46812952", "0.46733373", "0.46682075" ]
0.85868055
0
Return ir.Set for a pointer defined as a computable.
def computable_ptr_set( rptr: irast.Pointer, *, unnest_fence: bool=False, same_computable_scope: bool=False, ctx: context.ContextLevel) -> irast.Set: ptrcls = rptr.ptrcls source_set = rptr.source source_scls = source_set.stype # process_view() may generate computable pointer expressions # in the form "self.linkname". To prevent infinite recursion, # self must resolve to the parent type of the view NOT the view # type itself. Similarly, when resolving computable link properties # make sure that we use rptr.ptrcls.derived_from. if source_scls.is_view(ctx.env.schema): source_set = new_set_from_set( source_set, preserve_scope_ns=True, ctx=ctx) source_set.stype = source_scls.peel_view(ctx.env.schema) source_set.shape = [] if source_set.rptr is not None: schema = ctx.env.schema derived_from = source_set.rptr.ptrcls.get_derived_from(schema) if (derived_from is not None and not derived_from.generic(schema) and derived_from.get_derived_from(schema) is not None and ptrcls.is_link_property(schema)): source_set.rptr.ptrcls = derived_from try: qlexpr, qlctx, inner_source_path_id, path_id_ns = \ ctx.source_map[ptrcls] except KeyError: ptrcls_default = ptrcls.get_default(ctx.env.schema) if not ptrcls_default: ptrcls_sn = ptrcls.get_shortname(ctx.env.schema) raise ValueError( f'{ptrcls_sn!r} is not a computable pointer') if isinstance(ptrcls_default, s_expr.ExpressionText): qlexpr = astutils.ensure_qlstmt(qlparser.parse(ptrcls_default)) else: qlexpr = qlast.BaseConstant.from_python(ptrcls_default) qlctx = None inner_source_path_id = None path_id_ns = None if qlctx is None: # Schema-level computable, completely detached context newctx = ctx.detached else: newctx = _get_computable_ctx( rptr=rptr, source=source_set, source_scls=source_scls, inner_source_path_id=inner_source_path_id, path_id_ns=path_id_ns, same_scope=same_computable_scope, qlctx=qlctx, ctx=ctx) if ptrcls.is_link_property(ctx.env.schema): source_path_id = rptr.source.path_id.ptr_path() else: source_path_id = rptr.target.path_id.src_path() path_id = source_path_id.extend( ptrcls, s_pointers.PointerDirection.Outbound, ptrcls.get_target(ctx.env.schema), ns=ctx.path_id_namespace, schema=ctx.env.schema) with newctx() as subctx: subctx.view_scls = ptrcls.get_target(ctx.env.schema) subctx.view_rptr = context.ViewRPtr( source_scls, ptrcls=ptrcls, rptr=rptr) subctx.anchors[qlast.Source] = source_set subctx.empty_result_type_hint = ptrcls.get_target(ctx.env.schema) if isinstance(qlexpr, qlast.Statement) and unnest_fence: subctx.stmt_metadata[qlexpr] = context.StatementMetadata( is_unnest_fence=True) comp_ir_set = dispatch.compile(qlexpr, ctx=subctx) if ptrcls in ctx.pending_cardinality: comp_ir_set_copy = copy.copy(comp_ir_set) specified_card, source_ctx = ctx.pending_cardinality[ptrcls] stmtctx.get_pointer_cardinality_later( ptrcls=ptrcls, irexpr=comp_ir_set_copy, specified_card=specified_card, source_ctx=source_ctx, ctx=ctx) def _check_cardinality(ctx): if ptrcls.singular(ctx.env.schema): stmtctx.enforce_singleton_now(comp_ir_set_copy, ctx=ctx) stmtctx.at_stmt_fini(_check_cardinality, ctx=ctx) comp_ir_set.stype = ptrcls.get_target(ctx.env.schema) comp_ir_set.path_id = path_id comp_ir_set.rptr = rptr rptr.target = comp_ir_set return comp_ir_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSet(unique_name):", "def getSet(unique_name):", "def set():", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def set_of(element: Type) -> SetType:\n return SetType(element)", "def __rxor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__rxor__', other)", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def set(self) -> set:\n return set(self)", "def set(x):\n pass", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def force_frozenset(obj): \n # make it a set/tuple of 1 if it is a scalar and not a set already\n return tuple(force_hashable(obj))", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1", "def cfset_to_set(cfset):\n count = cf.CFSetGetCount(cfset)\n buffer = (c_void_p * count)()\n cf.CFSetGetValues(cfset, byref(buffer))\n return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)])", "def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1", "def to_set(self):\n\n return frozenset(\n (i, j, self[i][j]) for i, j in self.cell_index_iter if self[i][j] is not None\n )", "def set():\n pass", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def __or__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__or__', other)", "def get_from_set(set_):\n for e in set_: return e", "def find_set(self):\n return self._set_set(self._find_set())", "def set(self):\n return self.cdb.code_to_card_set[self.set_code]", "def chain_set(mixed_chains):\n return set([i[0] for i in mixed_chains])", "def define_set():\n set_1 = set([1, 2, 3])\n print type(set_1)\n print set_1\n\n set_2 = {2, 3, 2}\n print type(set_2)\n # <type 'set'>\n print set_2\n # set([2, 3])\n\n a = set((1, 2, 3, 4))\n b = set([3, 4, 5, 6])\n print a | b # Union\n # {1, 2, 3, 4, 5, 6}\n print a & b # Intersection\n # {3, 4}\n print a < b # Subset\n # False\n print a - b # Difference\n # {1, 2}\n print a ^ b # Symmetric Difference\n # {1, 2, 5, 6}", "def valueSet(rbt):\n try:\n vlist = lt.newList('SINGLE_LINKED', rbt['cmpfunction'])\n vlist = valueSetTree(rbt['root'], vlist)\n return vlist\n except Exception as exp:\n error.reraise(exp, 'RBT:valueSet')", "def make_set(node):\n node.parent = node\n node.rank = 0", "def unique(self):\n return frozenset(self)", "def __getitem__(self, name: str) -> Set[BaseAssignment]:\n ...", "def cast_value_to_set(self, name: str, value: Iterable) -> Set:\n return set(self.get_object_from_name(elem, name) for elem in value)", "def commonSetElementPredicate(field_set: Sequence[Any]) -> FrozenSet[str]:\n\n return frozenset(str(item) for item in field_set)" ]
[ "0.63821733", "0.63821733", "0.6101638", "0.60650355", "0.5940626", "0.5939873", "0.5794654", "0.5785868", "0.5782201", "0.5721857", "0.5620131", "0.55960476", "0.559237", "0.55909944", "0.55824697", "0.5576209", "0.55492747", "0.5549152", "0.55308235", "0.551", "0.5487735", "0.5482171", "0.5480994", "0.54633623", "0.5419847", "0.54120576", "0.54119426", "0.540239", "0.535908", "0.53522295" ]
0.69799614
0
Return founder and offspring subset of basename.ped containing only the markers in lcd. lcd contains a sorted list of (chrom,offset,rs) for the common SNPs in all maps; we need to keep genotypes all in the same column order.
def subsetPed(basename="",lcdmap = [],faff='1', ofaff='2'): mf = file('%s.map' % basename,'r').readlines() lmap = [x.strip().split() for x in mf] rscols = {} # lookup marker table colrs = [] # lookup rs from column for i,m in enumerate(lmap): # get columns to keep in the order we want them rscols[m[1]] = i # keep track of where each rs is in this map colrs.append(m[1]) # and keep the list of rs for tracking alleles wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \ (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename) pf = file('%s.ped' % basename,'r') ogeno = [] # offspring new lines fgeno = [] # founders oped = [] # for pedigrees fped = [] rsadict = {} # keep a count of alleles - seems to be a problem for i,l in enumerate(pf): if (i+1) % 500 == 0: print '%s at line %d' % (basename,i+1) ll = l.strip().split() ped = ll[:6] founder = (ll[2] == '0' and ll[3] == '0') aff = faff if not founder: aff = ofaff ped[5] = aff # adjust as needed if founder: fped.append(ped) else: oped.append(ped) gt = ll[6:] geno = [] for snp in wewant: # columns in order thisrs = colrs[snp] base = snp*2 g1 = gt[base] g2 = gt[base+1] geno.append(g1) geno.append(g2) if not rsadict.get(thisrs,None): rsadict[thisrs] = {} if g1 <> '0': if not rsadict[thisrs].get(g1,None): rsadict[thisrs][g1] = 1 else: rsadict[thisrs][g1] += 1 if g2 <> '0': if not rsadict[thisrs].get(g2,None): rsadict[thisrs][g2] = 1 else: rsadict[thisrs][g2] += 1 keepgt = array.array('c',geno) if founder: fgeno.append(keepgt) else: ogeno.append(keepgt) print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno)) return fped,oped,fgeno,ogeno,rsadict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLCD(lbase=[]):\r\n listmf = []\r\n rsdict = {}\r\n for i,basename in enumerate(lbase): # for each basename to be included\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf] \r\n rslist = [x[1] for x in lmap] # chrom rs gendist physdist\r\n for x in lmap:\r\n rsdict[x[1]] = (x[0],int(x[3]),x[1]) # key by chrom,offset,rs\r\n setrs = set(rslist)\r\n listmf.append(setrs) # list of map lines for processing\r\n lcd = listmf.pop(0) # start with first - order doesn't matter\r\n for setrs in listmf:\r\n lcd = lcd & setrs # intersection\r\n lcd = list(lcd) # now have lowest common denom as a list of rs\r\n lcdmap = [rsdict[rs] for rs in lcd] # restore chrom,offset,rs for rs to keep\r\n lcdmap.sort() # now in genomic order\r\n print 'got lcdmap=',lcdmap[:10]\r\n return lcdmap # sorted common map\r", "def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0):\r\n lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files\r\n print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5])\r\n cfped = []\r\n coped = []\r\n cfgeno = []\r\n cogeno = []\r\n allrsa = {}\r\n ignorers = {}\r\n for i,basename in enumerate(bnlist):\r\n fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i])\r\n print '%s gave %d fgeno' % (basename,len(fgeno))\r\n for rs in trsadict.keys():\r\n tk = trsadict[rs].keys()\r\n if len(tk) > 2:\r\n print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs])\r\n if not allrsa.get(rs,None):\r\n allrsa[rs] = {}\r\n for a in tk:\r\n if not allrsa[rs].get(a,None):\r\n allrsa[rs][a] = trsadict[rs][a]\r\n else:\r\n allrsa[rs][a] += trsadict[rs][a]\r\n tk = allrsa[rs].keys()\r\n if len(tk) > 2 and not ignorers.get(rs,None): # new\r\n #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs])\r\n ignorers[rs] = rs\r\n cfped += fped\r\n coped += oped\r\n cfgeno += fgeno\r\n cogeno += ogeno\r\n print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno))\r\n # now have offspring and founder rows in lcdmap order\r\n # write map file\r\n print '### found %d markers > 2 alleles' % (len(ignorers.keys()))\r\n keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)]\r\n newmap = ['\\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs\r\n f = file('%s.map' % newbasename,'w')\r\n f.write('%s\\n' % '\\n'.join(newmap))\r\n f.close()\r\n for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno[i] = g # replace\r\n print 'cfgeno converted'\r\n if not fo: # not founders only - note arrays are not lists!\r\n cfped += copy.copy(coped) #\r\n del coped\r\n for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno.append(g) # extend founders\r\n del cogeno\r\n print 'after if not fo now have %d cfgeno' % (len(cfgeno))\r\n f = file('%s.ped' % newbasename,'w')\r\n for n,ped in enumerate(cfped):\r\n l = ' '.join(ped + list(cfgeno[n]))\r\n if n % 100 == 0 and n > 0:\r\n print 'writing line %d' % n\r\n f.write(l)\r\n f.write('\\n')\r\n f.close()\r\n print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)", "def getStartEndCoords(fileName1, fileName2):\n 
uniqueNames = dict()\n with open(fileName1, \"r\", encoding=\"utf8\") as f1:\n f1 = csv.reader(f1, delimiter='\\t')\n for ls in f1:\n start = ls[0][4:].strip()\n normStart = norm.normalize_alphabet(start)\n start_reg = ls[1]#.strip().split(\",\")\n startKey = ','.join([normStart] + start_reg.strip().split(\",\"))\n startKey_orig = ','.join([start] + start_reg.strip().split(\",\"))\n end = ls[2][4:].strip()\n normEnd = norm.normalize_alphabet(end)\n end_reg = ls[3]#.strip().split(\",\")\n endKey = ','.join([normEnd] + end_reg.strip().split(\",\"))\n endKey_orig = ','.join([end] + end_reg.strip().split(\",\"))\n\n with open(fileName2, \"r\", encoding=\"utf8\") as jsonFile: \n allData = json.load(jsonFile)\n for d in allData[\"features\"]:\n # populates the uniqueNames dictionary for start and end toponyms\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n uniqueNames.update(populateDict(start, start_reg, d))\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n uniqueNames.update(populateDict(end, end_reg, d))\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [startKey, startKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat'] = \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if startKey_orig not in uniqueNames:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat']= \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [endKey, endKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat'] = \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if endKey_orig not in uniqueNames:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat']= \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n return uniqueNames", "def getORFs(catFile, queryName, geneDir):\n\n\toutORFraw = geneDir+catFile.split(\"/\")[-1].split(\".\")[0]+\"_allORFs.fasta\"\n\tlogger = logging.getLogger(\"main.orf\")\n\t\n\tlogger.debug(\"getorf -sequence {:s} -outseq {:s} -table 0 -find 3 -noreverse\".format(catFile, outORFraw))\n\tcmd(\"getorf -sequence {:s} -outseq {:s} -table 0 -find 3 -noreverse\".format(catFile, outORFraw), False)\n\t\n\tdId2ORFs = defaultdict(list)\n\tf = SeqIO.parse(open(outORFraw),'fasta')\n\tfor fasta in f:\n\t\tfname, fseq = fasta.id, str(fasta.seq)\n\t\tif len(fname.split(\"_\")) > 2:\n\t\t\tfname2 = \"_\".join(fname.split(\"_\")[0:-1])\n\t\telse:\n\t\t\tfname2 = fname.split(\"_\")[0]\n\t\tdId2ORFs[fname2].append(fseq)\n\t\n\tdId2Longest = {}\n\tfor k, v in dId2ORFs.items():\n\t\tdId2Longest[k] = max(v, key=len)\n\t\t\n\t# delete duplicate sequences\n\tdRev = {}\n\tfor k, v in dId2Longest.items():\n\t\tdRev.setdefault(v, set()).add(k)\n\t\t\n\tAllDupl = [values for key, values in dRev.items() if len(values) > 1]\n\tn = 0\n\tfor dupl in AllDupl:\n\t\tspecies = set([x.split(\"_\")[0] for x in 
dupl])\n\t\t\n\t\tfor sp in species:\n\t\t\tif queryName in dupl:\n\t\t\t\tfirstOcc = queryName\n\t\t\telse:\n\t\t\t\tlOcc = [x for x in dupl if sp in x]\n\t\t\t\t\n\t\t\t\tif len(lOcc) > 0:\n\t\t\t\t\tfirstOcc = lOcc[0]\n\t\t\t\telse:\n\t\t\t\t\tfirstOcc = str(lOcc)\n\t\t\t\t\t\n\t\t\tdupl.remove(firstOcc)\n\t\t\n\t\tfor i in dupl:\n\t\t\tdId2Longest.pop(i, None)\n\t\t\tn += 1\n\t\t\tlogger.debug(\"Deleted sequence {:s} (duplicate)\".format(i))\n\t\t\n\tlogger.info(\"Deleted {} sequences as duplicates\".format(n))\n\t\n\toutORF = outORFraw.replace(\"_allORFs.fasta\",\"_longestORFs.fasta\")\n\n\twith open(outORF, \"w\") as outO:\n\t outO.write(FastaResFunc.dict2fasta(dId2Longest))\n\t outO.close()\n\t \n\tlogger.info(\"Extracted longest ORFs: {:s}\".format(outORF))\n\n\treturn(outORF)", "def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()", "def get_reps_filenames(celltype): \n prefix = os.path.join(os.getcwd(),'peaks',celltype,'MACS2')\n reps = os.listdir(prefix)\n return [os.path.join(prefix,rep) for rep in reps if rep.endswith('sorted.bdg')]", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def F_subset_OMHCHO(self,path):\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n 
days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('OMI-Aura_L2-OMHCHO_'+DATE.strftime(\"%Ym%m%d\")+'t*.he5')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n maxMDQF = self.maxMDQF\n maxEXTQF = self.maxEXTQF\n \n data_fields = ['AMFCloudFraction','AMFCloudPressure','AirMassFactor','Albedo',\\\n 'ReferenceSectorCorrectedVerticalColumn','ColumnUncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n data_fields_l2g = ['cloud_fraction','cloud_pressure','amf','albedo',\\\n 'column_amount','column_uncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n geo_fields = ['Latitude','Longitude','TimeUTC','SolarZenithAngle',\\\n 'TerrainHeight','XtrackQualityFlagsExpanded']\n geo_fields_l2g = ['latc','lonc','TimeUTC','SolarZenithAngle',\\\n 'terrain_height','XtrackQualityFlagsExpanded']\n swathname = 'OMI Total Column Amount HCHO'\n \n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading'+fn_dir)\n outp_he5 = self.F_read_he5(fn_dir,swathname,data_fields,geo_fields,data_fields_l2g,geo_fields_l2g)\n f1 = outp_he5['SolarZenithAngle'] <= maxsza\n f2 = outp_he5['cloud_fraction'] <= maxcf\n f3 = outp_he5['MainDataQualityFlag'] <= maxMDQF \n f4 = outp_he5['latc'] >= south\n f5 = outp_he5['latc'] <= north\n tmplon = outp_he5['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_he5['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_he5['UTC_matlab_datenum'] <= self.end_matlab_datenum\n f10 = outp_he5['XtrackQualityFlagsExpanded'] <= maxEXTQF\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9 & f10\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n \n l2g_data0 = {}\n \n Lat_lowerleft = outp_he5['PixelCornerLatitudes'][0:-1,0:-1][validmask]\n Lat_upperleft = outp_he5['PixelCornerLatitudes'][1:,0:-1][validmask]\n Lat_lowerright = outp_he5['PixelCornerLatitudes'][0:-1,1:][validmask]\n Lat_upperright = outp_he5['PixelCornerLatitudes'][1:,1:][validmask] \n Lon_lowerleft = outp_he5['PixelCornerLongitudes'][0:-1,0:-1][validmask]\n Lon_upperleft = outp_he5['PixelCornerLongitudes'][1:,0:-1][validmask]\n Lon_lowerright = outp_he5['PixelCornerLongitudes'][0:-1,1:][validmask]\n Lon_upperright = outp_he5['PixelCornerLongitudes'][1:,1:][validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_he5.keys():\n if key not in {'MainDataQualityFlag','PixelCornerLatitudes',\\\n 'PixelCornerLongitudes','TimeUTC','XtrackQualityFlagsExpanded'}:\n l2g_data0[key] = outp_he5[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 
'a')\n print('NOTE: appending to map files, not overwriting. may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def subFarms(self,partition,full_name=None):\n if self.inUse.value() is None: self.load()\n got = []\n for i in xrange(len(self.inUse.data)):\n if self.inUse.data[i]==partition:\n if full_name:\n got.append(self.name+'_'+self.subfarms.data[i])\n else:\n got.append(self.subfarms.data[i])\n return got", "def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_orfs(genome, min_num_aa):\n allowed = \"ATGC\"\n if not isinstance(genome, str) or len(genome) == 0 or not all(c in allowed for c in genome):\n raise TypeError\n start_codon = \"ATG\"\n stop_codon = ['TAA', 'TAG', 'TGA']\n ref_dict = {\"T\" : \"A\", \"A\" : \"T\", \"G\" : \"C\", \"C\" : \"G\"}\n amino_dict = {\n 'L' : ['CTC', 'CTT', 'CTA', 'CTG', 'TTA', 'TTG'],\n 'S' : ['TCA', 'TCT', 'TCC', 'TCG', 'AGC', 'AGT'],\n 'R' : ['CGA', 'CGC', 'CGT', 'CGG', 'AGA', 'AGG'],\n 'V' : ['GTA', 'GTG', 'GTC', 'GTT'],\n 'P' : ['CCC', 'CCA', 'CCG', 'CCT'],\n 'T' : ['ACC', 'ACG', 'ACT', 'ACA'],\n 'A' : ['GCA', 'GCC', 'GCG', 'GCT'],\n 'G' : ['GGA', 'GGC', 'GGT', 'GGG'],\n 'I' : ['ATA', 'ATC', 'ATT'],\n 'F' : ['TTT', 'TTC'],\n 'Y' : ['TAT', 'TAC'],\n 'H' : ['CAC', 'CAT'],\n 'Q' : ['CAG', 'CAA'],\n 'N' : ['AAC', 'AAT'],\n 'K' : ['AAA', 'AAG'],\n 'D' : ['GAC', 'GAT'],\n 'E' : ['GAA', 'GAG'],\n 'C' : ['TGC', 'TGT'],\n 'M' : ['ATG'],\n 'W' : ['TGG']\n\n }\n comp_genome = \"\"\n for stra in genome:\n comp_genome += ref_dict[stra]\n main_orfs = find_orfs(genome, start_codon, stop_codon, min_num_aa, amino_dict, False)\n comp_orfs = find_orfs(comp_genome[::-1], start_codon, stop_codon, min_num_aa, amino_dict, True)\n circular_orfs = find_cir_orfs(genome, main_orfs, start_codon, stop_codon, min_num_aa, amino_dict, False)\n \n circular_orfs_comp = find_cir_orfs(comp_genome[::-1], comp_orfs, start_codon, stop_codon, min_num_aa, amino_dict, True)\n \n for main_orf in main_orfs:\n for cir_orf in circular_orfs:\n if main_orf[0] <= cir_orf[1] and main_orf[1] <= cir_orf[1] or len(main_orf) == 5:\n main_orfs.remove(main_orf)\n for comp_orf in comp_orfs:\n for cir_orf in circular_orfs_comp:\n if comp_orf[1] == cir_orf[1] or len(comp_orf) == 5:\n comp_orfs.remove(comp_orf)\n\n final_orf = main_orfs + comp_orfs + circular_orfs + circular_orfs_comp\n #print(len(comp_orfs))\n 
\n \n \n return final_orf", "def create_file_list_popdiag(case,workdir):\n indir = os.path.join('/',workdir)\n allfiles = os.listdir(indir)\n\n suffix = ('-01.nc','-02.nc','-03.nc','-04.nc','-05.nc','-06.nc', \\\n '-07.nc','-08.nc','-09.nc','-10.nc','-11.nc','-12.nc')\n\n filelist = [os.path.join(indir,file) for file in allfiles\n if file.startswith(case) and file.endswith(suffix)]\n\n filelist.sort()\n return filelist", "def F_subset_S5PNO2(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_RPRO_L2__NO2____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/cloud_fraction_crb_nitrogendioxide_window',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo_nitrogendioxide_window',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time_utc',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time_utc',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def mkglob(fullpaths: list, trim=False) -> str:\n string_list = []\n glob = None\n for fname in fullpaths:\n if trim:\n fname = re.sub(r\"^.*/(.*)$\", r\"\\1\", fname)\n # fname = re.sub(r\"^(.*)\\.fits?(\\.fz)*$\", r\"\\1\", fname)\n fname = re.sub(r\"^([^\\.]*)\\..*$\", r\"\\1\", fname) # trim suffix\n string_list.append(fname)\n logging.debug(\"string_list[]={}\".format(string_list))\n if len(string_list) == 1:\n glob = string_list[0]\n elif len(string_list) > 1:\n # title is longest common substring array\n # joined with *'s to look like a glob pattern\n ss_arr = []\n get_lcs_array(string_list, ss_arr, 0, \"\", 2)\n if ss_arr:\n glob = \"{}\".format(\"*\".join(ss_arr))\n if not re.match(ss_arr[0], string_list[0]):\n glob = \"*{}\".format(glob)\n if not re.search(r\"{}$\".format(ss_arr[-1]), string_list[0]):\n glob = \"{}*\".format(glob)\n return glob", "def getGFFStartEnd(file, len_param):\n dicS = {}\n dicE = {}\n direct = {}\n for line in open(file):\n itemList = line[:-1].split('\\t')\n start = int(itemList[3])-len_param\n if start <0:\n start = 0\n end = int(itemList[4])+len_param\n #id = 
getsubString(itemList[8][4:],';') # ToDo: need to check for other species\n id = itemList[8][itemList[8].find('=')+1:itemList[8].find(';')]\n dicS[id]= start\n dicE[id]= end\n direct[id] = itemList[6]\n return dicS,dicE,direct", "def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n # compare individual methods\n for clf, method in (\n (KMeans(n_clusters=2), \"KMeans\"), \n (KNeighborsClassifier(), \"KNN\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n #(OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n for i, cols_start in enumerate(cols_starts, 1):\n results = []\n feat_name = \"_\".join(cols_start)\n fname = \"%s.%s\"%(method, feat_name); print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext))\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names))#; print(cols) #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name, \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #return freqs\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey=\"all\")\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#f8786fff\", \"#7aae02ff\", \"#00bfc2ff\", \"#c67afeff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"pU\")], ax=ax1)\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#ed823aff\", \"#1c6ca9ff\", \"#35d1bbff\", \"#c978fdff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"Nm\")], ax=ax2)\n ax1.set_ylabel(\"Per-site stoichiometry\"); ax2.set_ylabel(\"\")\n 
ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])\n ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)\n ax1.set_title(\"pU modifications\"); ax2.set_title(\"Nm modifications\")\n fig.suptitle(fname)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs", "def get_keys(filen, flist): \n if (filen in flist[0]):\n key1 = 'PSTH_STIM'\n key2 = 'ELEC_'\n key3 = '_TRIAL_'\n elif (filen in flist[1]) or (filen in flist[2]):\n key1 = 'PSTH'\n key2 = ''\n key3 = '_'\n elif (filen in flist[3]) or (filen in flist[4]):\n key1 = 'Stim'\n key2 = 'Elec'\n key3 = 'Repet'\n return key1, key2, key3", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n 
position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + \"_\" + position].update({\"unique\":False})\n\n return", "def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks", "def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def create_file_list(case):\n for server in ['bonaire','barbados','caiapo']:\n for basedir in ['data0/ivan/archive','data1/ivan/archive',\n 'data2/ivan/archive','data3/ivan/archive',\n '/bonaire/data2/data/SODA-POP','data0',\n '/barbados/data3/CCSM3-BGC']:\n if 'SODA-POP' in basedir:\n path = 
os.path.join('/',server,basedir,case)\n elif 'CCSM3-BGC' in basedir:\n path = os.path.join('/',server,basedir,case,'ocn/hist')\n else:\n path = os.path.join('/',server,basedir,case,'ocn2')\n\n if os.path.isdir(path):\n \t\tindir = path\n \t\tallfiles = os.listdir(indir)\n else:\n continue\n\n filelist = [os.path.join(indir,file) for file in allfiles\n if file.endswith('.nc')]\n filelist.sort()\n return filelist", "def _collect_reads(self, wildcards, _library_name, prefix):\n folder_name = get_ngs_library_folder_name(self.parent.sheets, wildcards.library_name)\n pattern_set_keys = (\"right\",) if prefix.startswith(\"right-\") else (\"left\",)\n seen = []\n for _, path_infix, filename in self.path_gen.run(folder_name, pattern_set_keys):\n path = os.path.join(self.base_path_in, path_infix, filename).format(**wildcards)\n if path in seen:\n print(\"WARNING: ignoring path seen before %s\" % path, file=sys.stderr)\n else:\n seen.append(path)\n yield path", "def processFiles(fileName):\n print fileName\n count_t1 = 0\n inFile=open(fileName,'r')\n all_angleList = Counter()\n rep_angleList = Counter()\n all_lengthsList = Counter()\n maxDist_List = Counter()\n global xCord, yCord, zCord\n aminoAcidName={}\n xCord={}\n yCord={}\n zCord={}\n seq_number={}\n counter=0\n for i in inFile:\n if (i[0:6].rstrip()==\"NUMMDL\"):\n numOfModels=i[10:14].rstrip()\n if ((i[0:6].rstrip()==\"ENDMDL\")or (i[0:6].rstrip()=='TER')):\n break\n if (i[0:6].rstrip()==\"MODEL\" and int(i[10:14].rstrip())>1):\n break\n \n if(i[0:4].rstrip())==\"ATOM\" and(i[13:15].rstrip())==\"CA\" and(i[16]=='A'or i[16]==' ')and i[17:20]!= \"UNK\" :\n aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])\n xCord[counter]=(float(i[30:38]))\n yCord[counter]=(float(i[38:46]))\n zCord[counter]=(float(i[46:54]))\n seq_number[counter]=str(i[22:27])\n counter+=1\n\n protLen=len(yCord)\n initialLabel=[]\n sortedLabel=[]\n sortedIndex=[]\n outDist={}\n for m in range(0,3):\n initialLabel.append(0)\n sortedLabel.append(0)\n sortedIndex.append(0)\n\n for i in range(0,protLen-2):\n for j in range(i+1,protLen-1):\n for k in range(j+1, protLen):\n global i1,j1,k1\n i1=i\n j1=j\n k1=k\n keepLabelIndex={}\n keepLabelIndex[aminoAcidName[i]]=i\n keepLabelIndex[aminoAcidName[j]]=j\n keepLabelIndex[aminoAcidName[k]]=k\n initialLabel[0]=aminoAcidName[i]\n initialLabel[1]=aminoAcidName[j]\n initialLabel[2]=aminoAcidName[k]\n sortedLabel=list(initialLabel)\n sortedLabel.sort(reverse=True)\n\n #Perform Rule- based labelling\n\n if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n dist1_2Temp=calcDist(i,j)\n dist1_3Temp=calcDist(i,k)\n dist2_3Temp=calcDist(j,k)\n if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=j\n indexOf2=k\n elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=k\n indexOf2=j\n else:\n indexOf0=j\n indexOf1=k\n indexOf2=i\n elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): \n for index_ in range(0,3):\n sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]\n indexOf0=sortedIndex[0]\n indexOf1=sortedIndex[1]\n indexOf2=sortedIndex[2]\n elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):\n indexOf2=keepLabelIndex[sortedLabel[2]]\n indices=indexFind(indexOf2,i,j,k)\n a=indexOf2\n b=indices[0]\n c=indices[1]\n dist1_3Temp=calcDist(b,a)\n dist2_3Temp=calcDist(c,a)\n if dist1_3Temp>=dist2_3Temp:\n indexOf0=indices[0]\n indexOf1=indices[1] \n else:\n indexOf0=indices[1]\n 
indexOf1=indices[0]\n elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n indexOf0=keepLabelIndex[sortedLabel[0]]\n indices=indexFind(indexOf0,i,j,k)\n if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):\n indexOf1=indices[0]\n indexOf2=indices[1] \n else:\n indexOf2=indices[0]\n indexOf1=indices[1]\n dist01=calcDist(indexOf0,indexOf1)\n s2=dist01/2\n dist02=calcDist(indexOf0,indexOf2)\n s1=dist02\n dist12=dist01\n dist03=calcDist(indexOf1,indexOf2)\n\n # All lengths calculation \n all_lengthsList[round(dist01,round_off_to)] += 1\n all_lengthsList[round(dist02,round_off_to)] += 1\n all_lengthsList[round(dist03,round_off_to)] += 1\n\n maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1\n\n s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2\n +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2\n +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5\n \n \n Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14\n if Theta1<=90:\n all_angleList[round(Theta1,round_off_to)] +=1\n rep_angleList[round(Theta1,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta1),round_off_to)] +=1\n rep_angleList[round(abs(180-Theta1),round_off_to)] +=1\n \n #if Theta1>90: \n # Theta1=abs(180-Theta1)\n #print 'Second Theta1, ',Theta1\n #Theta 2\n dist02=calcDist(indexOf1,indexOf0)\n s1=dist02\n dist01=calcDist(indexOf1,indexOf2)\n s2=dist01/2\n s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2\n +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2\n +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5\n \n Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta2 > 90:\n # Theta2 = abs(180-Theta2)\n if Theta2<=90:\n all_angleList[round(Theta2,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta2),round_off_to)] +=1\n\n #Theta 3\n dist02=calcDist(indexOf2,indexOf1)\n s1=dist02\n dist01=calcDist(indexOf2,indexOf0)\n s2=dist01/2\n s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+\n ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+\n ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5\n \n Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta3 > 90:\n # Theta3 = abs(180-Theta3)\n if Theta3<=90:\n all_angleList[round(Theta3,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta3),round_off_to)] +=1\n # Either writting output to a file or using dictionary or \n # counter will save you from memory exceptions in this case.\n #all_angleList[round(Theta1,round_off_to)] +=1\n #all_angleList[round(Theta2,round_off_to)] +=1\n #all_angleList[round(Theta3,round_off_to)] +=1\n\n #rep_angleList[round(Theta1,round_off_to)] +=1\n\n count_t1 = count_t1+1\n\n print 'count_t1:',count_t1\n\n return [all_angleList,rep_angleList,all_lengthsList,maxDist_List]", "def _get_sensor_col_files(self, gas, loc):\n sub = os.path.join(self.GasNames[gas], self.Locs[loc])\n files = os.listdir(os.path.join(self.data_location, sub))\n files.sort()\n return (sub, files)", "def _build_found_list(self, filenames):\n return sorted(\n ('FOUND_FILENAME', os.path.normpath(f)) for f in filenames)", "def get_glob_strings(subdirglob):\n dirname = path.dirname(subdirglob)\n basename = path.basename(subdirglob)\n assert (((\"_M1_\" in subdirglob) or (\"_M2_\" in subdirglob)) or (\"_S_\" in subdirglob)), \\\n (\"_M1_ or _M2_ not in subdirglob, cant differentiate between M1 and M2, aborting.\"\n f\"glob: {subdirglob}\")\n if (\"*\" not in subdirglob) and (\"_S_\" not in 
basename):\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif (\"_M1_\" or \"_M2_\") in basename:\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif \"_S_\" in basename:\n return basename" ]
[ "0.6327156", "0.61232364", "0.56347793", "0.56296253", "0.5618694", "0.5515025", "0.54627013", "0.5458853", "0.538327", "0.53801644", "0.53717375", "0.52709544", "0.5267916", "0.52162147", "0.5175573", "0.5166157", "0.51569754", "0.5155961", "0.51448816", "0.5137253", "0.51114327", "0.51104504", "0.5105954", "0.50756", "0.5073835", "0.50372237", "0.50094825", "0.4972083", "0.4970715", "0.4952365" ]
0.675231
0
take a list of basenames, get lcd and merge set founder affection according to faff flag and offspring according to ofaff flag
def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0): lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5]) cfped = [] coped = [] cfgeno = [] cogeno = [] allrsa = {} ignorers = {} for i,basename in enumerate(bnlist): fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i]) print '%s gave %d fgeno' % (basename,len(fgeno)) for rs in trsadict.keys(): tk = trsadict[rs].keys() if len(tk) > 2: print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs]) if not allrsa.get(rs,None): allrsa[rs] = {} for a in tk: if not allrsa[rs].get(a,None): allrsa[rs][a] = trsadict[rs][a] else: allrsa[rs][a] += trsadict[rs][a] tk = allrsa[rs].keys() if len(tk) > 2 and not ignorers.get(rs,None): # new #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs]) ignorers[rs] = rs cfped += fped coped += oped cfgeno += fgeno cogeno += ogeno print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno)) # now have offspring and founder rows in lcdmap order # write map file print '### found %d markers > 2 alleles' % (len(ignorers.keys())) keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)] newmap = ['\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs f = file('%s.map' % newbasename,'w') f.write('%s\n' % '\n'.join(newmap)) f.close() for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno[i] = g # replace print 'cfgeno converted' if not fo: # not founders only - note arrays are not lists! cfped += copy.copy(coped) # del coped for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno.append(g) # extend founders del cogeno print 'after if not fo now have %d cfgeno' % (len(cfgeno)) f = file('%s.ped' % newbasename,'w') for n,ped in enumerate(cfped): l = ' '.join(ped + list(cfgeno[n])) if n % 100 == 0 and n > 0: print 'writing line %d' % n f.write(l) f.write('\n') f.close() print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autocontext_forests(dirname):\n rf_files = []\n for filename in os.listdir(dirname):\n fullname = os.path.join(dirname, filename)\n if os.path.isfile(fullname) and len(filename) >= 8:\n base, middle, end = filename[:3], filename[3:-4], filename[-4:]\n if base == \"rf_\" and end ==\".ilp\":\n rf_files.append((int(middle), fullname))\n rf_files = sorted(rf_files)\n rf_indices, rf_files = zip(*rf_files)\n assert rf_indices == tuple(xrange(len(rf_files))) # check that there are only the indices 0, 1, 2, ... .\n return rf_files", "def subsetPed(basename=\"\",lcdmap = [],faff='1', ofaff='2'):\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf]\r\n rscols = {} # lookup marker table\r\n colrs = [] # lookup rs from column\r\n for i,m in enumerate(lmap): # get columns to keep in the order we want them\r\n rscols[m[1]] = i # keep track of where each rs is in this map\r\n colrs.append(m[1]) # and keep the list of rs for tracking alleles\r\n wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep\r\n print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \\\r\n (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)\r\n pf = file('%s.ped' % basename,'r')\r\n ogeno = [] # offspring new lines\r\n fgeno = [] # founders\r\n oped = [] # for pedigrees\r\n fped = []\r\n rsadict = {} # keep a count of alleles - seems to be a problem\r\n for i,l in enumerate(pf):\r\n if (i+1) % 500 == 0:\r\n print '%s at line %d' % (basename,i+1)\r\n ll = l.strip().split()\r\n ped = ll[:6]\r\n founder = (ll[2] == '0' and ll[3] == '0') \r\n aff = faff\r\n if not founder:\r\n aff = ofaff\r\n ped[5] = aff # adjust as needed\r\n if founder:\r\n fped.append(ped)\r\n else:\r\n oped.append(ped)\r\n gt = ll[6:]\r\n geno = []\r\n for snp in wewant: # columns in order\r\n thisrs = colrs[snp]\r\n base = snp*2\r\n g1 = gt[base]\r\n g2 = gt[base+1]\r\n geno.append(g1)\r\n geno.append(g2)\r\n if not rsadict.get(thisrs,None):\r\n rsadict[thisrs] = {}\r\n if g1 <> '0':\r\n if not rsadict[thisrs].get(g1,None):\r\n rsadict[thisrs][g1] = 1\r\n else:\r\n rsadict[thisrs][g1] += 1 \r\n if g2 <> '0':\r\n if not rsadict[thisrs].get(g2,None):\r\n rsadict[thisrs][g2] = 1\r\n else:\r\n rsadict[thisrs][g2] += 1\r\n keepgt = array.array('c',geno)\r\n if founder:\r\n fgeno.append(keepgt)\r\n else:\r\n ogeno.append(keepgt)\r\n print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))\r\n return fped,oped,fgeno,ogeno,rsadict", "def getLCD(lbase=[]):\r\n listmf = []\r\n rsdict = {}\r\n for i,basename in enumerate(lbase): # for each basename to be included\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf] \r\n rslist = [x[1] for x in lmap] # chrom rs gendist physdist\r\n for x in lmap:\r\n rsdict[x[1]] = (x[0],int(x[3]),x[1]) # key by chrom,offset,rs\r\n setrs = set(rslist)\r\n listmf.append(setrs) # list of map lines for processing\r\n lcd = listmf.pop(0) # start with first - order doesn't matter\r\n for setrs in listmf:\r\n lcd = lcd & setrs # intersection\r\n lcd = list(lcd) # now have lowest common denom as a list of rs\r\n lcdmap = [rsdict[rs] for rs in lcd] # restore chrom,offset,rs for rs to keep\r\n lcdmap.sort() # now in genomic order\r\n print 'got lcdmap=',lcdmap[:10]\r\n return lcdmap # sorted common map\r", "def buildfilelist():\r\n for files in filelist:\r\n if os.path.splitext(files)[1]=='.dxf': #查找目录下的dxf文件,加入到readfilelist文件列表中 \r\n readfilelist.append(files)\r\n #feilin=file('feilin(ph).dxf','w') 
#新建一个文件,名字先占位用,后续改成由配置文件中读入名称。 \r", "def maf2vcf_mrefs(maf):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"a\"):\n ancallele = ''\n refout = ''\n line = next(maf)\n while line.startswith(\"s\"):\n if \"Wb\" in line:\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n chrom = aa[1].split(\".\")[1]\n if \"-\" in aa[4]:\n if aa[6] == 'A':\n rallele = 'T'\n elif aa[6] == 'T':\n rallele = 'A'\n elif aa[6] == 'C':\n rallele = 'G'\n elif aa[6] == 'G':\n rallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n pos_1 = size - pos\n else:\n pos_1 = pos\n rallele = aa[6]\n else:\n # read in other refs\n aa = line.split()\n refout += aa[1][0]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele += 'T'\n elif aa[6] == 'T':\n ancallele += 'A'\n elif aa[6] == 'C':\n ancallele += 'G'\n elif aa[6] == 'G':\n ancallele += 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n ancallele += aa[6]\n line = next(maf)\n if ancallele:\n f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(chrom, pos_1 + 1,\n rallele, ancallele,\n refout))\n else:\n pass\n return(None)", "def get_files_suffix_list(suffixes, flist, Lshow=False, Ldir=False):\n matched_files=[]\n dirs=[]\n files=[]\n for fname in flist:\n if os.path.isdir(fname):\n dirs.append(fname)\n else:\n files.append(fname)\n for suff in suffixes:\n for fname in files:\n #print(f\" {suff} in {fname} ?\")\n if fname.endswith(suff):\n matched_files.append(fname)\n matched_files.extend(dirs) \n return matched_files", "def _separate_file_list( file_list, target_locus ):\n log.info(\"Parsing locus-specific subread FOFN\")\n target_fasta = None\n other_fasta = []\n print file_list, target_locus\n for filename in file_list:\n basename = filename.split('.')[0]\n locus = basename.split('_')[-1]\n if locus == target_locus and target_fasta is None:\n target_fasta = filename\n elif locus == target_locus:\n msg = 'Multiple files for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n else:\n other_fasta.append( filename )\n if target_fasta is None:\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n return ( target_fasta, other_fasta )", "def get_annot_cfpath_list(ibs, aid_list, suffix=None):\n #utool.assert_all_not_None(aid_list, 'aid_list')\n _cfname_fmt = get_chip_fname_fmt(ibs=ibs, suffix=suffix)\n cfname_iter = (None if aid is None else _cfname_fmt % aid for aid in iter(aid_list))\n cfpath_list = [None if cfname is None else join(ibs.chipdir, cfname) for cfname in cfname_iter]\n return cfpath_list", "def make_master_flats(dc):\n\n\t## Make EXTcheck: is there always the same number of extensions in each file\n\tprint \"Making master flats\"\n\t\n\t## Choose extensions you are using\n\t\n\tfor flat_type in ['FFS']: # Currently FFD is unsupported. If you have FFDs, add them to the list but you must have ONLY FFDs or ONLY FFSs in the dir. 
Otherwise the first element in the list will get overwritten!\n\t\t#~ print \"\\n\", flat_type, \"\\n\"\n\t\tfor i in dc:\n\t\t\tTRIM, TRIM1, VR, PS, PS1, OS, OS1 = CCD_sections((i[0], i[1]))\n\t\t\tfilelist = []\n\t\t\tfor f in glob.glob(RAW+'*'+flat_type+'*fits'):\n\t\t\t\tccd_conf = []\n\t\t\t\theader0 = fits.getheader(f)\n\t\t\t\theader1 = fits.getheader(f, ext=1)\n\t\t\t\tif header0['OBSMODE']==flat_type:\n\t\t\t\t\tfor KW in ['BINX', 'BINY']:\n\t\t\t\t\t\tccd_conf.append(header0[KW])\n\t\t\t\t\tfor KW in ['NAXIS1', 'NAXIS2']:\n\t\t\t\t\t\tccd_conf.append(header1[KW])\n\t\t\t\t\t\tif tuple(ccd_conf)==i:\n\t\t\t\t\t\t\tfilelist.append(f)\n\t\t\tlfl = len(filelist)\n\t\t\tif lfl > 0:\n\t\t\t\tBIN=CD+'/'+str(i[0])+'x'+str(i[1])+'/'\n\t\t\t\tWD=BIN+str(i[-2])+'x'+str(i[-1])+'/' # Bottom level dir with calibrated and master frames\n\t\t\t\tB=check_exist(WD, 'MF.fits', i)\n\t\t\t\tif B=='n':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\thdul = fits.HDUList()\n\t\t\t\t\thdul.append(fits.ImageHDU())\n\t\t\t\t\t#~ MB = fits.open(WD+'MB.fits')\n\t\t\t\t\tx = np.array(range(0,i[-1]))\n\t\t\t\t\tfor EXT in (extensions):\n\t\t\t\t\t\tprint \"##################################################\"\n\t\t\t\t\t\tprint \"Stacking \"+`lfl`+' '+`i[-2]`+'x'+`i[-1]`+' channel '+`EXT`+' flat frames!'\n\t\t\t\t\t\tif EXT==1:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS1\n\t\t\t\t\t\t\tTR=TRIM1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS\n\t\t\t\t\t\t\tTR=TRIM\n\t\t\t\t\t\tsc = -1 # counts how many flats have mean>limit\n\t\t\t\t\t\tfor n, fn in enumerate(filelist):\n\t\t\t\t\t\t\tprint \"Files left:\",`lfl-n`+'/'+`lfl`\n\t\t\t\t\t\t\tim = fits.getdata(fn, ext=EXT)\n\t\t\t\t\t\t\tmeanval = np.mean(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t#~ maxval = np.max(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\tmaxval = stats.scoreatpercentile(im[VR[0]:VR[1], TR[0]:TR[1]], 90)\n\t\t\t\t\t\t\texptime = fits.getheader(fn)['EXPTIME']\n\t\t\t\t\t\t\t#~ if meanval > 15000. and meanval < 40000. and maxval < 50000. and exptime>5.:\n\t\t\t\t\t\t\tif meanval > 16000. and meanval < 40000. 
and exptime>=5.:\n\t\t\t\t\t\t\t\tsc+=1\n\t\t\t\t\t\t\t\t#~ im[im<1]=1\n\t\t\t\t\t\t\t\tmscrow, sigmarow = median_row(OSC, PSC, TR, im)\n\t\t\t\t\t\t\t\tsh = np.shape(im)\n\t\t\t\t\t\t\t\tfor y in range(0, sh[0]):\n\t\t\t\t\t\t\t\t\tim[y] = im[y]-mscrow[y]\n\t\t\t\t\t\t\t\tF=im\n\t\t\t\t\t\t\t\tnorm = np.median(F[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t\tF = F/norm #+np.min(F)+0.0001\n\t\t\t\t\t\t\t\tif sc==0:\n\t\t\t\t\t\t\t\t\tstack_arr = F\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tstack_arr = np.dstack((stack_arr, F))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\t\t\tprint \"Rejected\", fn, \"AVG =\", meanval, \"EXPTIME =\", exptime\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\tprint 'Will stack a total of', np.shape(stack_arr)[2], 'flats'\n\t\t\t\t\t\tMF = np.median(stack_arr, axis=2)\n\t\t\t\t\t\thdul.append(fits.ImageHDU(MF))\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS1\", np.shape(MF)[1])\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS2\", np.shape(MF)[0])\n\t\t\t\t\thdul[0].header.set(\"CALIBR\", \"T\")\n\t\t\t\t\thdul[0].header.set(\"INSTRUME\", \"MAIA\")\n\t\t\t\t\thdul[0].header.set(\"BINX\", i[0])\n\t\t\t\t\thdul[0].header.set(\"BINY\", i[1])\n\t\t\t\t\thdul[0].header.set(\"CALMODE\", \"MASTER FLAT\")\n\t\t\t\t\thdul.writeto(WD+\"MF.fits\", clobber=True)\n\t\t\t\t\tprint \"############################################################\"\n\tprint \"Completed master flats\"", "def get_light_sbc(filenames, onoff=True):\n if onoff:\n param = \"on\"\n else:\n param = \"off\"\n return filter_filenames(filenames, [param])", "def getPrefices(fileList):\n # Format:\n # prefix_dictionary[surl] = [oldPrefix, newPrefix]\n # Note: this function returns oldPrefix, newPrefix, prefix_dictionary\n # old/newPrefix are the fixed prefices defined in copysetup[in]\n # In case copyprefix[in] can be used, ie if it is set, it may contain a list of copyprefices that can sort out\n # more complicated cases\n\n prefix_dictionary = {}\n\n # get the file access info (only old/newPrefix are needed here)\n useCT, oldPrefix, newPrefix, useFileStager, directIn = getFileAccessInfo()\n\n # get the copyprefices\n copyprefix = readpar('copyprefixin')\n if copyprefix == \"\":\n copyprefix = readpar('copyprefix')\n\n # should we fall back to copyprefix or use the faxredirector? 
(this is the case for FAX test jobs since they reset old/newPrefix)\n if oldPrefix == \"\" or newPrefix == \"\" or not (oldPrefix and newPrefix):\n\n # special case for FAX on sites that are not setup for direct i/o in the normal way\n if (readpar('copytoolin').lower() == \"fax\") or (readpar('copytoolin') == \"\" and readpar('copytool').lower() == \"fax\"):\n if \"dummy\" in copyprefix:\n # try to construct the TURL using the copyprefix and the faxredirector\n prefix, dummy = copyprefix.split(\"^\")\n faxredirector = readpar('faxredirector')\n if faxredirector != \"\":\n tolog(\"Using copyprefix and faxredirector for old/newPrefix\")\n oldPrefix = prefix\n newPrefix = faxredirector\n else:\n tolog(\"WARNING: faxredirector not set, do not know how to construct old/newPrefix\")\n else:\n if not \"^\" in copyprefix:\n tolog(\"WARNING: Will default to using lcg-getturls\")\n \n # in case of less complex copyprefix\n if \"^\" in copyprefix and not \",\" in copyprefix and not \"dummy\" in copyprefix:\n prefices = copyprefix.split(\"^\")\n oldPrefix = prefices[0]\n newPrefix = prefices[1]\n\n # in case of more complex copyprefix (the case of copyprefix lists)\n if \"^\" in copyprefix and \",\" in copyprefix and not \"dummy\" in copyprefix:\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n\n else: # old/newPrefix are set\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n \n if oldPrefix != \"\" and newPrefix != \"\":\n tolog(\"Will use oldPrefix=%s and newPrefix=%s for SURL to TURL conversion\" % (oldPrefix, newPrefix))\n else:\n tolog(\"WARNING: old/newPrefix not known\")\n\n return oldPrefix, newPrefix, prefix_dictionary", "def get_files(file_list, mode):\n\tfile_set = set(file_list)\n\tif \"band\" in mode:\n\t\tband_all_set = set([\"bands.dat.gnu\", \"freq.plot\"])\n\t\tband_file_set = file_set & band_all_set ; remain_file_set = file_set - band_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{b}\\033[0m\" for b in band_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tif mode == \"dos\":\n\t\tdos_all_set = set([\"S.dos\"])\n\t\tdos_file_set = file_set & dos_all_set ; remain_file_set = file_set - dos_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{d}\\033[0m\" for d in dos_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tprint(f\"Files in the 
directory: {files_str}\"); file = input(\"Please choose your file (type one only): \")\n\treturn file", "def Parse_folder_to_multi_faa(target_dir,faa_filename):\n os.chdir(target_dir)\n output_handle = open(faa_filename, \"w\")\n for gbk_filename in FileGen(target_dir):\n with open(gbk_filename, \"r\") as input_handle:\n for seq_record in SeqIO.parse(input_handle, \"genbank\") :\n print(\"Dealing with GenBank record %s\" % seq_record.id)\n for seq_feature in seq_record.features :\n if seq_feature.type==\"CDS\" :\n assert len(seq_feature.qualifiers['translation'])==1\n try:\n name = seq_feature.qualifiers['locus_tag'][0]\n except KeyError:\n name = seq_feature.qualifiers['product'][0]\n output_handle.write(\">%s from %s\\n%s\\n\" % (\n name,\n gbk_filename.split(\"/\")[-1],\n seq_feature.qualifiers['translation'][0])) \n output_handle.close()", "def get_keys(filen, flist): \n if (filen in flist[0]):\n key1 = 'PSTH_STIM'\n key2 = 'ELEC_'\n key3 = '_TRIAL_'\n elif (filen in flist[1]) or (filen in flist[2]):\n key1 = 'PSTH'\n key2 = ''\n key3 = '_'\n elif (filen in flist[3]) or (filen in flist[4]):\n key1 = 'Stim'\n key2 = 'Elec'\n key3 = 'Repet'\n return key1, key2, key3", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. 
The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on 
pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def cat_sff_files(list_of_file_handles):\r\n # mimicks lazy_parse_sff_handle on multiple files\r\n # Move to cogent???\r\n if (list_of_file_handles == []):\r\n return [], None\r\n try:\r\n flowgrams_and_headers = map(\r\n lazy_parse_sff_handle,\r\n list_of_file_handles)\r\n except ValueError:\r\n raise FileFormatError('Wrong flogram file format. Make sure you pass the sff.txt format ' +\r\n 'produced by sffinfo. The binary .sff will not work here.')\r\n\r\n flowgram_iterators = [a for a, b in flowgrams_and_headers]\r\n return chain(*flowgram_iterators), flowgrams_and_headers[0][1]", "def create_F1_F2_cols(col_base_list, output='both'):\n F12_cols = []\n for x in col_base_list:\n pref = x[:3]\n if output == 'both':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n F12_cols.append('FM_F2_' + x[3:])\n else:\n F12_cols.append('F1_' + x)\n F12_cols.append('F2_' + x)\n elif output =='F1':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n else:\n F12_cols.append('F1_' + x)\n elif output =='F2':\n if pref =='FM_':\n F12_cols.append('FM_F2_'+ x[3:])\n else:\n F12_cols.append('F2_' + x)\n return F12_cols", "def parse_flt_files(files=[], info=None, uniquename=False, use_visit=False,\n get_footprint = False, \n translate = {'AEGIS-':'aegis-', \n 'COSMOS-':'cosmos-', \n 'GNGRISM':'goodsn-', \n 'GOODS-SOUTH-':'goodss-', \n 'UDS-':'uds-'}): \n \n if info is None:\n if not files:\n files=glob.glob('*flt.fits')\n \n if len(files) == 0:\n return False\n \n info = get_flt_info(files)\n else:\n info = info.copy()\n \n for c in info.colnames:\n if not c.islower(): \n info.rename_column(c, c.lower())\n\n if 'expstart' not in info.colnames:\n info['expstart'] = info['exptime']*0.\n\n so = np.argsort(info['expstart'])\n info = info[so]\n\n #pa_v3 = np.round(info['pa_v3']*10)/10 % 360.\n pa_v3 = np.round(info['pa_v3']) % 360.\n \n target_list = []\n for i in range(len(info)):\n #### Replace ANY targets with JRhRmRs-DdDmDs\n if info['targname'][i] == 'ANY': \n if use_visit:\n new_targname=info['file'][i][:6]\n else:\n new_targname = 'par-'+radec_to_targname(ra=info['ra_targ'][i],\n dec=info['dec_targ'][i])\n \n target_list.append(new_targname.lower())\n else:\n target_list.append(info['targname'][i])\n \n target_list = np.array(target_list)\n\n info['progIDs'] = [file[1:4] for file in info['file']]\n\n progIDs = np.unique(info['progIDs'])\n visits = np.array([os.path.basename(file)[4:6] for file in info['file']])\n dates = np.array([''.join(date.split('-')[1:]) for date in info['date-obs']])\n \n targets = np.unique(target_list)\n \n output_list = [] #OrderedDict()\n filter_list = OrderedDict()\n \n for filter in np.unique(info['filter']):\n filter_list[filter] = OrderedDict()\n \n angles = np.unique(pa_v3[(info['filter'] == filter)]) \n for angle in angles:\n filter_list[filter][angle] = []\n \n for target in targets:\n #### 3D-HST targname translations\n target_use = target\n for key in translate.keys():\n target_use 
= target_use.replace(key, translate[key])\n \n ## pad i < 10 with zero\n for key in translate.keys():\n if translate[key] in target_use:\n spl = target_use.split('-')\n try:\n if (int(spl[-1]) < 10) & (len(spl[-1]) == 1):\n spl[-1] = '{0:02d}'.format(int(spl[-1]))\n target_use = '-'.join(spl)\n except:\n pass\n\n for filter in np.unique(info['filter'][(target_list == target)]):\n angles = np.unique(pa_v3[(info['filter'] == filter) & \n (target_list == target)])\n \n for angle in angles:\n exposure_list = []\n exposure_start = []\n product='{0}-{1:05.1f}-{2}'.format(target_use, angle, filter) \n\n visit_match = np.unique(visits[(target_list == target) &\n (info['filter'] == filter)])\n \n this_progs = []\n this_visits = []\n \n for visit in visit_match:\n ix = (visits == visit) & (target_list == target) & (info['filter'] == filter)\n #this_progs.append(info['progIDs'][ix][0])\n #print visit, ix.sum(), np.unique(info['progIDs'][ix])\n new_progs = list(np.unique(info['progIDs'][ix]))\n this_visits.extend([visit]*len(new_progs))\n this_progs.extend(new_progs)\n \n for visit, prog in zip(this_visits, this_progs):\n visit_list = []\n visit_start = []\n visit_product = '{0}-{1}-{2}-{3:05.1f}-{4}'.format(target_use, prog, visit, angle, filter) \n \n use = ((target_list == target) & \n (info['filter'] == filter) & \n (visits == visit) & (pa_v3 == angle) &\n (info['progIDs'] == prog))\n \n if use.sum() == 0:\n continue\n\n for tstart, file in zip(info['expstart'][use],\n info['file'][use]):\n \n f = file.split('.gz')[0]\n if f not in exposure_list:\n visit_list.append(str(f))\n visit_start.append(tstart)\n \n exposure_list = np.append(exposure_list, visit_list)\n exposure_start.extend(visit_start)\n \n filter_list[filter][angle].extend(visit_list)\n \n if uniquename:\n print(visit_product, len(visit_list))\n so = np.argsort(visit_start)\n exposure_list = np.array(visit_list)[so]\n #output_list[visit_product.lower()] = visit_list\n \n d = OrderedDict(product=str(visit_product.lower()),\n files=list(np.array(visit_list)[so]))\n output_list.append(d)\n \n if not uniquename:\n print(product, len(exposure_list))\n so = np.argsort(exposure_start)\n exposure_list = np.array(exposure_list)[so]\n #output_list[product.lower()] = exposure_list\n d = OrderedDict(product=str(product.lower()),\n files=list(np.array(exposure_list)[so]))\n output_list.append(d)\n \n ### Get visit footprint from FLT WCS\n if get_footprint:\n from shapely.geometry import Polygon\n \n N = len(output_list)\n for i in range(N):\n for j in range(len(output_list[i]['files'])):\n flt_file = output_list[i]['files'][j]\n if (not os.path.exists(flt_file)) & os.path.exists('../RAW/'+flt_file):\n flt_file = '../RAW/'+flt_file\n \n flt_j = pyfits.open(flt_file)\n h = flt_j[0].header\n if (h['INSTRUME'] == 'WFC3') & (h['DETECTOR'] == 'IR'):\n wcs_j = pywcs.WCS(flt_j['SCI',1])\n else:\n wcs_j = pywcs.WCS(flt_j['SCI',1], fobj=flt_j)\n \n fp_j = Polygon(wcs_j.calc_footprint())\n if j == 0:\n fp_i = fp_j\n else:\n fp_i = fp_i.union(fp_j)\n \n output_list[i]['footprint'] = fp_i\n \n return output_list, filter_list", "def chk_chng(src_flist,dst_flist):\n uc_flist = []\n c_flist = []\n for files in src_flist:\n if files in dst_flist:\n uc_flist.append(files)\n else:\n c_flist.append(files)\n return uc_flist,c_flist", "def concatenate_detected_verified(fasta_name, PATH_FASTA_DETECTED, PATH_FASTA_VERIFIED, INFO_folder, PATH_FASTA_CONCATENATED):\n\n\tprint \"\\n#################\"\n\tprint \"# Concatetaned file\"\n\tprint \"#################\\n\"\n\n\t# 
NOTE Dictionaire avec en clef l'id espèce/système et en value une liste\n\t# NOTE [\"l'id espèce/système du verifié qui correspond\", [liste des sequences ATPase, IM ...]]\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tlist_seq_verified = list(SeqIO.parse(verified_fasta, \"fasta\"))\n\t\tlist_id_verified = [seq.id for seq in list_seq_verified]\n\t\tlist_seq_verified = [seq.seq for seq in list_seq_verified]\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\t# IDEA Il faut tester au moins une fois pour voir si lors de la concatenation, je ne me retrouve pas avec des systems ou je n'ai pas tous enlevé. Exemple l'ATPase de X n'est pas la même que celle de Y mais l'IMplatform l'ai si c'est le cas X est a enlevé aussi pour son ATPase\n\t\t# IDEA Si idea précédente vrai alors il faut faire des fichiers temporaires des sequences que l'on garde et concatener par \"cat\" à la fin le fichier temporaire et son homonyme en verifié.\n\n\t\t# NOTE Il y avait un problème : le nom/id de l'epèce + système ne doit pas contenir le _NumX_ car ce Num fait référence au nombre de duplicat de la protéine (exemple deux ATPase gspE)\n\t\t# NOTE Quelques systèmes on des sequences qui sont similaire pour toutes les protéines sauf une exemple ESCO3 et NC_011993 qui sont identique pour tous sauf ATPase (98% seulement)\n\n\t\tfor seq in seq_parser :\n\n\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\tif id_seq in dict_remove :\n\t\t\t\tcontinue\n\n\t\t\telif seq.seq in list_seq_verified :\n\t\t\t\tindex=list_seq_verified.index(seq.seq)\n\n\t\t\t\tid_seq_verif = list_id_verified[index].split(\"_\")\n\t\t\t\tid_seq_verif = re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq_verif[:id_seq_verif.index(\"V\")]))\n\n\t\t\t\t# NOTE dans le dictionnaire je met le système vérifié en premier, toutes les séquences du système identitique en deuxième et la séquence qui en est la cause en troisème\n\t\t\t\tdict_remove[id_seq]=[id_seq_verif,[], seq.id]\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tos.system('cat \"{}\" > \"{}\"'.format(verified_fasta, concatenated_fasta))\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\twith open(concatenated_fasta, \"a\") as w_file :\n\t\t\tfor seq in seq_parser 
:\n\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, w_file, \"fasta\")\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\t# NOTE Dict remove complete and all concatenate write\n\twrite_remove_concatenate(dict_remove, INFO_folder)\n\n\treturn", "def batch_fuc(CCD):\n batch_q()\n default_path = os.getcwd()\n PATH = os.path.join(os.getcwd(), list_subdir()[0])\n folder_name = list_subdir()[0]\n # print(\"default_path :\", default_path)\n print(\"folder_name :\", folder_name)\n # print(\"PATH :\", PATH)\n\n A = True\n\n while A is True:\n message = \"Select function\"\n choices = ['Bias correction', 'Cosmic-ray correction',\n 'Flat correction', 'Wavelength calibration',\n 'Flux calibration', 'Plot tools', 'Backup',\n 'Restore', 'Header correction', 'Quit']\n input = options(message, choices)\n\n if input == 'Bias correction':\n b_bias(folder_name, PATH, CCD)\n elif input == 'Cosmic-ray correction':\n b_cosmic(folder_name, PATH, CCD)\n elif input == 'Flat correction':\n b_flat(folder_name, PATH, CCD)\n elif input == 'Wavelength calibration':\n b_wave(folder_name, PATH, CCD, default_path)\n elif input == 'Flux calibration':\n b_flux(folder_name, PATH, CCD, default_path)\n elif input == 'Plot tools':\n b_plots(folder_name, PATH, default_path)\n elif input == 'Backup':\n b_backup(pathloc=PATH)\n elif input == 'Restore':\n b_restore(pathloc=PATH)\n elif input == 'Header correction':\n b_headercorr(folder_name)\n elif input == 'Quit':\n A = False\n sys.exit()", "def prep_cum_data(self, list_of_concat_files):\n\n cf = ''.join(list_of_concat_files)\n cf = cf.replace('\\n', '\\t')\n cf = cf.split('\\t')\n cf = filter(lambda x: 'GO:' in x, cf)\n return cf", "def lfp_extract(files):\r\n \r\n if 'lfpdata' in locals():\r\n del lfpdata\r\n \r\n for i, file in enumerate(files):\r\n \r\n ### load data\r\n matdat = sio.loadmat(file, variable_names = ['lfpsegs', 'lfpdata', 'fs', 'chnAreas'], \r\n struct_as_record = False, squeeze_me = True) \r\n \r\n \r\n \r\n ### extract the noused channels, only calculate once\r\n if i == 0:\r\n \r\n # chnAreas\r\n chnAreas = matdat['chnAreas'].tolist()\r\n \r\n # fs: sample rate\r\n fs = matdat['fs'] \r\n \r\n \r\n\r\n ### dealing lfp data\r\n \r\n # lfp (np.ndarray): nareas * ntemp * ntrials or ntemp * nareas * ntrials\r\n if 'lfpdata' in matdat.keys():\r\n lfpdata_1file = matdat['lfpdata']\r\n elif 'lfpsegs' in matdat.keys():\r\n lfpdata_1file = matdat['lfpsegs']\r\n\r\n n1, n2, n3 = lfpdata_1file.shape\r\n if n1 > n2: # ntemp * nareas * ntrials\r\n lfpdata_1file = np.transpose(lfpdata_1file, (1, 0, 2))\r\n \r\n # concatenate to lfpdata for all files\r\n if 'lfpdata' not in locals():\r\n lfpdata = lfpdata_1file\r\n else:\r\n lfpdata = np.concatenate((lfpdata, lfpdata_1file), axis = 2)\r\n \r\n \r\n return lfpdata, chnAreas, fs", "def add_reffile_overrides(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n 
saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n #sbias, lin, sat, gainfile, dist, ipcfile, pam = self.reffiles_from_dict(status)\n manual_reffiles = self.reffiles_from_dict(updated_status)\n for key in manual_reffiles:\n if manual_reffiles[key] == 'none':\n manual_reffiles[key] = 'crds'\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = manual_reffiles['superbias']\n linearity_arr[match] = manual_reffiles['linearity']\n saturation_arr[match] = manual_reffiles['saturation']\n gain_arr[match] = manual_reffiles['gain']\n distortion_arr[match] = manual_reffiles['distortion']\n photom_arr[match] = manual_reffiles['photom']\n ipc_arr[match] = manual_reffiles['ipc']\n transmission_arr[match] = manual_reffiles['transmission']\n badpixmask_arr[match] = manual_reffiles['badpixmask']\n pixelflat_arr[match] = manual_reffiles['pixelflat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)", "def conRFMixAndMaskToBeagle(indfile_name, rephasedhaps_pref, em_iters, win_size, chroms):\n\t### First get individual information\n\twindow_id = 0\n\tem_iter = em_iters\n\tindfile = open(indfile_name, \"r\")\t\n\tinds = []\n\tfor line in indfile:\n\t\tsplits = line.strip(\"\\r\\n\").split()\n\t\tinds.append(splits[1] + \"_A\")\n\t\tinds.append(splits[1] + \"_B\")\n\n\tallloci = []\n\toutfilename = rephasedhaps_pref + \"_w\" + str(win_size) + \".beagle\"\n\toutfile = open(outfilename, \"w\")\n\toutfile.write(\"I\\tid\\t\" + \"\\t\".join(inds) + \"\\n\")\n\t## Write genotype data out to file\n\n\tvitout = open(rephasedhaps_pref + \".vit\", \"w\")\n\twinout = open(rephasedhaps_pref + \".windows\", \"w\")\n\tfbkout = rephasedhaps_pref + \".fbk\"\n\tif os.path.exists(fbkout):\n\t\tos.remove(fbkout)\n\tvitlist = []\n\tfor chrom in chroms:\n\t\tprint chrom\n\t\tshapeitfilename = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.allelesRephased\" + str(em_iters) + \".txt\"\n\t\tshapeitfile = open(shapeitfilename, \"rb\")\n\t\tfbkin_name = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".ForwardBackward.txt\"\n\t\tos.system('cat ' + fbkin_name + \" >> \" + fbkout) # Concatenate files together\n\t\tmarkerin = rephasedhaps_pref + \"_chr\" + str(chrom) + 
\"_shapeout.amaps\"\n\t\tmarkerfile = open(markerin, \"r\")\n\t\tloci=[]\n\t\talleles = {}\n\t\tfor mline in markerfile:\n\t\t\tmsplit = mline.strip().split()\n\t\t\tloci.append(msplit[1])\n\t\t\talleles[msplit[1]] = [msplit[3], msplit[4] ]\n\n\t\tallloci.extend(loci)\n\t\tfor j,line in enumerate(shapeitfile):\n\t\t\tsline = line.strip(\"\\r\\n\")\n\t\t\tzero, ones = alleles[loci[j]]\n\t\t\tfixed = [ recodeAllele(k, zero, ones) for k in sline ]\n\t\t\toutfile.write(\"M\\t\" + loci[j] + \"\\t\" + \"\\t\".join(fixed) + \"\\n\")\n\t\tvitfile = open(rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".Viterbi.txt\", \"r\")\n\t\tvitlist.extend([x.strip().split() for x in vitfile])\n\t\tshapeitfile.close()\n\t\tvitfile.close()\n\t\t\n\t# This will transpose the whole Viterbi file\n\t# Yikes this may take a lot of memory\n\tfor i,x in enumerate(zip(*vitlist)):\n\t\tvitout.write(inds[i] + \"\\t\")\n\t\tfor y in x:\n\t\t\tvitout.write(y+\"\\t\")\n\t\tvitout.write(\"\\n\")\n\t\t### This doesn't quite work yet so make sure to fix it next time\n\tfor l in allloci:\n\t\twinout.write(\"window\" + str(window_id) + \"\\t\" + l + \"\\n\")\n\t\twindow_id += 1\n\treturn([outfile.name, vitout.name, winout.name, fbkout])", "def test_outpath_multi(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths]\n\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def fetch_basenames(engine, form_factor):\n for key in ['current', 'm_mother', 'm_daughter', 'm_spectator', 'momentum']:\n if key not in form_factor:\n raise KeyError(f\"Required key '{key}' is missing.\")\n\n def abspath(dirname):\n return os.path.join(pathlib.Path(__file__).parent.absolute(), dirname)\n\n # 2pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000'\n mother = \"%_RW_RW_d_d_m{m_mother}_m{m_spectator}_p000%fine\"\n daughter = \"%_RW_RW_d_d_m{m_daughter}_m{m_spectator}_{momentum}%fine\"\n if form_factor['m_daughter'] < form_factor['m_spectator']:\n daughter = \"%_RW_RW_d_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n # 3pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000',\n corr3 = \"%_{current}_T%_m{m_mother}_RW_RW_x_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n params = {\n 'mother': mother.format(**form_factor),\n 'daughter': daughter.format(**form_factor),\n 'corr3': corr3.format(**form_factor)}\n queries = aiosql.from_path(abspath(\"sql/\"), \"sqlite3\")\n with db.connection_scope(engine) as conn:\n corrs = 
queries.postgres.get_correlator_names(conn, **params)\n \n return np.squeeze(np.array(corrs))", "def _SetAnatNames(self, anat_tgt):\n# Define links to structural image in each output directory.\n for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \\\n self.entry_map['dti'] + self.entry_map['asl']:\n self.info[entry]['anat_link'] = anat_tgt\n\n# Name the normalization source image T1High. Number the rest.\n anat_entries = self.entry_map['anat'][:]\n anat_entries.remove(anat_tgt)\n n_t1high = 1\n for entry in anat_entries:\n if self.info[entry]['type'] == 'T1High':\n# High res T1-weighted, not normalization target. Rename it.\n fname = 'T1High_%d' % n_t1high\n fullname = '%s/%s' % (self.info[entry]['outdir'], fname)\n self.info[entry]['imgfile'] = fullname\n self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = False\n n_t1high += 1\n fname = 'T1High'\n fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname)\n self.info[anat_tgt]['imgfile'] = fullname\n self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = True\n\n self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \\\n self.info[anat_tgt]['suffix'])\n# The target for motin correction is the source for spatial normalization.\n self.norm_src = anat_tgt", "def set_fnames(subj, decondir):\n fnames = dict()\n outpref = 'decon_out.ramps_wav.%s_concat.Powered.cleanEPI' % subj\n sfx = 'Powered.cleanEPI.uncensored.txt'\n wm_name = 'wm_v8.%s_all.%s' % (subj, sfx)\n fnames['wm_file'] = os.path.join(os.environ['avp'], 'nii',\n '%s_CNR.anat' % subj, wm_name)\n vent_name = 'vent_v8.%s_all.%s' % (subj, sfx)\n fnames['vent_file'] = os.path.join(os.environ['avp'], 'nii',\n '%s_CNR.anat' % subj, vent_name)\n fnames['cf'] = os.path.join(os.environ['avp'], 'nii',\n 'all_ts.%s.Powered.censor.1D' % subj)\n fnames['outpref'] = os.path.join(decondir, outpref)\n\n return fnames" ]
[ "0.58278376", "0.569089", "0.5623684", "0.56120443", "0.5441088", "0.53539836", "0.5308381", "0.5256427", "0.52116", "0.51895374", "0.5147063", "0.5108449", "0.51060414", "0.5100264", "0.5047533", "0.5038564", "0.5021236", "0.5014878", "0.50056285", "0.49947405", "0.49672922", "0.4966864", "0.49275842", "0.4905078", "0.48935366", "0.48853895", "0.48839477", "0.48766556", "0.48747486", "0.48745847" ]
0.64975625
0
Test list secrets when not connected to any cluster.
def test_secrets_list_server_not_reachable(): message = "REANA client is not connected to any REANA cluster." reana_token = "000000" runner = CliRunner() result = runner.invoke(cli, ["secrets-list", "-t", reana_token]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def list_secrets(self):\n pass", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def secrets(self): # pylint: disable=no-self-use\n return []", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def list_command(env: Optional[str], config: str) -> None:\n layer = Layer.load_from_yaml(config, env)\n amplitude_client.send_event(amplitude_client.LIST_SECRETS_EVENT)\n gen_all(layer)\n _raise_if_no_k8s_cluster_exists(layer)\n\n configure_kubectl(layer)\n load_kube_config()\n v1 = CoreV1Api()\n api_response = v1.read_namespaced_secret(\"secret\", layer.name)\n if api_response.data is None:\n print(\n \"No secrets found, you can make some by adding them in you opta file k8s service\"\n )\n return\n for key in api_response.data:\n print(key)", "def secrets(self):\n return self._secrets", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecretArgs']]]]:\n return pulumi.get(self, \"secrets\")", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n 
from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def pod_secrets(self) -> Optional[Sequence['outputs.DataBoxSecretResponse']]:\n return pulumi.get(self, \"pod_secrets\")", "def list_secrets(self, MaxResults: int = None, NextToken: str = None) -> Dict:\n pass", "def list_secrets_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n limit = arg_to_number(args.get('limit')) or DEFAULT_LIMIT\n offset = arg_to_number(args.get('offset')) or DEFAULT_OFFSET\n response = client.list_secrets_request(vault_name, limit, offset)\n outputs = copy.deepcopy(response)\n readable_response = []\n\n for secret in outputs:\n readable_response.append({\n 'secret_id': secret.get('id'), 'managed': secret.get('managed'),\n **convert_attributes_to_readable(secret.get('attributes', {}).copy())\n })\n secret[VAULT_NAME_CONTEXT_FIELD] = vault_name\n secret['attributes'] = convert_time_attributes_to_iso(secret['attributes'])\n\n readable_output = tableToMarkdown(\n f'{vault_name} Secrets List',\n readable_response,\n ['secret_id', 'enabled', 'create_time', 'update_time', 'expiry_time'], removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='id',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def list_vault_secrets(schedule_id):\n from mist.api.poller.models import ListVaultSecretsPollingSchedule\n sched = ListVaultSecretsPollingSchedule.objects.get(id=schedule_id)\n sched.owner.secrets_ctl.list_secrets(recursive=True)", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def get_db_secrets():\n secret_response = secrets_client.get_secret_value(SecretId=db_secret_name)\n secrets = json.loads(secret_response['SecretString'])\n return secrets", "def secrets(self):\n return self._secrets_store", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret", "def cabinet_pod_secrets(self) -> Sequence['outputs.DataBoxHeavySecretResponse']:\n return pulumi.get(self, \"cabinet_pod_secrets\")", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def 
test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def get_secrets():\n client = datastore.Client()\n query = client.query(kind='env_vars')\n entity = query.fetch()\n secrets = list(entity)[0]\n return secrets" ]
[ "0.7848596", "0.77356535", "0.73634505", "0.71850795", "0.7145914", "0.6926122", "0.674381", "0.655007", "0.64121014", "0.639309", "0.6267915", "0.6263949", "0.6144681", "0.61244977", "0.6104293", "0.6090162", "0.6053545", "0.60486376", "0.601324", "0.59840655", "0.59817785", "0.5971765", "0.5966492", "0.5962005", "0.595862", "0.59378386", "0.59016454", "0.5863159", "0.58138025", "0.58063555" ]
0.77385426
1
Test list secrets when access token is not set.
def test_secrets_list_server_no_token(): message = "Please provide your access token" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) result = runner.invoke(cli, ["secrets-list"]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "async def list_secrets(self):\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_list_o_auth_access_token(self):\n pass", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def get_secrets(token):\n try:\n return get_keycloak_client().userinfo(token)['attributes'].get('secrets')\n except KeycloakError as ke:\n logger.error(\"Keycloak error: {0}\").format(ke)\n raise exceptions.TransferError\n except KeyError as ke:\n logger.error(\"Secrects not found in token.\")\n raise exceptions.TransferUnauthorized", "def secrets(self): # pylint: disable=no-self-use\n return []", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_get_invalid_secret(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get', kwargs={'secret': build_secret()}\n )\n )\n self.assertEqual(response.status_code, 401)", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing 
bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_env_access_token(context):\n os.environ[config.FLOWSERV_ACCESS_TOKEN] = '0001'\n assert context.access_token() == '0001'\n del os.environ[config.FLOWSERV_ACCESS_TOKEN]\n with pytest.raises(err.MissingConfigurationError):\n context.access_token()", "def test_authenticated_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').read,\r\n token)", "def list_secrets(self, MaxResults: int = None, NextToken: str = None) -> Dict:\n pass", "def test_get_secrets_does_not_retry_on_200(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=200, content=secret_data),\n self._mock_response(status=500, content=error_data)]\n self.client.get_secrets_data('fake/path')", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The 
root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret" ]
[ "0.71583724", "0.697432", "0.6764023", "0.6734302", "0.657616", "0.6429628", "0.63544613", "0.6260767", "0.6180775", "0.61721295", "0.61655974", "0.6159696", "0.6127158", "0.6099549", "0.60978544", "0.60681677", "0.60320014", "0.60044026", "0.59975713", "0.59924906", "0.5940487", "0.59056437", "0.5865818", "0.58229667", "0.5821601", "0.57871646", "0.5784048", "0.5770556", "0.57658297", "0.5753944" ]
0.77104926
0
Test adding secrets with wrong format.
def test_secrets_add_wrong_format(secret): reana_token = "000000" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) message = 'For literal strings use "SECRET_NAME=VALUE" format' result = runner.invoke(cli, ["secrets-add", "-t", reana_token, "--env", secret]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def testSecretKey(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n # It will always return a string, so we must compare to a string.\n assert parameters[\"secretKey\"] == \"12345\"\n # We can't predict what it will produce, so we just check to make sure that it's not null\n assert parameters[\"secretKeyGen\"] != \"null\"\n assert parameters[\"secretKeyGen\"] is not None", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n 
assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... ==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_fails_on_dict(self):\n invalid_credentials_dict_not_array_twine = \"\"\"\n {\n \"credentials\": {\n \"name\": \"MY_API_SECRET_KEY\",\n \"purpose\": \"Token for accessing a 3rd party API service\"\n }\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_dict_not_array_twine)", "def test_secrets_add_already_exist():\n status_code = 409\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n message = \"One of the secrets already exists. 
No secrets were added.\"\n mock_http_response = Mock(\n status_code=status_code,\n reason=\"Conflict\",\n json=Mock(return_value={\"message\": \"Conflict\"}),\n )\n rs_api_client_mock = Mock()\n rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response))\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\"reana_client.api.client.current_rs_api_client\", rs_api_client_mock):\n result = runner.invoke(\n cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", \"USER=reanauser\"]\n )\n assert message in result.output\n assert result.exit_code == 1", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_run_cmd_simple_positive_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"echo -n hello 8bca8d2e-1cd6\"\n assert utils.run_cmd(cmd, secrets=secrets) == \"hello *****\"\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def test_run_cmd_simple_negative_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls /tmp/this/file/683c08d7-bc07/isnotthere\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd, secrets=secrets)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that exception was sanitized\n for secret in secrets:\n assert secret not in str(excinfo.value)\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_create_rsa_container_w_invalid_key_names(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n secret_refs = [SecretRef(name='secret{0}'.format(i), ref=url)\n for i, url in enumerate(secret_urls)]\n container_resp = self.behaviors.create_container(\n 
'name', 'rsa', secret_refs)\n self.assertEqual(container_resp.status_code, 400)", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def _wrap_secret(self, val):\n return {\"SecretString\": val}", "def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be", "def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_bad_password_type(self):\n for val in [x for x in bad_data_typevals_list if not isinstance(x, basestring) and x is not None]:\n self.request.json_body = deepcopy(self.good_dict)\n self.request.json_body['password'] = val\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'password must be a string'))", "def test_plaintext_and_anoncrypt_raises_error(alice):\n with pytest.raises(ValueError):\n alice.pack({\"test\": \"test\"}, plaintext=True, anoncrypt=True)", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def secrets():\n click.echo(STEP_PATH / \"secrets\")" ]
[ "0.75044936", "0.707541", "0.70254934", "0.6977966", "0.67734826", "0.67173374", "0.6702236", "0.66481084", "0.6610407", "0.6495734", "0.642665", "0.6363775", "0.63510156", "0.6331783", "0.6269925", "0.6216668", "0.6207249", "0.61964357", "0.61894506", "0.6124794", "0.61163867", "0.6105825", "0.6076612", "0.60725117", "0.6063872", "0.60619366", "0.60597914", "0.5986713", "0.5979764", "0.5932464" ]
0.8482589
0
Test adding secrets when they already exist.
def test_secrets_add_already_exist(): status_code = 409 reana_token = "000000" env = {"REANA_SERVER_URL": "localhost"} message = "One of the secrets already exists. No secrets were added." mock_http_response = Mock( status_code=status_code, reason="Conflict", json=Mock(return_value={"message": "Conflict"}), ) rs_api_client_mock = Mock() rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response)) runner = CliRunner(env=env) with runner.isolation(): with patch("reana_client.api.client.current_rs_api_client", rs_api_client_mock): result = runner.invoke( cli, ["secrets-add", "-t", reana_token, "--env", "USER=reanauser"] ) assert message in result.output assert result.exit_code == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n 
get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def secretstore():\n pass", "def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... 
==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_add_exchange_empty_secret(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be", "def testSecretKey(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n # It will always return a string, so we must compare to a string.\n assert parameters[\"secretKey\"] == \"12345\"\n # We can't predict what it will produce, so we just check to make sure that it's not null\n assert parameters[\"secretKeyGen\"] != \"null\"\n assert parameters[\"secretKeyGen\"] is not None", "def test_credential_exists(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\" )\n test_credential.save_attributes()\n\n credential_exist = Credentials.credentials_exist(\"Instagram\")\n self.assertTrue(credential_exist)", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)", "def test_add_with_existing_key(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 201\n self.client.login(user='admin', password='admin')\n response = 
self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 409", "def secret() -> None:\n pass", "def test_keyring_exists_without_keyring(self, mock_keyring):\n mock_keyring.get_keyring.return_value = False\n self.assertFalse(keyring_exists())" ]
[ "0.7291631", "0.7147517", "0.6715984", "0.6684597", "0.66473454", "0.66376173", "0.6494984", "0.6443271", "0.6437272", "0.6393369", "0.629985", "0.6255083", "0.62139803", "0.61440545", "0.602208", "0.5996874", "0.5979041", "0.5953571", "0.59042215", "0.58320487", "0.58229697", "0.58182454", "0.5770143", "0.575439", "0.57336324", "0.57244754", "0.57242984", "0.57100505", "0.5703069", "0.5698558" ]
0.7203451
1
Optimized version of the generic paginate_query_across_partitioned_databases for case schedules. queue_schedule_instances uses a lock to ensure that the same case_id cannot be queued within one hour of another instance. The celery tasks handle_case_alert_schedule_instance and handle_case_timed_schedule_instance both use locks to ensure only one task is operating on a case at one time. Each task also checks if the schedule is still valid on this case before processing it further. Assumes that q_expression includes active = True
def _paginate_query_across_partitioned_databases(model_class, q_expression, load_source): from corehq.messaging.scheduling.scheduling_partitioned.models import ( CaseAlertScheduleInstance, CaseTimedScheduleInstance, ) if model_class not in (CaseAlertScheduleInstance, CaseTimedScheduleInstance): raise TypeError("Expected CaseAlertScheduleInstance or CaseTimedScheduleInstance") db_names = get_db_aliases_for_partitioned_query() for db_name in db_names: for row in _paginate_query(db_name, model_class, q_expression, load_source): yield row
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scheduling_method(self, cur_time, es, es_dict):\n dispatching_plan = []\n\n resource_types = self.resource_manager.resource_types\n avl_resources = self.resource_manager.current_availability\n system_capacity = self.resource_manager.system_capacity('nodes')\n\n # =======================================================================\n # Considered queued jobs: Jobs can be fitted in the current system state and less or equal than q_length\n # If a job_obj cannot be fitted or exceed the q_length is directly loaded in the dispatching decision using the no-solution dispatching tuple\n # =======================================================================\n priorized_jobs = SortedListWithKey(key=lambda job_tuple: job_tuple[1])\n\n current_qjobs = SortedList()\n\n cons_qjobs = {}\n for node in self.resource_manager.node_names:\n avl_res = avl_resources[node]\n # avl_res = system_capacity[node]\n for idx, job_obj in enumerate(es):\n job_id = job_obj.id\n\n if not (job_id in cons_qjobs):\n current_qjobs.add(job_id)\n cons_qjobs[job_id] = [False, 0, {}, None]\n priorized_jobs.add((job_id, self._job_priority_slowdown(job_obj, cur_time)))\n if self._reduced_model:\n possibilities = self._joint_nodes(job_obj, avl_res)\n if possibilities > 0:\n cons_qjobs[job_id][2][node] = min(possibilities, job_obj.requested_nodes)\n cons_qjobs[job_id][1] += possibilities\n if cons_qjobs[job_id][1] >= job_obj.requested_nodes:\n cons_qjobs[job_id][0] = True\n if not cons_qjobs[job_id][3]:\n cons_qjobs[job_id][3] = job_obj\n else:\n cons_qjobs[job_id][0] = True\n cons_qjobs[job_id][1] = None\n cons_qjobs[job_id][2] = None\n cons_qjobs[job_id][3] = job_obj\n\n qjobs = 0\n wc_makespan = 0\n makespans = []\n\n selected_priorized_jobs = []\n\n # Job of the dispatching decision\n decision_jobs = {}\n\n if self._reduced_model:\n for job_id, _ in priorized_jobs:\n t = cons_qjobs[job_id]\n if not t[0] or qjobs > self._cur_q_length - 1:\n decision_jobs[job_id] = self.dispatching_tuple(job_id)\n cons_qjobs.pop(job_id)\n else:\n exp_duration = max(1, t[-1].expected_duration)\n wc_makespan += exp_duration\n makespans.append(exp_duration)\n qjobs += 1\n selected_priorized_jobs.append(job_id)\n else:\n cannot_start_selected = 0\n for job_id, _ in priorized_jobs:\n t = cons_qjobs[job_id]\n if (not t[0] and cannot_start_selected >= self._considered_cannot_start) or (\n qjobs > self._cur_q_length - 1):\n decision_jobs[job_id] = self.dispatching_tuple(job_id)\n cons_qjobs.pop(job_id)\n else:\n if not t[0]:\n cons_qjobs[job_id][3] = es_dict[job_id]\n cannot_start_selected += 1\n exp_duration = max(1, t[-1].expected_duration)\n wc_makespan += exp_duration # , self.get_queue(t[-1].queue)) # exp_duration\n makespans.append(exp_duration)\n qjobs += 1\n selected_priorized_jobs.append(job_id)\n # =======================================================================\n # There are no jobs to dispatch at the current system state.\n # Then a no solution list is returned.\n # =======================================================================\n if not cons_qjobs:\n # Job Dispatching skip\n return decision_jobs.values(), []\n\n solved = False\n self.priorized_jobs = None\n\n if self._safe:\n manager = mp_dill.Manager()\n schedule_plan = manager.dict()\n process_class = mp_dill.Process\n\n p = process_class(target=getattr(self, 'cp_model'),\n args=(\n schedule_plan, cur_time, cons_qjobs, selected_priorized_jobs, es_dict, resource_types,\n avl_resources),\n kwargs={'timelimit': timelimit}\n )\n p.start()\n p.join()\n\n if p.exitcode 
!= 0:\n schedule_plan.pop('solver_state', None)\n schedule_plan.pop('limit_reached', None)\n return list(decision_jobs.values()) \\\n + [self.dispatching_tuple(job_id, start_time, nodes) for (start_time, job_id, nodes) in\n schedule_plan.values()] \\\n + [self.dispatching_tuple(job_id, None, []) for job_id in cons_qjobs if\n not (job_id in schedule_plan)], []\n else:\n schedule_plan = {}\n args = (\n schedule_plan, cur_time, cons_qjobs, selected_priorized_jobs, es_dict, resource_types, avl_resources)\n kwargs = {'max_timelimit': self._max_timelimit}\n function = getattr(self, 'cp_model')\n function(*args, **kwargs)\n\n solved = schedule_plan.pop('solved')\n of_value = schedule_plan.pop('of_value')\n walltime = schedule_plan.pop('walltime')\n proc_time = schedule_plan.pop('proc_time')\n incurred_time = walltime + proc_time\n failures = schedule_plan.pop('failures')\n branches = schedule_plan.pop('branches')\n p = None\n\n self.priorized_jobs = None\n dispatching_plan = list(schedule_plan.values())\n self.__instance_data = (\n solved, of_value, walltime, incurred_time, failures, branches,\n dispatching_plan + list(decision_jobs.values()),)\n\n # This is useful for print and also to create the unsuccessful data\n dispatched_jobs = 0\n queued_job_ids = []\n for a in dispatching_plan:\n if a[2]:\n dispatched_jobs += 1\n if dispatched_jobs == 0:\n queued_job_ids.append(a[1])\n\n if self._reduce_job_length:\n # ===================================================================\n # The considered number of jobs in the next scheduling decision are reduced to the half\n # if the current problem instance was not solved, if the current usage is\n # leq of the previous time point. After a successful dispatching this value is reset.\n # The minimum is 1, otherwise there will be nothing to dispatch\n # ===================================================================\n if not solved:\n self._cur_q_length = max(1, min(self._cur_q_length,\n len(schedule_plan)) // 2) # max(1, self._cur_q_length // 2)\n else:\n self._cur_q_length = self._q_length\n\n print('{} - {}: Queued {}, Dispatched {}, Running {}. {}'.format(self._counter, cur_time,\n len(es) - dispatched_jobs, dispatched_jobs,\n len(self.resource_manager.current_allocations),\n self.resource_manager.current_usage))\n return dispatching_plan + list(decision_jobs.values()), []", "def schedule_metadata_tasks():\n # Some metadata tasks will abort if higher precedence tasks are in\n # progress. Avoid scheduling these tasks. 
The priority here is to\n # get the result of an in-progress metadata operation if one exists.\n for instance in models.Instance.query():\n queue = None\n if instance.active_metadata_update:\n if instance.active_metadata_update.url:\n # Enqueue task to check the in-progress metadata operation.\n queue = 'check-instance-metadata-operation'\n else:\n # Enqueue task to start a metadata operation.\n queue = 'update-instance-metadata'\n elif instance.pending_metadata_updates:\n # Enqueue task to compress a list of desired metadata updates.\n queue = 'compress-instance-metadata-updates'\n if queue:\n utilities.enqueue_task(queue, instance.key)", "def execute( self ):\n\n # This allows dynamic changing of the throughput timescale\n self.throughputTimescale = self.am_getOption( 'ThroughputTimescale', 3600 )\n self.throughputTimescale = 60 * 60 * 1\n #print 'ThroughputTimescale:',self.throughputTimescale\n ######################################################################################\n #\n # Obtain information on the current state of the channel queues\n #\n\n res = self.TransferDB.getChannelQueues()\n if not res['OK']:\n errStr = \"ReplicationScheduler._execute: Failed to get channel queues from TransferDB.\"\n gLogger.error( errStr, res['Message'] )\n return S_OK()\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No active channels found for replication.\" )\n return S_OK()\n channels = res['Value']\n\n res = self.TransferDB.getChannelObservedThroughput( self.throughputTimescale )\n if not res['OK']:\n errStr = \"ReplicationScheduler._execute: Failed to get observed throughput from TransferDB.\"\n gLogger.error( errStr, res['Message'] )\n return S_OK()\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No active channels found for replication.\" )\n return S_OK()\n bandwidths = res['Value']\n\n self.strategyHandler = StrategyHandler( bandwidths, channels, self.section )\n\n processedRequests = []\n requestsPresent = True\n while requestsPresent:\n\n ######################################################################################\n #\n # The first step is to obtain a transfer request from the RequestDB which should be scheduled.\n #\n\n gLogger.info( \"ReplicationScheduler._execute: Contacting RequestDB for suitable requests.\" )\n res = self.RequestDB.getRequest( 'transfer' )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get a request list from RequestDB.\", res['Message'] )\n continue\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No requests found in RequestDB.\" )\n requestsPresent = False\n return S_OK()\n requestString = res['Value']['RequestString']\n requestName = res['Value']['RequestName']\n gLogger.info( \"ReplicationScheduler._execute: Obtained Request %s from RequestDB.\" % ( requestName ) )\n\n ######################################################################################\n #\n # The request must then be parsed to obtain the sub-requests, their attributes and files.\n #\n\n logStr = 'ReplicationScheduler._execute: Parsing Request %s.' % ( requestName )\n gLogger.info( logStr )\n oRequest = RequestContainer( requestString )\n res = oRequest.getAttribute( 'RequestID' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to get requestID.', res['Message'] )\n return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' 
)\n requestID = res['Value']\n if requestID in processedRequests:\n # Break the loop once we have iterated once over all requests\n res = self.RequestDB.updateRequest( requestName, requestString )\n if not res['OK']:\n gLogger.error( \"Failed to update request\", \"%s %s\" % ( requestName, res['Message'] ) )\n return S_OK()\n\n processedRequests.append( requestID )\n\n res = oRequest.getNumSubRequests( 'transfer' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to get number of sub-requests.', res['Message'] )\n return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' )\n numberRequests = res['Value']\n gLogger.info( \"ReplicationScheduler._execute: '%s' found with %s sub-requests.\" % ( requestName, numberRequests ) )\n\n ######################################################################################\n #\n # The important request attributes are the source and target SEs.\n #\n\n for ind in range( numberRequests ):\n gLogger.info( \"ReplicationScheduler._execute: Treating sub-request %s from '%s'.\" % ( ind, requestName ) )\n attributes = oRequest.getSubRequestAttributes( ind, 'transfer' )['Value']\n if attributes['Status'] != 'Waiting':\n # If the sub-request is already in terminal state\n gLogger.info( \"ReplicationScheduler._execute: Sub-request %s is status '%s' and not to be executed.\" % ( ind, attributes['Status'] ) )\n continue\n\n sourceSE = attributes['SourceSE']\n targetSE = attributes['TargetSE']\n \"\"\" This section should go in the transfer request class \"\"\"\n if type( targetSE ) in types.StringTypes:\n if re.search( ',', targetSE ):\n targetSEs = targetSE.split( ',' )\n else:\n targetSEs = [targetSE]\n \"\"\"----------------------------------------------------- \"\"\"\n operation = attributes['Operation']\n reqRepStrategy = None\n if operation in self.strategyHandler.getSupportedStrategies():\n reqRepStrategy = operation\n\n ######################################################################################\n #\n # Then obtain the file attribute of interest are the LFN and FileID\n #\n\n res = oRequest.getSubRequestFiles( ind, 'transfer' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to obtain sub-request files.' 
, res['Message'] )\n continue\n files = res['Value']\n gLogger.info( \"ReplicationScheduler._execute: Sub-request %s found with %s files.\" % ( ind, len( files ) ) )\n filesDict = {}\n for file in files:\n lfn = file['LFN']\n if file['Status'] != 'Waiting':\n gLogger.debug( \"ReplicationScheduler._execute: %s will not be scheduled because it is %s.\" % ( lfn, file['Status'] ) )\n else:\n fileID = file['FileID']\n filesDict[lfn] = fileID\n if not filesDict:\n gLogger.info( \"ReplicationScheduler._execute: No Waiting files found for request\" )\n continue\n notSched = len( files ) - len( filesDict )\n if notSched:\n gLogger.info( \"ReplicationScheduler._execute: %d files found not Waiting\" % notSched )\n\n ######################################################################################\n #\n # Now obtain replica information for the files associated to the sub-request.\n #\n\n lfns = filesDict.keys()\n gLogger.info( \"ReplicationScheduler._execute: Obtaining replica information for %d sub-request files.\" % len( lfns ) )\n res = self.rm.getCatalogReplicas( lfns )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replica information.\", res['Message'] )\n continue\n for lfn, failure in res['Value']['Failed'].items():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replicas.\", '%s: %s' % ( lfn, failure ) )\n replicas = res['Value']['Successful']\n if not replicas.keys():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replica information for all files.\" )\n continue\n\n ######################################################################################\n #\n # Now obtain the file sizes for the files associated to the sub-request.\n #\n\n lfns = replicas.keys()\n gLogger.info( \"ReplicationScheduler._execute: Obtaining file sizes for %d sub-request files.\" % len( lfns ) )\n res = self.rm.getCatalogFileMetadata( lfns )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get file size information.\", res['Message'] )\n continue\n for lfn, failure in res['Value']['Failed'].items():\n gLogger.error( 'ReplicationScheduler._execute: Failed to get file size.', '%s: %s' % ( lfn, failure ) )\n metadata = res['Value']['Successful']\n if not metadata.keys():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get metadata for all files.\" )\n continue\n\n ######################################################################################\n #\n # For each LFN determine the replication tree\n #\n\n for lfn in sortList( metadata.keys() ):\n fileSize = metadata[lfn]['Size']\n lfnReps = replicas[lfn]\n fileID = filesDict[lfn]\n\n targets = []\n for targetSE in targetSEs:\n if targetSE in lfnReps.keys():\n gLogger.debug( \"ReplicationScheduler.execute: %s already present at %s.\" % ( lfn, targetSE ) )\n else:\n targets.append( targetSE )\n if not targets:\n gLogger.info( \"ReplicationScheduler.execute: %s present at all targets.\" % lfn )\n oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )\n continue\n if not lfnReps:\n gLogger.error( \"ReplicationScheduler.execute: The file has no replicas.\", lfn )\n continue\n res = self.strategyHandler.determineReplicationTree( sourceSE, targets, lfnReps, fileSize, strategy = reqRepStrategy )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler.execute: Failed to determine replication tree.\", res['Message'] )\n continue\n tree = res['Value']\n\n 
######################################################################################\n #\n # For each item in the replication tree obtain the source and target SURLS\n #\n\n for channelID, dict in tree.items():\n gLogger.info( \"ReplicationScheduler.execute: processing for channel %d %s\" % ( channelID, str( dict ) ) )\n hopSourceSE = dict['SourceSE']\n hopDestSE = dict['DestSE']\n hopAncestor = dict['Ancestor']\n\n # Get the sourceSURL\n if hopAncestor:\n status = 'Waiting%s' % ( hopAncestor )\n res = self.obtainLFNSURL( hopSourceSE, lfn )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( errStr )\n return S_ERROR( errStr )\n sourceSURL = res['Value']\n else:\n status = 'Waiting'\n res = self.resolvePFNSURL( hopSourceSE, lfnReps[hopSourceSE] )\n if not res['OK']:\n sourceSURL = lfnReps[hopSourceSE]\n else:\n sourceSURL = res['Value']\n\n # Get the targetSURL\n res = self.obtainLFNSURL( hopDestSE, lfn )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( errStr )\n return S_ERROR( errStr )\n targetSURL = res['Value']\n\n ######################################################################################\n #\n # For each item in the replication tree add the file to the channel\n #\n res = self.TransferDB.addFileToChannel( channelID, fileID, hopSourceSE, sourceSURL, hopDestSE, targetSURL, fileSize, fileStatus = status )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to add File to Channel.\" , \"%s %s\" % ( fileID, channelID ) )\n return S_ERROR( errStr )\n res = self.TransferDB.addFileRegistration( channelID, fileID, lfn, targetSURL, hopDestSE )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to add File registration.\" , \"%s %s\" % ( fileID, channelID ) )\n result = self.TransferDB.removeFileFromChannel( channelID, fileID )\n if not result['OK']:\n errStr += result['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to remove File.\" , \"%s %s\" % ( fileID, channelID ) )\n return S_ERROR( errStr )\n oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Scheduled' )\n res = self.TransferDB.addReplicationTree( fileID, tree )\n\n if oRequest.isSubRequestEmpty( ind, 'transfer' )['Value']:\n oRequest.setSubRequestStatus( ind, 'transfer', 'Scheduled' )\n\n ################################################\n # Generate the new request string after operation\n requestString = oRequest.toXML()['Value']\n res = self.RequestDB.updateRequest( requestName, requestString )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to update request\", \"%s %s\" % ( requestName, res['Message'] ) )", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if 
inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)", "def instance_backup_schedule_update(self, context, instance_uuid,\n schedule):\n metadata = self._instance_metadata(context, instance_uuid)\n schedule_key = meta.BACKUP_SCHEDULE_KEY\n active_key = meta.BACKUP_ACTIVE_KEY\n if schedule and len(schedule) > 0:\n # Sort items by frequency\n sorted_schedule = sorted(schedule,\n key=lambda item: item[meta.SCHEDULE_FREQUENCY_KEY])\n metadata[schedule_key] = jsonutils.dumps(sorted_schedule)\n metadata[active_key] = True # This lingers forever, on purpose.\n self._instance_metadata_update(context, instance_uuid, metadata)\n return sorted_schedule\n else:\n metadata[schedule_key] = jsonutils.dumps([])\n self._instance_metadata_update(context, instance_uuid, metadata)\n return []", "async def get_scheduled_flow_runs(\n work_pool_name: str = Path(..., description=\"The work pool name\", alias=\"name\"),\n work_pool_queue_names: List[str] = Body(\n None, description=\"The names of work pool queues\"\n ),\n scheduled_before: DateTimeTZ = Body(\n None, description=\"The maximum time to look for scheduled flow runs\"\n ),\n scheduled_after: DateTimeTZ = Body(\n None, description=\"The minimum time to look for scheduled flow runs\"\n ),\n limit: int = dependencies.LimitBody(),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> List[schemas.responses.WorkerFlowRunResponse]:\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session, work_pool_name=work_pool_name\n )\n\n if work_pool_queue_names is None:\n work_pool_queue_ids = None\n else:\n work_pool_queue_ids = []\n for qn in work_pool_queue_names:\n work_pool_queue_ids.append(\n await 
worker_lookups._get_work_pool_queue_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=qn,\n )\n )\n\n queue_response = await models.workers.get_scheduled_flow_runs(\n session=session,\n db=db,\n work_pool_ids=[work_pool_id],\n work_pool_queue_ids=work_pool_queue_ids,\n scheduled_before=scheduled_before,\n scheduled_after=scheduled_after,\n limit=limit,\n )\n\n return queue_response", "def run(delayed, concurrency, version_type=None, queue=None, raise_on_error=True):\n if delayed:\n celery_kwargs = {\n \"kwargs\": {\n \"version_type\": version_type,\n \"search_bulk_kwargs\": {\"raise_on_error\": raise_on_error},\n }\n }\n click.secho(\n \"Starting {0} tasks for indexing records...\".format(concurrency), fg=\"green\"\n )\n if queue is not None:\n celery_kwargs.update({\"queue\": queue})\n for c in range(0, concurrency):\n process_bulk_queue.apply_async(**celery_kwargs)\n else:\n click.secho(\"Indexing records...\", fg=\"green\")\n RecordIndexer(version_type=version_type).process_bulk_queue(\n search_bulk_kwargs={\"raise_on_error\": raise_on_error}\n )", "def _run_queries(self, queries: List[Query]) -> None:\n QUERY_TASK_LIMIT = 250\n\n while queries or self._running_queries:\n if queries:\n logger.debug(f\"Starting a new loop, {len(queries)} queries queued\")\n self._fill_query_slots(queries)\n query_tasks = self.get_running_query_tasks()[:QUERY_TASK_LIMIT]\n logger.debug(f\"Checking for results of {len(query_tasks)} query tasks\")\n for query_result in self._get_query_results(query_tasks):\n self._handle_query_result(query_result)\n time.sleep(0.5)", "def queue_fetch(model_admin, request, queryset):\n for locator in queryset:\n locator.queue_fetch()", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was 
{0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n num_workers = g.num_query_queue_workers\r\n wq = WorkQueue(num_workers = num_workers)\r\n wq.start()\r\n\r\n while True:\r\n job = None\r\n #limit the total number of jobs in the WorkQueue. we don't\r\n #need to load the entire db queue right away (the db queue can\r\n #get quite large).\r\n if len(running) < 2 * num_workers:\r\n with running_lock:\r\n iden, pickled_cr = get_query()\r\n if pickled_cr is not None:\r\n if not iden in running:\r\n running.add(iden)\r\n job = make_query_job(iden, pickled_cr)\r\n wq.add(job)\r\n\r\n #if we didn't find a job, sleep before trying again\r\n if not job:\r\n time.sleep(1)", "def _reclaim_queued_deletes(self, context):\n interval = CONF.reclaim_instance_interval\n if interval <= 0:\n LOG.debug(\"CONF.reclaim_instance_interval <= 0, skipping...\")\n return\n\n # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.\n # The only case that the quota might be inconsistent is\n # the cloud node died between set instance state to SOFT_DELETED\n # and quota commit to DB. When cloud node starts again\n # it will have no idea the reservation is committed or not or even\n # expired, since it's a rare case, so marked as todo.\n quotas = objects.Quotas.from_reservations(context, None)\n\n filters = {'vm_state': vm_states.SOFT_DELETED,\n 'task_state': None,\n 'host': self.host}\n instances = objects.InstanceList.get_by_filters(\n context, filters,\n expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,\n use_slave=True)\n for instance in instances:\n if self._deleted_old_enough(instance, interval):\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid)\n LOG.info(_LI('Reclaiming deleted instance'), instance=instance)\n try:\n self._delete_instance(context, instance, bdms, quotas)\n except Exception as e:\n LOG.warning(_LW(\"Periodic reclaim failed to delete \"\n \"instance: %s\"),\n e, instance=instance)", "def _schedule(self, context, spec_obj):\n elevated = context.elevated()\n\n # Find our local list of acceptable hosts by repeatedly\n # filtering and weighing our options. 
Each time we choose a\n # host, we virtually consume resources on it so subsequent\n # selections can adjust accordingly.\n\n # Note: remember, we are using an iterator here. So only\n # traverse this list once. This can bite you if the hosts\n # are being scanned in a filter or weighing function.\n\n # If the request is for a preemptible instace, take into account all\n # resources used on the host. However, if the request is for a normal\n # instance, do not take into account the preemptible instances. This\n # way we can schedule normal requests even when there is no room for\n # them without doing a retry cycle.\n\n if self._is_preemptible_request(spec_obj):\n hosts = self._get_all_host_states(elevated, partial=False)\n else:\n hosts = self._get_all_host_states(elevated, partial=True)\n\n hosts_full_state = self._get_all_host_states(elevated, partial=False)\n\n selected_hosts = []\n num_instances = spec_obj.num_instances\n for num in range(num_instances):\n # Filter local hosts based on requirements ...\n hosts = self.host_manager.get_filtered_hosts(hosts,\n spec_obj, index=num)\n if not hosts:\n # Can't get any more locally.\n break\n\n LOG.debug(\"Filtered %(hosts)s\", {'hosts': hosts})\n\n # Get the full host states for weighing. The filtered list of\n # hosts does not take into account preemptible instances, but we\n # need them for weighing\n\n hosts_full_state = list(hosts_full_state)\n\n filtered_hosts = {(h.host, h.nodename): h for h in hosts}\n hosts_aux = [h for h in hosts_full_state\n if (h.host, h.nodename) in filtered_hosts]\n weighed_hosts = self.host_manager.get_weighed_hosts(hosts_aux,\n spec_obj)\n\n LOG.debug(\"Weighed %(hosts)s\", {'hosts': weighed_hosts})\n\n scheduler_host_subset_size = CONF.scheduler_host_subset_size\n if scheduler_host_subset_size > len(weighed_hosts):\n scheduler_host_subset_size = len(weighed_hosts)\n if scheduler_host_subset_size < 1:\n scheduler_host_subset_size = 1\n\n chosen_host = random.choice(\n weighed_hosts[0:scheduler_host_subset_size])\n LOG.debug(\"Selected host: %(host)s\", {'host': chosen_host})\n selected_hosts.append(chosen_host)\n\n # Now consume the resources so the filter/weights\n # will change for the next instance.\n\n # First update the chosen host, that is from the full state list\n chosen_host.obj.consume_from_request(spec_obj)\n\n # Now consume from the partial state list\n host = chosen_host.obj.host\n node = chosen_host.obj.nodename\n state_key = (host, node)\n filtered_hosts[state_key].consume_from_request(spec_obj)\n\n # Now continue with the rest of the scheduling function\n if spec_obj.instance_group is not None:\n spec_obj.instance_group.hosts.append(chosen_host.obj.host)\n # hosts has to be not part of the updates when saving\n spec_obj.instance_group.obj_reset_changes(['hosts'])\n\n return selected_hosts", "def test_singleton_reschedule(self):\n dbpool = buildConnectionPool(self, jobSchema + schemaText)\n\n qpool = yield self._enqueue(dbpool, 1, 2, cl=DummyWorkSingletonItem, notBefore=datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n @inlineCallbacks\n def allWork(txn):\n jobs = yield JobItem.all(txn)\n work = [((yield job.workItem()), job) for job in jobs]\n returnValue(filter(lambda x: x[0], work))\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n def _reschedule_force(txn, force):\n txn._queuer = qpool\n return DummyWorkSingletonItem.reschedule(txn, 60, force=force)\n yield 
inTransaction(dbpool.connection, _reschedule_force, force=False)\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n yield inTransaction(dbpool.connection, _reschedule_force, force=True)\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore != datetime.datetime(2014, 5, 17, 12, 0, 0))", "def _plan_workorders(self, replan=False):\n self.ensure_one()\n\n if not self.workorder_ids:\n return\n # Schedule all work orders (new ones and those already created)\n qty_to_produce = max(self.product_qty - self.qty_produced, 0)\n qty_to_produce = self.product_uom_id._compute_quantity(qty_to_produce, self.product_id.uom_id)\n start_date = max(self.date_planned_start, datetime.datetime.now())\n if replan:\n workorder_ids = self.workorder_ids.filtered(lambda wo: wo.state in ['ready', 'pending'])\n # We plan the manufacturing order according to its `date_planned_start`, but if\n # `date_planned_start` is in the past, we plan it as soon as possible.\n workorder_ids.leave_id.unlink()\n else:\n workorder_ids = self.workorder_ids.filtered(lambda wo: not wo.date_planned_start)\n for workorder in workorder_ids:\n workcenters = workorder.workcenter_id | workorder.workcenter_id.alternative_workcenter_ids\n\n best_finished_date = datetime.datetime.max\n vals = {}\n for workcenter in workcenters:\n # compute theoretical duration\n if workorder.workcenter_id == workcenter:\n duration_expected = workorder.duration_expected\n else:\n duration_expected = workorder._get_duration_expected(alternative_workcenter=workcenter)\n\n from_date, to_date = workcenter._get_first_available_slot(start_date, duration_expected)\n # If the workcenter is unavailable, try planning on the next one\n if not from_date:\n continue\n # Check if this workcenter is better than the previous ones\n if to_date and to_date < best_finished_date:\n best_start_date = from_date\n best_finished_date = to_date\n best_workcenter = workcenter\n vals = {\n 'workcenter_id': workcenter.id,\n 'duration_expected': duration_expected,\n }\n\n # If none of the workcenter are available, raise\n if best_finished_date == datetime.datetime.max:\n raise UserError(_('Impossible to plan the workorder. 
Please check the workcenter availabilities.'))\n\n # Instantiate start_date for the next workorder planning\n if workorder.next_work_order_id:\n start_date = best_finished_date\n\n # Create leave on chosen workcenter calendar\n leave = self.env['resource.calendar.leaves'].create({\n 'name': workorder.display_name,\n 'calendar_id': best_workcenter.resource_calendar_id.id,\n 'date_from': best_start_date,\n 'date_to': best_finished_date,\n 'resource_id': best_workcenter.resource_id.id,\n 'time_type': 'other'\n })\n vals['leave_id'] = leave.id\n workorder.write(vals)\n self.with_context(force_date=True).write({\n 'date_planned_start': self.workorder_ids[0].date_planned_start,\n 'date_planned_finished': self.workorder_ids[-1].date_planned_finished\n })", "def domain_check_threading_manage(next_domain_info: typing.Callable[\n [],\n typing.Tuple[\n Domain,\n typing.List[typing.Tuple[LocationHint, Location]],\n typing.List[typing.Tuple[MeasurementResult, Location]]\n ]],\n increment_domain_type_count: typing.Callable[\n [DomainLocationType], None],\n increment_count_for_type: typing.Callable[\n [LocationCodeType], None],\n ripe_create_sema: mp.Semaphore,\n ripe_slow_down_sema: mp.Semaphore,\n bill_to_address: str,\n wo_measurements: bool,\n allowed_measurement_age: int,\n api_key: str,\n measurement_strategy: MeasurementStrategy,\n number_of_probes_per_measurement: int,\n buffer_time: float,\n packets_per_measurement: int,\n use_efficient_probes: bool,\n location_to_probes_dct: typing.Dict[\n str, typing.Tuple[RipeAtlasProbe, float, Location]],\n measurement_results_queue: queue.Queue,\n stop_without_old_results: bool):\n logger.debug('thread started')\n\n def get_domains() -> typing.Generator[typing.Tuple[Domain, typing.List[LocationHint]],\n None, None]:\n while True:\n domain_hints_tuple = next_domain_info()\n if domain_hints_tuple is not None:\n yield domain_hints_tuple\n else:\n break\n\n for domain, location_hints, measurement_result_tuples in get_domains():\n try:\n logger.debug('next domain %s', domain.name)\n ip_version = constants.IPV4_IDENTIFIER if domain.ipv4_address else \\\n constants.IPV6_IDENTIFIER\n check_domain_location_ripe(domain, location_hints, increment_domain_type_count,\n increment_count_for_type, ripe_create_sema,\n ripe_slow_down_sema, ip_version, bill_to_address,\n wo_measurements, allowed_measurement_age, api_key,\n measurement_strategy, number_of_probes_per_measurement,\n buffer_time, packets_per_measurement, use_efficient_probes,\n location_to_probes_dct, measurement_result_tuples,\n measurement_results_queue, stop_without_old_results)\n except Exception:\n logger.exception('Check Domain Error %s', domain.name)\n\n logger.debug('Thread finished')", "def start_scans_for_lists_who_are_up_for_scanning() -> Task:\n\n tasks = []\n\n for urllist in UrlList.objects.all().filter():\n # this also gets the lists that are not scanned. The scan date needs to progress, otherwise it will be\n # scanned instantly when the list will be enabled. 
This also goes for deleted lists.\n if urllist.enable_scans is False or urllist.is_deleted is True:\n urllist.renew_scan_moment()\n continue\n\n if urllist.is_due_for_scanning():\n tasks.append(initialize_scan.si(urllist))\n\n # placed here, as otherwise the list is never due for scanning as the date might be updated to something\n # new in the future.\n urllist.renew_scan_moment()\n\n # using this in create_function_job so a job is created, allowing for tracking this a bit\n return group(tasks)", "def consistency_function(p_message_queue,\n p_consistency_results,\n p_last_balanced,\n p_database_lock,\n p_continue_consistency,\n consistency_rate = 2, # queries per second\n query_timeout = 5,\n VERBOSE = False,\n worker_num = 0):\n \n print(\"w{}: Consistency thread started.\".format(worker_num))\n \n # path to worker's database\n data_file = os.path.join(\"databases\",\"worker_{}_database.csv\".format(worker_num))\n \n # wait until there are data results in database\n next_im_id = -1\n while next_im_id < 5:\n time.sleep(0.1)\n with p_last_balanced.get_lock():\n next_im_id = p_last_balanced.value\n \n prev_time = time.time()\n active_queries = {} \n \n # get continue_val\n with p_continue_consistency.get_lock():\n continue_val = p_continue_consistency.value\n \n # continue until the val of p_continue_consistency is changed by heartbeat thread exiting\n while continue_val:\n \n if time.time() > prev_time + 1/consistency_rate:\n \n # cycle backwards through im_ids\n next_im_id -= 1\n if next_im_id < 0:\n with p_last_balanced.get_lock():\n next_im_id = p_last_balanced.value\n # add query to dict of active queries\n active_queries[next_im_id] = {\"time_in\": time.time(),\n \"vals\": [get_im_data(data_file,next_im_id,p_database_lock)[0]]}\n \n # forward consistency query to all other workers via message queue\n # the True indicates that this is an internal request\n message = (\"query_request\", (next_im_id,worker_num,True))\n p_message_queue.put(message)\n #if VERBOSE: print(\"w{}: Consistency query for im {} requested.\".format(worker_num,next_im_id))\n \n # parse results from consistency results queue\n while True:\n try:\n (query_data,query_im_id) = p_consistency_results.get(timeout = 0) \n prev_time = time.time()\n # add if still active\n if query_im_id in active_queries.keys():\n active_queries[query_im_id][\"vals\"].append(query_data) \n #if VERBOSE: print(\"w{}: Parsed consistency response for im {}.\".format(worker_num,query_im_id))\n except queue.Empty:\n break\n \n # cycle through active queries and return result for all that have timed out\n timed_out = []\n for id_tag in active_queries:\n prev_time = time.time()\n query = active_queries[id_tag]\n if query['time_in'] + query_timeout < time.time():\n # get most common val by comparing unique hashes\n hash_dict = {}\n for data in query[\"vals\"]:\n if type(data) == np.ndarray: # make sure None doesn't become the most common value\n data_hash = hash(data.tostring())\n if data_hash in hash_dict.keys():\n hash_dict[data_hash][\"count\"] +=1\n else:\n hash_dict[data_hash] = {}\n hash_dict[data_hash][\"count\"] = 1\n hash_dict[data_hash][\"data\"] = data\n \n # count hashes\n most_common_data = None\n count = 0\n for data_hash in hash_dict:\n if hash_dict[data_hash][\"count\"] > count:\n count = hash_dict[data_hash][\"count\"]\n most_common_data = hash_dict[data_hash][\"data\"]\n \n # lastly, compare to own data and see if count is greater than\n # num_validators on previous data. 
If not, send own value and \n # don't update own value\n (own_data, own_num_validators) = get_im_data(data_file,id_tag,p_database_lock)\n if own_num_validators < count:\n assert len(most_common_data[0]) > 0, print(\"most_common_data isn't valid\")\n update_data(data_file,count,most_common_data,p_database_lock)\n if VERBOSE: print(\"w{}: Consistency update on im {} with {} validators.\".format(worker_num,id_tag,count))\n \n timed_out.append(id_tag)\n \n # remove all handled requests\n timed_out.reverse()\n for tag in timed_out:\n del active_queries[tag]\n \n # determine whether to continue using shared variable with heartbeat thread\n with p_continue_consistency.get_lock():\n continue_val = p_continue_consistency.value\n \n print(\"{}: Consistency thread exited.\".format(worker_num))", "def runTasks(self):\n\n self.logger.INFO(\n f\"STARTING TASKS FOR TRADER {self.user['Name']} - ACCOUNT ID: {self.account_id}\\n\")\n\n def selectSleep():\n \"\"\"\n PRE-MARKET(0400 - 0930 ET): 5 SECONDS\n MARKET OPEN(0930 - 1600 ET): 5 SECONDS\n AFTER MARKET(1600 - 2000 ET): 5 SECONDS\n\n WEEKENDS: 60 SECONDS\n WEEKDAYS(2000 - 0400 ET): 60 SECONDS\n\n EVERYTHING WILL BE BASED OFF CENTRAL TIME\n\n OBJECTIVE IS TO FREE UP UNNECESSARY SERVER USAGE\n \"\"\"\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekends = [\"Sat\", \"Sun\"]\n\n # IF CURRENT TIME GREATER THAN 8PM AND LESS THAN 4AM, OR DAY IS WEEKEND, THEN RETURN 60 SECONDS\n if tm > \"20:00\" or tm < \"04:00\" or day in weekends:\n\n return 60\n\n # ELSE RETURN 5 SECONDS\n return 5\n\n while self.isAlive:\n\n try:\n\n # RUN TASKS ####################################################\n self.killQueueOrder()\n\n self.updateAccountBalance()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n tm = dt_central.time().strftime(\"%H:%M\")\n\n if tm == \"08:30\": # set this based on YOUR timezone\n\n if not self.check_options:\n\n self.sellOptionsAtExpiration()\n\n self.check_options = True\n\n else:\n\n self.check_options = False\n\n # IF MIDNIGHT, ADD BALANCE, PROFIT/LOSS TO HISTORY\n if tm == \"23:55\":\n\n if not self.midnight:\n\n self.balanceHistory()\n\n self.profitLossHistory()\n\n self.midnight = True\n\n else:\n\n self.midnight = False\n\n except KeyError:\n\n self.isAlive = False\n\n except Exception:\n\n self.logger.ERROR(\n f\"ACCOUNT ID: {self.account_id} - TRADER: {self.user['Name']}\")\n\n finally:\n\n time.sleep(selectSleep())\n\n self.logger.INFO(f\"TASK STOPPED FOR ACCOUNT ID {self.account_id}\")", "def onetime_query_state_locks(config, acon_query, acon_pg, query, args={}, num_workers=0):\n\n\tcurs_query = acon_query.cursor()\n\tcurs_pg = acon_pg.cursor()\n\tcurs_query.execute(\"select pg_advisory_lock(1);\")\n\tcurs_pg.execute(\"select pg_advisory_lock(2);\")\n\twait(acon_query)\n\twait(acon_pg)\n\tcurs_pg.execute(\"select pg_advisory_lock(1);\")\n\tset_guc(acon_query, 'enable_mergejoin', 'off')\n\tset_guc(acon_query, 'max_parallel_workers_per_gather', num_workers)\n\tcurs_query.execute(query)\n\t# extract current state of query progress\n\tMAX_PG_QS_RETRIES = 10\n\tDELAY_BETWEEN_RETRIES = 0.1\n\tpg_qs_args = {\n\t\t\t'config': config,\n\t\t\t'pid': acon_query.get_backend_pid(),\n\t\t\t'conn': acon_pg\n\t\t\t}\n\tfor k, v in args.items():\n\t\tpg_qs_args[k] = v\n\tn_retries = 0\n\n\twait(acon_pg)\n\n\twhile 
True:\n\t\tresult, notices = pg_query_state_locks(**pg_qs_args)\n\t\tn_retries += 1\n\t\tif len(result) > 0:\n\t\t\tbreak\n\t\tif n_retries >= MAX_PG_QS_RETRIES:\n\t\t\t# pg_query_state callings don't return any result, more likely run\n\t\t\t# query has completed\n\t\t\tbreak\n\t\ttime.sleep(DELAY_BETWEEN_RETRIES)\n\n\tcurs_pg.execute(\"select pg_advisory_unlock(2);\")\n\twait(acon_pg)\n\twait(acon_query)\n\n\tset_guc(acon_query, 'enable_mergejoin', 'on')\n\tcurs_query.execute(\"select pg_advisory_unlock(2);\")\n\tcurs_pg.execute(\"select pg_advisory_unlock(1);\")\n\treturn result, notices", "def _schedule(self, context, topic, spec_obj, instance_uuids,\n return_alternates=False):\n\n elevated = context.elevated()\n hosts = self.hosts_up(elevated, topic)\n if not hosts:\n msg = _(\"Is the appropriate service running?\")\n raise exception.NoValidHost(reason=msg)\n\n hosts = self._filter_hosts(hosts, spec_obj)\n if not hosts:\n msg = _(\"Could not find another compute\")\n raise exception.NoValidHost(reason=msg)\n\n # Note that we don't claim in the chance scheduler\n num_instances = len(instance_uuids)\n # If possible, we'd like to return distinct hosts for each instance.\n # But when there are fewer available hosts than requested instances, we\n # will need to return some duplicates.\n if len(hosts) >= num_instances:\n selected_hosts = random.sample(hosts, num_instances)\n else:\n selected_hosts = [random.choice(hosts)\n for i in range(num_instances)]\n\n # This is the overall list of values to be returned. There will be one\n # item per instance, and that item will be a list of Selection objects\n # representing the selected host and zero or more alternates.\n # NOTE(edleafe): in a multi-cell environment, this can return\n # alternates from different cells. 
When support for multiple cells is\n # implemented in select_destinations, this will have to be updated to\n # restrict alternates to come from the same cell.\n selections_to_return = []\n\n # We can't return dupes as alternates, since alternates are used when\n # building to the selected host fails.\n if return_alternates:\n alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)\n else:\n alts_per_instance = 0\n for sel_host in selected_hosts:\n selection = objects.Selection.from_host_state(sel_host)\n sel_plus_alts = [selection]\n while len(sel_plus_alts) < alts_per_instance:\n candidate = random.choice(hosts)\n if (candidate not in sel_plus_alts) and (\n candidate not in selected_hosts):\n # We don't want to include a selected host as an alternate,\n # as it will have a high likelihood of not having enough\n # resources left after it has an instance built on it.\n alt_select = objects.Selection.from_host_state(candidate)\n sel_plus_alts.append(alt_select)\n selections_to_return.append(sel_plus_alts)\n return selections_to_return", "def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }", "def _build_task_queue(self, dt: datetime.datetime, scheduled_tasks: List[ScheduledTask]):\r\n self.task_queue = tuple([task for task in scheduled_tasks if task.is_scheduled_to_run(dt)])\r\n logging.info(f\"Task queue built, {len(self.task_queue)} tasks scheduled\")", "def query_active(cls, schedule_id=None):\n q = 
Session.query(cls).outerjoin(Lesson).join(Schedule)\n if schedule_id is None:\n stmt = Schedule.query_current_id().subquery()\n q = q.join((stmt, Lesson.schedule_id == stmt.c.id))\n else:\n q = q.filter(Schedule.id == schedule_id)\n return q", "def query_active(cls, schedule_id=None):\n q = Session.query(cls).outerjoin(Lesson).join(Schedule)\n if schedule_id is None:\n stmt = Schedule.query_current_id().subquery()\n q = q.join((stmt, Lesson.schedule_id == stmt.c.id))\n else:\n q = q.filter(Schedule.id == schedule_id)\n return q", "def exec_query(collection,\n collection_name,\n granularity,\n queries,\n query_file_name,\n fig_dir,\n grid_dir):\n\n time_grid = [[None for i in range(granularity)] for j in range(granularity)]\n plan_grid = [[0 for i in range(granularity)] for j in range(granularity)]\n itr_count = 0\n fig_id = 0\n not_exists_marker = 'NULL'\n\n for (query, b_i, a_i) in queries:\n progress = round(float(itr_count) * 100 / len(queries), 2)\n print(\"Progress {}%\".format(progress))\n\n # display result\n if progress % 2 < 0.001:\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n query_file_name.replace(\".txt\", \"\")),\n granularity,\n id=\"fig_{:0>5d}\".format(fig_id))\n fig_id += 1\n\n # timeout\n # t_win, t_a, t_b, t_cover, t_tbl = timeout, timeout, timeout, timeout, timeout\n projection = {\"_id\": 0, \"a\": 1, \"b\": 1}\n\n # measure time consumption of executing each query plan\n print(\"Forcing collscan\")\n table_scan_explain = collection.find(query, projection).hint([(\"$natural\", 1)]).explain()\n t_tbl = table_scan_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing aIdx\")\n t_a = not_exists_marker\n if \"aIdx\" in collection.index_information():\n idx_a_explain = collection.find(query, projection).hint(\"aIdx\").explain()\n t_a = idx_a_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing bIdx\")\n t_b = not_exists_marker\n if \"bIdx\" in collection.index_information():\n idx_b_explain = collection.find(query, projection).hint(\"bIdx\").explain()\n t_b = idx_b_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing coverIdx\")\n t_cover = not_exists_marker\n if \"coverIdx\" in collection.index_information():\n idx_cover_explain = collection.find(query, projection).hint(\"coverIdx\").explain()\n t_cover = idx_cover_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n # NOTE: FORMAT a|b|coverIdx|collscan\n t_s = [str(t_a), str(t_b), str(t_cover), str(t_tbl)]\n time_grid[b_i][a_i] = \"|\".join(t_s)\n\n # run the query without hint\n print(\"Finding winner\")\n exec_explain = collection.find(query, projection).explain()\n # t_win = exec_explain[\"executionStats\"][\"executionTimeMillis\"]\n winning_plan = str(exec_explain['queryPlanner']['winningPlan'])\n\n if 'aIdx' in winning_plan:\n plan_grid[b_i][a_i] = 1\n elif 'bIdx' in winning_plan:\n plan_grid[b_i][a_i] = 2\n elif 'coverIdx' in winning_plan:\n plan_grid[b_i][a_i] = 3\n elif 'COLLSCAN' in winning_plan:\n plan_grid[b_i][a_i] = 4\n\n pprint(exec_explain['queryPlanner'])\n print(\"Time: a: {}, b: {}, cover: {} ,collscan: {}\".format(t_a, t_b, t_cover, t_tbl))\n print(\"=\" * 60)\n\n itr_count += 1\n\n save_grid(plan_grid, os.path.join(grid_dir, collection_name,\n \"plan_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n save_grid(time_grid, os.path.join(grid_dir, collection_name,\n \"time_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n 
query_file_name.replace(\".txt\", \"\")),\n granularity,\n id=\"fig_{:0>5d}\".format(fig_id))\n return", "def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' % schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def GenerateRequestQueuing(self):\n url_to_requests = collections.defaultdict(list)\n for rq in self._request_track.GetEvents():\n url_to_requests[rq.url].append(rq)\n # Queuing events are organized by source id, which corresponds to a load of\n # a url. 
First collect timing information for each source id, then associate\n # with each request.\n timing_by_source_id = {}\n for source_id, events in self._queuing_events_by_id.iteritems():\n assert all(e.end_msec is None for e in events), \\\n 'Unexpected end_msec for nested async queuing events'\n ready_times = [e.start_msec for e in events if e.name == self.READY_NAME]\n if not ready_times:\n ready_msec = None\n else:\n assert len(ready_times) == 1, events\n ready_msec = ready_times[0]\n timing_by_source_id[source_id] = (\n min(e.start_msec for e in events),\n max(e.start_msec for e in events),\n ready_msec)\n queue_info = {}\n for request_url, requests in url_to_requests.iteritems():\n matching_source_ids = set(\n source_id for source_id, url in self._source_id_to_url.iteritems()\n if url == request_url)\n if len(matching_source_ids) > 1:\n logging.warning('Multiple matching source ids, probably duplicated'\n 'urls: %s', [rq.url for rq in requests])\n # Get first source id.\n sid = next(s for s in matching_source_ids) \\\n if matching_source_ids else None\n (throttle_start_msec, throttle_end_msec, ready_msec) = \\\n timing_by_source_id[sid] if matching_source_ids else (-1, -1, -1)\n\n blocking_requests = []\n for sid, (flight_start_msec,\n flight_end_msec, _) in timing_by_source_id.iteritems():\n if (flight_start_msec < throttle_start_msec and\n flight_end_msec > throttle_start_msec and\n flight_end_msec < throttle_end_msec):\n blocking_requests.extend(\n url_to_requests.get(self._source_id_to_url[sid], []))\n\n info = collections.namedtuple(\n 'QueueInfo', ['start_msec', 'end_msec', 'ready_msec', 'blocking'\n 'source_ids'])\n info.start_msec = throttle_start_msec\n info.end_msec = throttle_end_msec\n info.ready_msec = ready_msec\n current_request_ids = set(rq.request_id for rq in requests)\n info.blocking = [b for b in blocking_requests\n if b is not None and\n b.request_id not in current_request_ids]\n info.source_ids = matching_source_ids\n for rq in requests:\n queue_info[rq] = info\n return queue_info" ]
[ "0.5548428", "0.5151464", "0.48518416", "0.47420847", "0.46796945", "0.46776414", "0.4667777", "0.4659418", "0.46430779", "0.46141425", "0.45946392", "0.45765543", "0.4571356", "0.45686328", "0.45587033", "0.45582697", "0.4556789", "0.45445147", "0.454312", "0.4534792", "0.45316252", "0.45129263", "0.45012805", "0.44768894", "0.44716182", "0.44716182", "0.44664705", "0.44520947", "0.44479093", "0.4439285" ]
0.62616307
0
This method is used to terminate a job, or a group of jobs, with the specified job_id or job_name in a given cluster
def delete(cls, cluster, job, group=None):\n    try:\n        if group is not None:\n            # get the job ids from the db\n            arguments = {'cluster': cluster, 'group': group}\n            db_jobs = cls.cm.find('batchjob', **arguments)\n            list1 = []\n            for i in db_jobs:\n                list1.append(db_jobs[i]['job_id'])\n            # read active jobs\n            active_jobs = json.loads(cls.queue(cluster))\n            list2 = []\n            for i in active_jobs:\n                list2.append(active_jobs[i]['jobid'])\n            # find intersection\n            res = set(list1).intersection(set(list2))\n            if res is not None:\n                for j in res:\n                    cmd = 'scancel {}'.format(str(j))\n                    Shell.ssh(cluster, cmd)\n                    print("Deleted {}".format(j))\n                return "All jobs for group {} killed successfully".format(group)\n        else:\n            args = 'scancel '\n            if job.isdigit():\n                args += job\n            else:\n                args += "-n {}".format(job)\n            Shell.ssh(cluster, args)\n            return "Job {} killed successfully".format(job)\n    except Exception as ex:\n        print("in exceptio")\n        print(ex)\n        return ex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate(ctx):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n for job in jobs:\n jobid = job['id']\n click.echo('Terminating {}'.format(jobid))\n ctl('terminate', '--jobid', jobid)", "def kill_job(self, job):\n\n if job.status == Job.STATUS_QUEUED:\n # case 1: job is in QUEUED state\n # remove it from the queue and mark as killed\n\n job_queue = job_queue_name(job.model)\n logger.info(\n \"killing job {} by removing from queue {}\".\n format(job.uuid, job_queue))\n\n command_dict = {'command': 'PROCESS_JOB', 'job_uuid': job.uuid}\n remove_command(redis_connection(), job_queue, command_dict)\n job.status = Job.STATUS_KILLED\n # save it\n Job[job.uuid] = job\n elif job.status == Job.STATUS_RUNNING:\n # case 2: job is in RUNNING state\n # send message to worker to kill the job\n worker = worker_name(job.worker_url, job.model)\n worker_channel = node_channel_name(worker)\n logger.info(\"sending command to kill job on channel {}\".\n format(worker_channel))\n command_dict = {'command': \"KILL_JOB\", 'job_uuid': job.uuid}\n publish_command(redis_connection(), worker_channel, command_dict)\n else:\n logger.info(\"kill called on job {} in incompatible state {}\".\n format(job.uuid, job.status))", "def kill_job(self , index):\n job = self.jobs.__getitem__( index )\n if job:\n job.kill()", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def delete_job(api_instance, job_name):\n api_response = api_instance.delete_namespaced_job(\n name=job_name,\n namespace=\"default\",\n body=client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n logger.info(\"Job deleted with status='%s'\" % str(api_response.status))", "def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)", "def stop_job(self):\n # DELETE /jobs/{job_id}/results\n pass", "def cleanup(self):\n cluster = self.client and self.client.cluster\n\n if self.client:\n self.client.close()\n self.client = None\n\n if cluster:\n try:\n cluster.close(timeout=60.0)\n except RuntimeError as ex:\n ## For some reason, sometimes the cluster can't be closed due to some\n ## problem with 'bkill', which fails with an error that looks like the following.\n ## If that happens, try to re-run bkill one more time in the hopes of really\n ## killing the cluster and not leaving lingering workers running.\n ## (This issue has been observed on the Janelia cluster for both dask and spark clusters.)\n ##\n # RuntimeError: Command exited with non-zero exit code.\n # Exit code: 255\n # Command:\n # bkill 54421878 54421872 54421877\n # stdout:\n 
#\n # stderr:\n # Job <54421878>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421872>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421877>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n m = re.search(r'bkill( \\d+)+', str(ex))\n if not m:\n raise\n\n logger.warning(\"Failed to kill cluster with bkill, trying one more time...\")\n time.sleep(2.0)\n result = subprocess.run(m.group(), shell=True)\n if result.returncode != 0:\n logger.error(\"Second attempt to kill the cluster failed!\")\n raise", "def stop_batch_job(self, name, error_on_stopped=False):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n if name not in self.jobs:\n if error_on_stopped:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return\n self.remove_job(name)\n _,args,kwargs,cleanup=self._batch_jobs_args.pop(name)\n if cleanup:\n cleanup(*args,**kwargs)", "def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)", "def _delete_job(self, job):", "def job_stop(self, job_id):\n resp = self.backend.job_stop(job_id)\n\n self.refresh_jobs()", "def _kill_canceling(self, job):\n pidrecord = os.path.join(job.output_dir, \"jobpid\")\n if os.path.exists(pidrecord):\n with open(pidrecord, 'r') as f:\n pgid = int(f.read())\n self.logger.info(\"Signalling SIGTERM to process group: %d\", pgid)\n try:\n os.killpg(pgid, signal.SIGTERM)\n except OSError as e:\n self.logger.info(\"Unable to kill process group %d: %s\", pgid, e)\n os.unlink(pidrecord)", "def stop_labeling_job(LabelingJobName=None):\n pass", "def killJob(appName, jobId):\n jobs = db.getJobs(jobId=jobId)\n job = None if len(jobs) == 0 else jobs[0]\n\n if job == None:\n return returnError (\"Job ID, %s, does not exist\" % jobId, 404)\n\n logging.info (\"[FLASKWEB] Asked to KILL job #%s. Current Job status is %s\" % (jobId, job['status']))\n # Separate check to kill orphaned jobs in Db\n # TODO: Merge Job with experiments to post updates to correct table\n if job['status'] == 'RUNNING' or job['status'] == 'SUBMITTED':\n db.updateJob(jobId, status='KILLED')\n\n if int(jobId) in dispatcher.getActiveJobs():\n status = 'KILLED'\n logging.debug('[FLASKWEB] Job %s is active. Signaling to kill in mesos.' % jobId)\n dispatcher.cancelJob(int(jobId), driverDispatch)\n else:\n status = 'ORPHANED and CLEANED'\n logging.debug('[FLASKWEB] Job # %s is ORPHANED and does not exist in current state. Cleaning up.' 
% jobId)\n\n ts = db.getTS_est() #datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n thisjob = dict(jobId=jobId, time=ts, url=dispatcher.getSandboxURL(jobId), status=status)\n if 'application/json' in request.headers['Accept']:\n return jsonify(thisjob)\n else:\n return render_template(\"last.html\", appName=appName, lastjob=thisjob)", "def terminate_worker_groups(cls, args, config):\n logging.debug(\"MOLNSWorkerGroup.terminate_worker_groups(args={0})\".format(args))\n worker_obj = cls._get_workerobj(args, config)\n if worker_obj is None: return\n # Check for any instances are assigned to this worker group\n instance_list = config.get_all_instances(worker_group_id=worker_obj.id)\n # Check if they are running or stopped (if so, resume them)\n inst_to_stop = []\n if len(instance_list) > 0:\n for i in instance_list:\n status = worker_obj.get_instance_status(i)\n if status == worker_obj.STATUS_RUNNING or status == worker_obj.STATUS_STOPPED:\n print \"Terminating worker at {0}\".format(i.ip_address)\n inst_to_stop.append(i)\n if len(inst_to_stop) > 0:\n worker_obj.terminate_instance(inst_to_stop)\n else:\n print \"No workers running in the worker group\"", "def stop_training_job(TrainingJobName=None):\n pass", "def cluster_stop(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.stop(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster stop failed\")", "def delete_jobs(self):\n jobs = self.get_jobs(self.age)\n print('Jobs queued for delete: ', jobs)\n for job in jobs:\n try: \n body = k_client.V1DeleteOptions(propagation_policy='Background')\n self.kube_v1_batch_client.delete_namespaced_job(job, body=body, namespace=self.project)\n self.kube_client.delete_namespaced_persistent_volume_claim(job+\"-storage-claim\", self.project, {})\n print('Deleted job: ', job)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api -> delete_namespaced_job: %s\\n\" % e)\n exit(1)", "def delete_job(self, job):\n subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DELETE_JOB, job.name])", "def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"", "def kill(self, job_id):\n if webtlsmdd.kill_job(job_id):\n x = ''\n x += '<center>'\n x += '<h3>Job %s has died ' % (job_id)\n x += 'or its associated pid has been manually killed.</h3>'\n x += '</center>'\n else:\n x = ''\n x += '<center>'\n x += '<h3>Error: Can not remove job %s.</h3>' % (job_id)\n x += '</center>'\n return x", "def cancel_job(self, job):\n try:\n self.jobs.remove(job)\n except ValueError:\n pass", "def terminate_job_run(\n self,\n ) -> Callable[\n [cloud_deploy.TerminateJobRunRequest], cloud_deploy.TerminateJobRunResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"terminate_job_run\" not in self._stubs:\n self._stubs[\"terminate_job_run\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/TerminateJobRun\",\n request_serializer=cloud_deploy.TerminateJobRunRequest.serialize,\n 
response_deserializer=cloud_deploy.TerminateJobRunResponse.deserialize,\n )\n return self._stubs[\"terminate_job_run\"]", "def cancel_vmware_protection_job(job_name):\n try:\n cohesity_client = _get_client()\n jobs = cohesity_client.protection_jobs.get_protection_jobs(\n is_deleted=False, names=job_name)\n if not jobs:\n return \"Job with name {} not available.\".format(job_name)\n for job in jobs:\n if job.name == job_name:\n job_id = job.id\n break\n if not job_id:\n return \"Job with name {} not available.\".format(job_name)\n\n # Get recent job run id and status.\n runs = cohesity_client.protection_runs.get_protection_runs(\n job_id=job_id)\n if not runs:\n return \"Job run details not available for job {}\".format(job_name)\n latest_run = runs[0]\n if latest_run.backup_run.status not in [\"kRunning\", \"kAccepted\"]:\n return \"No active job run available for job {}\".format(job_name)\n run_id = latest_run.backup_run.job_run_id\n body = CancelProtectionJobRunParam()\n body.job_run_id = run_id\n cohesity_client.protection_runs.create_cancel_protection_job_run(\n job_id, body)\n return \"Successfully cancelled the run for job {}\".format(job_name)\n except APIException as err:\n return \"Error while attempting to cancel the job {}, error : {}\".format(\n job_name, err)", "def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"form\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None or job_id == \"\":\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n get_job_info = get(\n config.Config.FLASK_ENDPOINT + \"/api/scheduler/job\",\n headers={\"X-SOCA-TOKEN\": config.Config.API_ROOT_KEY},\n params={\"job_id\": job_id},\n verify=False,\n ) # nosec\n\n if get_job_info.status_code != 200:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. 
Job may have terminated\",\n }, 500\n else:\n job_info = get_job_info.json()[\"message\"]\n job_owner = job_info[\"Job_Owner\"].split(\"@\")[0]\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n if request_user != job_owner:\n return errors.all_errors(\"CLIENT_NOT_OWNER\")\n try:\n qdel_command = config.Config.PBS_QDEL + \" \" + job_id\n try:\n delete_job = subprocess.check_output(shlex.split(qdel_command))\n return {\"success\": True, \"message\": \"Job deleted\"}\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to execute qdel command: \" + str(err),\n }, 500\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500", "def killJob(job_id):\n \n # mark all of the Ready tasks as Killed\n with transaction() as t:\n t.cur.execute(\"\"\"update Hydra_rendertask set status = 'K' \n where job_id = '%d' and status = 'R'\"\"\" % job_id)\n \n # get hostnames for tasks that were already started\n tuples = None # @UnusedVariable\n with transaction() as t:\n t.cur.execute(\"\"\"select host from Hydra_rendertask \n where job_id = '%d' and status = 'S'\"\"\" % job_id)\n tuples = t.cur.fetchall()\n \n # make flat list out of single-element tuples fetched from db\n hosts = [t for (t,) in tuples]\n \n # send a kill request to each host, note if any failures occurred\n error = False\n for host in hosts:\n try:\n error = error or not sendKillQuestion(host)\n except socketerror:\n logger.debug(\"There was a problem communicating with {:s}\"\n .format(host))\n error = True\n \n return error", "def terminate_jobflow(self, jobflow_id):\r\n self.terminate_jobflows([jobflow_id])", "def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")", "def cancel(self):\n if not self.parent_node.is_job:\n return\n\n # First perform clean operation\n self.clean()\n\n self.winstance.send_event('Cancelling job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.cancel',\n kwargs={\"name\": self.name})\n self.winstance.send_event('.. job canceled')\n result.task.wait_for_terminated()\n\n self._status = 'CANCELLED'" ]
[ "0.69394773", "0.6439822", "0.6429004", "0.63975894", "0.6377325", "0.6343466", "0.6278735", "0.62779915", "0.62628543", "0.61729574", "0.6161055", "0.61369205", "0.6120452", "0.61027503", "0.60934454", "0.60848886", "0.60653436", "0.6043757", "0.5999601", "0.5956319", "0.5952695", "0.5952206", "0.59408826", "0.5939996", "0.58549774", "0.5837356", "0.5833367", "0.5829566", "0.5801544", "0.57941866" ]
0.7375492
0
Function to set the tolerance.
def set_tolerance(self, tol): self.tolerance = tol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tolerance(self, tolerance: float) -> None:\n self._tolerance = tolerance", "def set_tolerance(self, value):\n\n self._tolerance = value", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def tol(self, value):\n self._tol = value", "def set_tol(self, tol : float):\n self.tol = tol", "def set_tolerance(self, *args, **kwargs):\n raise ParameterError(\"The %s StoppingCriterioin does not yet support resetting tolerances.\")", "def set_tolerance(rel_tolerance=1e-09, abs_tolerance=0.0):\n global REL_TOLERANCE, ABS_TOLERANCE\n REL_TOLERANCE = rel_tolerance\n ABS_TOLERANCE = abs_tolerance", "def setTolerance(self, eps):\n self._simulator_.update(eps=eps)\n return", "def _set_tolerances(self, atol=None, rtol=None, maxiter=None):\n atol = self.atol if atol is None else atol\n rtol = self.rtol if rtol is None else rtol\n maxiter = self.maxiter if maxiter is None else maxiter\n # BUG: PETSc misses rtol requirement by ~10-20X -> Report to petsc4py\n self.ksp.setTolerances(atol=None, rtol=rtol/50, max_it=maxiter)", "def tolerance(self) -> float:\n return self._tolerance", "def tolerance(self):\n return self.params['tolerance']", "def set_particle_tolerance(self, value):\n\n self._particle_tolerance = value", "def tolerance(self):\n return self._tolerance", "def set_abs_tolerance(self, value):\n\n self._abs_tolerance = value", "def set_size_tolerance(self, tolerance):\n self._size_tolerance = tolerance", "def get_tolerance(self):\n return self.tolerance", "def SetTol(self, tol):\n return _hypre.HypreLOBPCG_SetTol(self, tol)", "def tol(self, atol: Real):\n if not isinstance(atol, Real):\n raise TypeError(\"The attribute tol must be a real number.\")\n if 0 <= atol < 1:\n self._tol = atol\n else:\n raise ValueError(\"Need 0 <= tol < 1.\")", "def SetTol(self, tol):\n return _hypre.HyprePCG_SetTol(self, tol)", "def SetTol(self, tol):\n return _hypre.HypreBoomerAMG_SetTol(self, tol)", "def set_tol(iprec):\n \n tol = -1\n \n if iprec == -2:\n tol = 0.5\n \n elif iprec == -1:\n tol = 0.5 * 10**-1\n \n elif iprec == 0:\n tol = 0.5 * 10**-2\n \n elif iprec == 1:\n tol = 0.5 * 10**-3\n \n elif iprec == 2:\n tol = 0.5 * 10**-6\n \n elif iprec == 3:\n tol = 0.5 * 10**-9\n \n elif iprec == 4:\n tol = 0.5 * 10**-12\n \n elif iprec == 5:\n tol = 0.5 * 10**-15\n \n return tol", "def SetTol(self, tol):\n return _hypre.HypreGMRES_SetTol(self, tol)", "def SetMaxTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_SetMaxTolerance(self, *args)", "def update_vrad_tolerance(self):\n try:\n value = float(self.edit_vrad_tolerance.text())\n except:\n value = None\n self._get_selected_model().metadata[\"velocity_tolerance\"] = value\n return None", "def SetTol(self, tol):\n return _hypre.HypreAME_SetTol(self, tol)", "def set_tol_2d(value=1e-9):\r\n geometry.gmSetXyTol(value)", "def tol(self) -> Real:\n return self._tol", "def SetMinTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_SetMinTolerance(self, *args)", "def LimitTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_LimitTolerance(self, *args)", "def SetTol(self, tol):\n return _hypre.HypreFGMRES_SetTol(self, tol)" ]
[ "0.8269015", "0.81052655", "0.79584485", "0.7956575", "0.78157336", "0.7737034", "0.7723517", "0.76122135", "0.7429519", "0.7174162", "0.71566623", "0.7126722", "0.7088782", "0.68695533", "0.6833426", "0.676617", "0.66971606", "0.668103", "0.66773707", "0.665312", "0.6650963", "0.6629326", "0.6607332", "0.65898097", "0.65870655", "0.6579697", "0.6524468", "0.6506353", "0.6489616", "0.647758" ]
0.8542296
0
Function to revert the direction of the current line. Returns
def revert(self): reverted = Line(l=self) reverted.direction *= -1.0 return reverted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fliped(self):\n return Line(self.end, self.start, self)", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def _inverse_lines(self):\n pass", "def revert(self, *args, **kwargs):", "def _backup_line(self):\n if self._orig_line is None:\n self._orig_line = self._line", "def flip(self, p):\n return -p", "def previous_line():\r\n set_point(point().previous_line())", "def opposite(self):\n if self.direction == 8: return Direction(8)\n n = self.direction + 4\n if n >= 8: n -= 8\n return Direction(n)", "def restore(self):\n print(\"Restoring Direction\")\n if self.turn_track > 0:\n self.encL(abs(self.turn_track))\n elif self.turn_track < 0:\n self.encR(abs(self.turn_track))", "def backward_character():\r\n set_point(point().offset(-1))", "def pre_revert(self):", "def revert(self):\n\n if len(self.stack) == 0 or not self.revertable:\n return\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,THING_REVERT_DISTANCE))\n\n state = self.stack.pop()\n\n #not sure if this helps, but it can't hurt\n self.model.detachNode()\n\n for x in self.toRevert:\n self.toRevert[x](state[x])", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def __invert__(self):\n return self.reverse()", "def revert(self, a):\n raise NotImplementedError", "def flip(self):", "def back(cargo):\n # Go backwards\n line_follower.turn()\n\n # return\n new_state = \"follow\"\n txt = \"follow line..\"\n\n return (new_state, txt)", "def turn_left(self):\n temp = self.direction[0]\n self.direction[0] = self.direction[1]\n self.direction[1] = -temp", "def Reverse(self):\n if (self.translated == False):\n self.alignment = self.alignment[:,::-1]\n self.Show(self.displayedColumn)\n self.BackupAlignment()\n else:\n self.AlertMessage(\"Can't reverse protein sequences.\", 'medium')", "def down(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n wraps_current = int(len_current / wrap)\n columns_current = len_current % wrap\n \n # If the position is not in the bottom wrap of the line move it down a\n # wrap. Take into account shorter wraps below.\n if len_current > wrap and self.pos < wraps_current * wrap:\n pos_wrap = int(self.pos / wrap)\n if pos_wrap + 1 == wraps_current and self.pos % wrap > columns_current:\n self.pos = (wraps_current * wrap) + columns_current\n else:\n self.pos = self.pos + wrap\n \n # If the position is in the bottom wrap move it to the first wrap of\n # the next line. 
Take into acount shorter lines below.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos % wrap > len_next:\n self.pos = len_next\n else:\n self.pos = self.pos % wrap\n \n # If no wrapping is being done move the line down one and adjust the\n # position if the next line is shorter.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos > len_next:\n self.pos = len_next", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def flip(self):\n self._start, self._end = self._end, self._start", "def revert(self, ref=None):\n # TODO\n raise NotImplementedError", "def __invert__(self):\n return self.strip(axis = 1)", "def backToSource(self, point):\n if self.revertTransformation is not None:\n return self.revertTransformation(point)\n return point", "def backward(self, y):\n pass", "def undo():", "def back(self):\n self.position -= 1", "def revise():", "def getDirectionChange(pre, now, next):\r\n return RIGHT" ]
[ "0.68525314", "0.6473832", "0.64653385", "0.64345104", "0.64087176", "0.6327431", "0.6293631", "0.6287279", "0.62672716", "0.6193143", "0.6137352", "0.61128193", "0.6102303", "0.60842335", "0.60765207", "0.6057564", "0.60540533", "0.60380507", "0.6030797", "0.60029155", "0.5998949", "0.597672", "0.5972079", "0.59696555", "0.59154564", "0.59099084", "0.59048104", "0.59046954", "0.5894029", "0.5850022" ]
0.85526377
0
Function to get the abscissa of a point with respect to a line. The abscissa is 0 if the projection of the point and the projection of the frame origin on the line are the same point.
def get_abscissa(self, p): return np.dot(p - self.zero, self.direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def point_to_line_abs(p: Vec2, p0: Vec2, p1: Vec2):\n return abs(point_to_line_signed(p, p0, p1))", "def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b", "def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))", "def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection", "def slope_from_origin(self):\n\n return self.y / self.x", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def slope_from_origin(self):\n\n return (self.y / self.x)", "def center_point(polyline):\n\tpts = unique(polyline.points)\n\treturn sum(pts) / len(pts)", "def distance_point_line_sqrd(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector_sqrd(cross_vectors(pa, pb))\n l_ab = length_vector_sqrd(ab)\n return l / l_ab", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def slope_from_origin(self):\n return round(math.degrees(abs(math.atan(self.y/self.x))), 2)", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n 
offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def get_angle_sign_pt_to_line(point, line_point1, line_point2):\n projection = get_projection_of_pt_on_line(point, line_point1, line_point2)\n if line_point1.x <= line_point2.x:\n if point.y >= projection.y:\n sign = 1\n else:\n sign = -1\n else:\n if point.y >= projection.y:\n sign = -1\n else:\n sign = 1\n return sign", "def point_at(self, abscissa):\n return self.zero + abscissa * self.direction", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])", "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def rl_get_point() -> int: # pragma: no cover\n if rl_type == RlType.GNU:\n return ctypes.c_int.in_dll(readline_lib, \"rl_point\").value\n\n elif rl_type == RlType.PYREADLINE:\n return int(readline.rl.mode.l_buffer.point)\n\n else:\n return 0", "def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]" ]
[ "0.659462", "0.6087625", "0.6012542", "0.59856695", "0.58890575", "0.58882195", "0.5846869", "0.5727574", "0.57215995", "0.57181567", "0.57152516", "0.56363255", "0.5614506", "0.5601204", "0.55724233", "0.55630857", "0.55362034", "0.5515001", "0.5511656", "0.55000144", "0.5489459", "0.5450959", "0.544936", "0.5449059", "0.5403886", "0.54027635", "0.53956074", "0.53666526", "0.5341457", "0.5309213" ]
0.6117362
1
Get the status code as per ttype and it's status_val
def get_status_code(self, ttype, status_val) -> str: # get the status code from __status_code or __default_code pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_code(self):\n return int(self.status.split()[1])", "def get_status_code(status):\n return dict(const.STATUS_CODES).get(status)", "def status_code(self):\r\n return int(self._status[:3])", "def status_code(self):\n return int(self._status[:3])", "def _parse_status(self, status):\n if status in (STATUS_FINISHED, 'FINISHED'):\n return STATUS_FINISHED\n elif status in (STATUS_ERROR, 'ERROR'):\n return STATUS_ERROR\n elif status in (STATUS_CANCELED, 'CANCELED'):\n return STATUS_CANCELED\n return STATUS_STARTED", "def _get_status(self):\n return self.__status", "def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")", "def _get_status_code(response: Response) -> int:\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def status(self):\n return STATUS[self.fields['status']]", "def GetStatus(self):\r\n return self.status", "def gather_http_status_code(self):\n\n if self.status.ipv6_syntax_validation:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, \"ipv6\"\n ).get()\n else:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, self.subject_type\n ).get()", "def status(_):\n return {\"status\": \"ok\"}", "def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def _get_status(trial: dict) -> int:\n if trial['overall_status'] in {'Not yet recruiting', 'Active, not recruiting'}:\n return 0\n elif trial['overall_status'] in {'Enrolling by invitation', 'Recruiting', 'Available'}:\n return 1\n elif trial['overall_status'] in {'Approved for marketing'}:\n return 2\n else:\n return 3", "def status_to_event_code(status: str):\n return {\n \"sent\": \"txSent\",\n \"pending\": \"txPool\",\n \"pending-simulation\": \"txPoolSimulation\",\n \"stuck\": \"txStuck\",\n \"confirmed\": \"txConfirmed\",\n \"failed\": \"txFailed\",\n \"speedup\": \"txSpeedUp\",\n \"cancel\": \"txCancel\",\n \"dropped\": \"txDropped\",\n }[status]", "def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')", "def 
_GetStatusFromOp(op):\n for prop in op.response.additionalProperties:\n if prop.key == 'status':\n return prop.value.string_value\n return 'UNKNOWN'", "def code(self):\n\t\treturn self.status_code", "def __dec_status(self, status_code):\n ret = self.status_codes.get(status_code)\n if ret == None:\n return \"Unknown\"\n else:\n return ret", "def status(self, code, content_length=None):", "def status(self):\n return self.get(self._names[\"status\"])", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def status_code(self):\n return self._status_code", "def rtt_get_status(self):\n status = structs.JLinkRTTerminalStatus()\n res = self.rtt_control(enums.JLinkRTTCommand.GETSTAT, status)\n return status" ]
[ "0.7202656", "0.71696675", "0.7097821", "0.6938727", "0.68742913", "0.68445003", "0.68133837", "0.6747887", "0.67144114", "0.6705535", "0.662726", "0.6622442", "0.6621217", "0.66081154", "0.6606708", "0.6606708", "0.6598715", "0.65756345", "0.65702355", "0.65538526", "0.6550194", "0.6547384", "0.64904195", "0.64783126", "0.6455558", "0.6455558", "0.6455558", "0.643943", "0.64168113", "0.6397609" ]
0.86103326
0
To check if payload to be processed with this lambda
def apply_filter(self, payload: dict, ainfos) -> (dict, dict): # check if needs to process by this lambda pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_payload():\n return True", "def process(self, payload, status_code=0):", "def has_payload(self):\n\n if self._payload:\n return True\n return False", "def __should_payload_execute(self, queue_item):\n\n soup = queue_item.get_soup_response()\n\n ng_app_soup = soup.select(\"[ng-app]\")\n if not ng_app_soup:\n return False\n\n for non_bindable in ng_app_soup[0].select(\"[ng-non-bindable]\"):\n non_bindable.decompose()\n\n in_scope_html = str(ng_app_soup[0])\n\n if queue_item.payload[\"value\"] in in_scope_html:\n return True\n\n return False", "async def exists(self, payload: TPayload) -> bool:", "def payload_undefined(self):\n return self._attr is None", "def payload_valid(self, payload):\n return (\n isinstance(payload, DPTArray)\n and len(payload.value) == self.dpt_class.payload_length\n )", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def payload(self):", "def lambda_handler(event, context):\n return dispatch(event)", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def process_webhook(self):\n if self.token:\n self.verify = VerificationMethod.TOKEN\n if self.secret:\n self.verify = VerificationMethod.HMAC\n return True", "def lambda_handler(event, context):\n\n print(\"EVENT:\")\n print(event)\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n\n recs = flow(event, s3)\n print(recs)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def payload_is_handleable(self, payload):\n\t\tif payload.get_filename():\n\t\t\treturn True\n\t\treturn False", "def consume(self, payload):\n raise NotImplementedError()", "def handler(self, *args, **kwargs):\n return True", "def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )", "async def triggered_on(self, ctx: FilterContext) -> bool:", "def check_message_payload(dequeued_item):\n key_array = [\"dateTime\",\n \"payload\",\n \"messageType\"]\n\n # Note that the \"ttl\" key (and others) may be present 
but its not checked here!\n\n for key in key_array:\n if key not in dequeued_item.keys():\n return False\n\n key_array = [\"zoomR\",\n \"spatial\",\n \"circuitID\",\n \"reputationEnabled\",\n \"assetID\",\n \"temporal\",\n \"outageTime\",\n \"company\",\n \"votes\",\n \"zoomT\",\n \"longitude\",\n \"latitude\"]\n for key in key_array:\n if key not in dequeued_item[\"payload\"].keys():\n return False\n return True", "def execute_request(self, request: Request):\r\n print(\"Handler is validating data\")\r\n if request.data_input is not None or request.input_file is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Data is not validated\")\r\n return False", "def payload_handle(self, payload, mail):\n\t\tif self.payload_is_handleable(payload):\n\t\t\tif self.export_payload:\n\t\t\t\tself.payload_pipe(payload, mail)\n\t\t\tif self.reduce_payload:\n\t\t\t\t# Mark email as deleted:\n\t\t\t\tself.delete_marked.append(self.payload_index(payload, mail))", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def validate_payload(payload):\n\n if not isinstance(payload, dict):\n raise Exception(\"payload is a %s, not a dictionary\" % type(payload))\n\n if \"nmo\" not in payload:\n raise Exception(\"No nmo in payload\")\n\n if \"job\" not in payload[\"nmo\"]:\n raise Exception(\"No job in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n if \"task\" not in payload[\"nmo\"]:\n raise Exception(\"No task in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n try:\n isGroup = payload['nmo']['source']['misc']['isGroup']\n except:\n isGroup = False\n\n if \"jsonld\" not in payload and not isGroup:\n raise Exception(\"No jsonld in payload \\nPayload is:- %s\" % payload)", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def body(self, _):\r\n return False", "def add_event(payload: PayloadDict) -> bool:\n ...", "def handler(event, context):\n pub_sub_message = base64.b64decode(event['data']).decode('utf-8')\n\n if pub_sub_message == 'executor':\n LOGGER.debug('POST: %s', EVENTS_EXECUTION_ENDPOINT)\n response = requests.post(EVENTS_EXECUTION_ENDPOINT, json={'type': 'POLICY'},\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n elif pub_sub_message == 'validator':\n LOGGER.debug('POST: %s', EVENTS_VALIDATION_ENDPOINT)\n response = requests.post(EVENTS_VALIDATION_ENDPOINT,\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n else:\n LOGGER.warn('Unexpected message from PubSub: %s', pub_sub_message)\n return", "def preprocess_body(self) -> None:\n self._verify_archive_url_and_zip_path()\n self._verify_upload_url_and_zip_path()\n self._verify_upload_url_and_no_zip_path()\n if self.upload_function is None:\n self.upload_function = False" ]
[ "0.6974256", "0.6329951", "0.6317524", "0.5889682", "0.58806306", "0.5717963", "0.57109547", "0.56970483", "0.56970483", "0.56906414", "0.566029", "0.55937403", "0.5567957", "0.5565435", "0.5472518", "0.5453765", "0.5410358", "0.54089427", "0.53794", "0.5371008", "0.53691435", "0.53511924", "0.5346419", "0.53367436", "0.5328826", "0.5311681", "0.52784127", "0.5276847", "0.52756053", "0.5258173" ]
0.66218215
1
Use ansi code on 'string' if the output is the terminal of a not Windows platform
def isSpecial(ansiCode,string): if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END else: return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def b(string):\n return \"\\033[94m{0}\\033[0m\".format(string)", "def ansi(color, text):\r\n code = COLOR_CODES[color]\r\n return '\\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)", "def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")", "def _handle_ansi_color_codes(self, s):\r\n def ansi_code_to_css(code):\r\n return ' '.join(['ansi-%s' % c for c in code.split(';')])\r\n return '<span>' +\\\r\n HtmlReporter._ANSI_COLOR_CODE_RE.sub(\r\n lambda m: '</span><span class=\"%s\">' % ansi_code_to_css(m.group(1)), s) +\\\r\n '</span>'", "def colour(string: str) -> str:\n string = f\"\\033[32m{string}\\033[0m\"\n return string", "def strc(text, color='black', style='normal'):\n\n ansii = ANSIIcode(color, style)\n back_to_normal = ANSIIcode('normal', 'normal') # '\\033[0m'\n\n return ansii + text + back_to_normal", "def ansi_code(text: str, color: List[ANSICode] or ANSICode or None):\n if color is None:\n return text\n elif type(color) is list:\n return \"\".join(color) + f\"{text}{colors.Reset}\"\n else:\n return f\"{color}{text}{colors.Reset}\"", "def ansi(color=\"none\"):\n if color == \"\" or color is None:\n return \"\\33[0m\"\n if isinstance(color, tuple):\n return \"\\33[38;2;{:d};{:d};{:d}m\".format(int(255*color[0]),\n int(255*color[1]),\n int(255*color[2]))\n tupl = clr_tuple(color)\n if tupl is not None:\n return ansi(tupl)\n if color == \"bold\":\n return \"\\33[1m\"\n if color == \"/bold\":\n return \"\\33[22m\"\n return \"\\33[0m\"", "def ansi(*args):\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code", "def process(self, string: str) -> str:\r\n self._check_all_repeaters(string)\r\n no_repeaters_str = self._process_repeaters(string)\r\n ansi_compliant_str = self._process_colors(no_repeaters_str)\r\n\r\n return ansi_compliant_str", "def _ansi_equivalent(self, s: str) -> str:\r\n color_id = self._color_id_regexp.search(s).groups()[0]\r\n\r\n # TODO: Replace this with a class the handles dynamic color configuration!\r\n return {\r\n '0': '\\u001b[37m',\r\n '1': '\\u001b[32m',\r\n '2': '\\u001b[31m',\r\n '3': '\\u001b[33m',\r\n '4': '\\u001b[34m',\r\n '5': '\\u001b[36m',\r\n '6': '\\u001b[37m',\r\n '7': '\\u001b[35m',\r\n '8': '\\u001b[30m',\r\n '.': '\\u001b[0m',\r\n }[color_id]", "def redtext(mesg):\n if sys.platform == 'win32':\n import win32console\n handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)\n reset = handle.GetConsoleScreenBufferInfo()['Attributes']\n handle.SetConsoleTextAttribute(12)\n sys.stdout.writelines(mesg+'\\n')\n handle.SetConsoleTextAttribute(reset)\n else:\n sys.stdout.write('\\033[91m'+mesg+'\\033[0m\\n')", "def __repr__(self):\n return \"ANSIString(%s, decoded=True)\" % repr(self._raw_string)", "def _ansi_wrap(self, text, fg, bg):\n codes = []\n\n if fg is not None:\n codes.append(30 + self._to_code(fg))\n\n if bg is not None:\n codes.append(40 + self._to_code(bg))\n\n if fg is not None and 'i' in fg:\n codes.append(1) # Bold\n\n if bg is not None and 'i' in bg:\n codes.append(4) # Underscore\n\n return \"\\033[\" + \";\".join([str(code) for code in codes]) + \"m\" + text + \"\\033[0m\"", "def _color_string(string, color):\n if color is None:\n return string\n else:\n return color + string + '\\033[0m'", "def test_asciitable_m_pretty_ansi(self):\n input = '''\n┏━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━┓ \n┃\\x1b[1m \\x1b[0m\\x1b[1mReleased \\x1b[0m\\x1b[1m \\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1mTitle \\x1b[0m\\x1b[1m 
\\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1m Box Office\\x1b[0m\\x1b[1m \\x1b[0m┃ \n┡━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━┩ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 20, 2019\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars: The Rise of Skywalker \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m $952,110,690\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mMay 25, 2018\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mSolo: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m $393,151,347\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 15, 2017\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars Ep. V111: The Last Jedi\\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,539,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 16, 2016\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mRogue One: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,439,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n└──────────────┴───────────────────────────────────┴────────────────┘ \n'''\n expected = [\n {\n \"released\": \"Dec 20, 2019\\nMay 25, 2018\\nDec 15, 2017\\nDec 16, 2016\",\n \"title\": \"Star Wars: The Rise of Skywalker\\nSolo: A Star Wars Story\\nStar Wars Ep. V111: The Last Jedi\\nRogue One: A Star Wars Story\",\n \"box_office\": \"$952,110,690\\n$393,151,347\\n$1,332,539,889\\n$1,332,439,889\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def in_green(s: str) -> str:\n return f\"\\033[92m{str(s)}\\033[0m\"", "def color(code):\n return lambda t: \"\\033[{0}{1}\\033[0;m\".format(code, t)", "def _strip_ansi(s):\n if isinstance(s, str):\n return _ansi_codes.sub(r\"\\4\", s)\n else: # a bytestring\n return _ansi_codes_bytes.sub(r\"\\4\", s)", "def style_output(msg='{}'):\n green_code = '\\033[0;32m'\n return text_color(msg, green_code)", "def ANSIIcode(color='black', style='normal'):\n\n colorCode = colorCodes[color]\n styleCode = styleCodes[style]\n\n return '\\033[' + styleCode + colorCode + 'm'", "def color_str(text, color):\n if not is_cli() or no_color():\n # Disable color output if not in CLI mode or if color is disabled\n return text\n return f'\\033[{_COLORS[color]}m{text}\\033[30m'", "def strip_ansi(text):\n return ANSI_ESCAPE_RE.sub('', text)", "def text_color(string: str, color: str) -> str:\n return f\"\\x1b{_code(color)}{string}\\x1b[0m\"", "def test_plain_ansi(self):\n irc_ansi = irc.parse_ansi_to_irc(string.printable)\n ansi_irc = irc.parse_irc_to_ansi(string.printable)\n self.assertEqual(irc_ansi, string.printable)\n self.assertEqual(ansi_irc, string.printable)", "def colorize_string(string: str, r: int, g: int, b: int, *, reset: bool = True) -> str:\n # Todo: optimize sequential characters with same colors.\n output = f\"\\u001b[38;2;{r};{g};{b}m{string}\"\n if reset:\n output += \"\\033[0m\"\n return output", "def __termcode(num):\r\n return \"\\033[%sm\" % num", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def strip_ansi(text: str):\n return _ANSI_SEQUENCE_REGEX.sub('', text)", "def printColorizedInWindows(text, color):\n std_out_handle = ctypes.windll.kernel32.GetStdHandle(-11)\n for i in range(0, len(color)):\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, color[i])\n sys.stdout.write(text)\n # cor padrão é 7, white\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, 7)" ]
[ "0.698737", "0.6955122", "0.68967116", "0.6775408", "0.67163545", "0.6666538", "0.6612062", "0.6594532", "0.6576787", "0.6540611", "0.6525283", "0.6465225", "0.6419213", "0.6390745", "0.6369478", "0.63463056", "0.6337062", "0.6330112", "0.6313778", "0.6313092", "0.6297528", "0.6288874", "0.6213546", "0.6130968", "0.61224574", "0.61007416", "0.60889256", "0.606803", "0.60561806", "0.60425407" ]
0.771174
0
Sort list of TKey by their names ignoring the case
def keyListSort(keyList): keyList.sort(key=lambda y: y.GetName().lower())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def natsort_case_insensitive(seq):\r\n return natsort(seq, case_sensitive=False)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def titleSort(dictList):\n\tres = sorted(dictList, key=lambda k: getSortTitle(k))\n\treturn res", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def tupleListSort(tupleList):\n tupleList.sort(key=lambda y: y[0].lower())", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def _sort_by_name(bam_fn):", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sorted(cls, tags: list, reverse: bool = False) -> list:\n return sorted(tags, key=lambda x: x.name.lower(), reverse=reverse)", "def sortTermsAlphabetically(terms):\n # Tutorial for sorting credit:\n # https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-lambda-function/\n sorted_list = sorted(terms, key=lambda i: (i[\"term_header\"], i[\"rating\"]))\n return sorted_list", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)", "def sort(self, key_func):\n pass", "def alphabetical_sorted(iterable, cmp=None, key=lambda x: x.lower(),\n reverse=False):\n return sorted(iterable, cmp, key, reverse)", "def natsorted(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key)", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def _sort_natural(names_list, reverse=False):\n def sort_key(val):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', val)]\n\n return sorted(names_list, key=sort_key, reverse=reverse)", "def sort_key(self):\n ..." ]
[ "0.7360461", "0.7314273", "0.6833507", "0.67649287", "0.66534966", "0.6629779", "0.6619612", "0.6566453", "0.65517783", "0.6549519", "0.6549519", "0.65458226", "0.65365845", "0.65276384", "0.6501143", "0.64999694", "0.64592296", "0.6409549", "0.6385534", "0.6382833", "0.63771147", "0.6361041", "0.63178575", "0.6309192", "0.6241156", "0.623927", "0.6195783", "0.61827636", "0.61751884", "0.61701214" ]
0.818581
0
Sort list of tuple by their first elements ignoring the case
def tupleListSort(tupleList): tupleList.sort(key=lambda y: y[0].lower())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def sort_list_of_tuples(list):\n list.sort(key=lambda x: x[0])\n return list", "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def natsort_case_insensitive(seq):\r\n return natsort(seq, case_sensitive=False)", "def _natsort_key_case_insensitive(item):\r\n # added the lower() call to allow for case-insensitive sorting\r\n item = str(item).lower()\r\n\r\n try:\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item)\r\n except TypeError:\r\n # if item is a tuple or list (i.e., indexable, but not a string)\r\n # work with the first element\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])\r\n for ii in range(len(chunks)):\r\n if chunks[ii] and chunks[ii][0] in '0123456789':\r\n if '.' in chunks[ii]:\r\n numtype = float\r\n else:\r\n numtype = int\r\n # wrap in tuple with '0' to explicitly specify numbers come first\r\n chunks[ii] = (0, numtype(chunks[ii]))\r\n else:\r\n chunks[ii] = (1, chunks[ii])\r\n return (chunks, item)", "def string_sort(a_list):\n for index in range(1, len(a_list)): # indexing through the list\n value = a_list[index]\n pos = index - 1\n while pos >= 0 and a_list[pos].lower() > value.lower(): #case insensitive, compare words\n a_list[pos + 1] = a_list[pos]\n pos -= 1\n a_list[pos + 1] = value", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def tuple_sorted(a):\r\n if ((isinstance(a, int) == True) or (isinstance(a, str) == True)):\r\n return a\r\n if ((isinstance(a[0], int) == True) or (isinstance(a[0], str) == True)):\r\n return sorted(a)\r\n else:\r\n w = []\r\n for b in a:\r\n w.append(tuple(tuple_sorted(b)))\r\n return tuple(sorted(tuple(w)))", "def test_signed_sort(self):\r\n\r\n # an empty list must be returned when an empty list needs to be sorted\r\n self.assertEqual(signed_natsort([]), [])\r\n\r\n # tuples that can be sorted by type-casting the first element\r\n test_list = [('9', 'SampleA'), ('-1', 'SampleD'), ('7', 'SampleC'),\r\n ('-2', 'SampleE'), ('-0.11',\r\n 'SampleF'), ('17.11', 'SampleB'),\r\n ('100', 'SampleG'), ('13', 'SampleH')]\r\n expected_result = [('-2', 'SampleE'), ('-1', 'SampleD'),\r\n ('-0.11', 'SampleF'), ('7',\r\n 'SampleC'), ('9', 'SampleA'),\r\n ('13', 'SampleH'), ('17.11', 'SampleB'), ('100', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # tuples that must be sorted alphabetically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('Hydra', 'SampleF'),\r\n ('Carina', 'SampleB'), ('Orion', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('Auriga', 'SampleC'), ('Carina', 'SampleB'),\r\n ('Cepheus', 'SampleD'), ('Cygnus',\r\n 'SampleA'), ('Grus', 'SampleE'),\r\n ('Hydra', 'SampleF'), ('Lynx', 'SampleH'), ('Orion', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case, tuples will be sorted alpha-numerically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('-0.11', 'SampleF'),\r\n ('17.11', 'SampleB'), ('100', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('17.11', 'SampleB'), ('100', 'SampleG'),\r\n ('-0.11', 'SampleF'), ('Auriga',\r\n 'SampleC'), ('Cepheus', 'SampleD'),\r\n ('Cygnus', 'SampleA'), ('Grus', 'SampleE'), ('Lynx', 'SampleH')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # 
mixed case just a list\r\n test_list = ['foo', 'bar', '-100', '12', 'spam', '4', '-1']\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # list of elements that can be type-casted\r\n test_list = ['0', '1', '14', '12', '-15', '4', '-1']\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed dict case\r\n test_dict = {\r\n 'foo': 'a', 'bar': 'b', '-100': '1', '12': '11', 'spam': 'q',\r\n '4': '11', '-1': 'e'}\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)\r\n\r\n # dict where the keys can be type-casted\r\n test_dict = {\r\n '0': 'foo', '1': 'bar', '14': 'stand', '12': 'eggs', '-15': 'q',\r\n '4': 'b', '-1': 'h'}\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)", "def sort_words_case_insensitively(words):\n #temp = sorted(words, key=lambda test_str: test_str[:1].lower() + test_str[1:])\n temp = sorted(words, key=str.lower)\n temp1 = []\n for index, word in enumerate(temp):\n if not word[0].isdigit():\n temp1.append(temp[index])\n for index, word in enumerate(temp):\n if word[0].isdigit():\n temp1.append(temp[index])\n return temp1", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())", "def alphabetical(lst):\n\treturn list(reversed(sorted(lst, key=lambda x: x[0])))", "def sort_fst(xs):\n return sorted(xs, key=lambda pair: pair[0])", "def langsort_tuples (lst, index, lang=None):\n\n reset_locale = _set_lang_locale(lang)\n lst.sort(lambda x, y: locale.strcoll(x[index], y[index]))\n reset_locale()", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', 
'0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_1(l):\n pass", "def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def natural_sort_case_insensitive_comparison(value1, value2):\n return natural_sort_comparison(value1.lower(), value2.lower())", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )" ]
[ "0.70666724", "0.6958094", "0.6883425", "0.68414825", "0.6625755", "0.65509486", "0.64947134", "0.64417565", "0.6294914", "0.62795585", "0.6264345", "0.6251216", "0.6234281", "0.62279594", "0.62186223", "0.62157", "0.6190618", "0.6180634", "0.6172756", "0.61627626", "0.6156476", "0.61263466", "0.61263466", "0.61226153", "0.6100449", "0.60908145", "0.60905695", "0.60773146", "0.606564", "0.6061432" ]
0.8253932
0
Use sys.stdout.write to write the string with an indentation equal to indent and specifying the end character
def write(string,indent=0,end=""): sys.stdout.write(" "*indent+string+end)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indentOut_ (stream, indent) :\r\n if indent == 0 :\r\n return\r\n else :\r\n stream.write(\" \"*indent)", "def out_indent(indent, *args):\n s = \"\"\n s += indent * \" \"\n s += \" \".join(args)\n return s", "def try_print_indent(self):\n if self.lasttoken[0] != lex.Token.NEWLINE:\n return\n\n if len(self.lasttoken[1]) > 0:\n self.buffer.scope_line(\"__io.write(u'\" + self.lasttoken[1] + \"')\")", "def indent(self):\n print (self.indent_size*self.n_indent*' ',end='',flush=True, file=self.fp)", "def printer(end,message):\n\n sys.stdout.write('\\r'+message+'\\t')\n sys.stdout.flush()\n if end: sys.stdout.write('\\n')", "def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')", "def write_text(self, token):\n self.try_print_indent()\n self.buffer.write_scope(\"__io.write(u'\")\n self.buffer.write(token)\n self.buffer.write_line(\"')\")", "def printIndent(s,lvl) :\n for line in s.split('\\n') :\n print('%s%s' % (' '*lvl,line))", "def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))", "def print(self, s, end='\\n'):\n self._output.write(str(s)+end)", "def console_print(out, *args, **kwargs):\n const_charset = stream_encoding(out)\n out.write(' '.join([a.encode(cons_charset, 'replace') for a in args]))\n if kwargs.get('newline', True):\n out.write('\\n')", "def indent(string, level=1):\n spaces = ' ' * (level * 4)\n return \"%s%s\" % (spaces, string)", "def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())", "def print_with_indent(*args):\n if INDENT_LEVEL:\n print(\"\\t\" * INDENT_LEVEL, end='')\n for arg in args:\n print(arg, end='')\n print()", "def print(*args, **kwargs):\n sep, file = kwargs.pop(\"sep\", b\" \"), kwargs.pop(\"file\", sys.stdout)\n at_start = True\n for x in args:\n if not at_start:\n file.write(sep)\n file.write(str(x))\n at_start = False\n file.write(kwargs.pop(\"end\", b\"\\n\"))\n if kwargs.pop(\"flush\", False):\n file.flush()", "def __indent_text_block(text):\n lines = text.splitlines()\n if len(lines) > 1:\n out = lines[0] + \"\\r\\n\"\n for i in range(1, len(lines)-1):\n out = out + \" \" + lines[i] + \"\\r\\n\"\n out = out + \" \" + lines[-1]\n return out\n return text", "def printc(txt):\n sys.stdout.write(txt)\n sys.stdout.write('\\n')", "def _newline(self):\n if prettyprint:\n return '\\n' + self._indent_spaces()\n else:\n return ''", "def indent(text, prefix, predicate=...): # -> str:\n ...", "def echo(string, end=\"\\n\"):\n\tprint(string, end=end)", "def tprint(msg, indent=0):\n\n print(\" \" * indent + \n \" \" * (indent+1) + \n \"'-- \" + msg)", "def Write(self, line='', *args, **kwargs):\n result = line % args\n offset = self._indent * 2 + kwargs.get('offset', 0)\n indent = ' ' * offset if result else ''\n self._out.write(indent + result + '\\n')", "def space():\n print(' ', end='')", "def indent(txt, indent_level):\n indent = \" \" * indent_level\n return \"\\n\".join(indent + x for x in txt.splitlines())", "def insert_indent(event):\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))", "def write(string):\n\n\tsys.stdout.write(string)\n\tsys.stdout.flush()", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def escaped_printer(to_write):\n # suppress(anomalous-backslash-in-string)\n to_write = to_write.replace(\";\", \"{c};\".format(c=char))\n to_write = to_write.replace(\"\\n\", \";\\n\") + \";\\n\"\n\n if file_object:\n 
file_object.write(to_write)\n else:\n sys.stdout.write(to_write)", "def get_indent(op):\n ret = \"\"\n for ii in range(op):\n # Would tab be better?\n ret += \" \"\n return ret", "def log(self, message, indent_amount=0):\n indent = \" \" * indent_amount\n text = \"{indent}{text}\\n\".format(indent=indent, text=message)\n sys.stdout.write(text)" ]
[ "0.7758625", "0.6414926", "0.6342674", "0.6169035", "0.6082922", "0.59543496", "0.5942104", "0.5933664", "0.58904225", "0.5866938", "0.5845034", "0.5830857", "0.5830536", "0.5825642", "0.5822062", "0.58123285", "0.58008015", "0.5715586", "0.5666716", "0.5651807", "0.56485546", "0.5639401", "0.5629479", "0.5616615", "0.56136316", "0.56098604", "0.5600371", "0.5595038", "0.5573551", "0.5537827" ]
0.8376877
0
Print informations given by keyList with a rools style choosen with optDict
def roolsPrint(keyList,optDict,indent=0): if optDict['long'] or optDict['tree']: \ roolsPrintLongLs(keyList,optDict,indent) else: roolsPrintSimpleLs(keyList,indent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def roolsPrintSimpleLs(keyList,indent):\n # This code is adaptated from the pprint_list function here :\n # http://stackoverflow.com/questions/25026556/output-list-like-ls\n # Thanks hawkjo !!\n if len(keyList) == 0: return\n (term_width, term_height) = getTerminalSize()\n term_width = term_width - indent\n min_chars_between = 2\n min_element_width = min( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n max_element_width = max( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n if max_element_width >= term_width: ncol,col_widths = 1,[1]\n else:\n # Start with max possible number of columns and reduce until it fits\n ncol = min( len(keyList), term_width / min_element_width )\n while True:\n col_widths = \\\n [ max( len(key.GetName()) + min_chars_between \\\n for j, key in enumerate(keyList) if j % ncol == i ) \\\n for i in range(ncol) ]\n if sum( col_widths ) <= term_width: break\n else: ncol -= 1\n for i, key in enumerate(keyList):\n if i%ncol == 0: write(\"\",indent) # indentation\n # Don't add spaces after the last element of the line or of the list\n if (i+1)%ncol != 0 and i != len(keyList)-1:\n if not IS_TERMINAL: write( \\\n key.GetName().ljust(col_widths[i%ncol]))\n elif isDirectoryKey(keyList[i]): write( \\\n isSpecial(ANSI_BLUE,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_BLUE_LENGTH))\n elif isTreeKey(keyList[i]): write( \\\n isSpecial(ANSI_GREEN,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_GREEN_LENGTH))\n else: write(key.GetName().ljust(col_widths[i%ncol]))\n else: # No spaces after the last element of the line or of the list\n if not IS_TERMINAL: write(key.GetName())\n elif isDirectoryKey(keyList[i]):\n write(isSpecial(ANSI_BLUE, key.GetName()))\n elif isTreeKey(keyList[i]):\n write(isSpecial(ANSI_GREEN, key.GetName()))\n else: write(key.GetName())\n write('\\n')", "def DictFunction():\r\n print \"{name} is from {city}, and he likes {cake} cake, {fruit} fruit, {salad} salad and {pasta} pasta\".format(**food_prefs)", "def listOptions(lst):\n for k, e in enumerate(lst,1):\n print(\"{:^15}{:<10}\".format(k,e))", "def roolsPrintLongLs(keyList,optDict,indent):\n if len(keyList) > 0: # Width informations\n maxCharClass = max([len(key.GetClassName()) for key in keyList])\n maxCharTime = 12\n maxCharName = max([len(key.GetName()) for key in keyList])\n dic = { \\\n \"classWidth\":maxCharClass+2, \\\n \"timeWidth\":maxCharTime+2, \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":1}\n date = ROOT.Long(0) \n for key in keyList:\n time = ROOT.Long(0)\n key.GetDatime().GetDateTime(key.GetDatime().Get(),date,time)\n time = prepareTime(time)\n rec = \\\n [key.GetClassName(), \\\n MONTH[int(str(date)[4:6])]+\" \" +str(date)[6:]+ \\\n \" \"+time[:2]+\":\"+time[2:4], \\\n key.GetName(), \\\n \"\\\"\"+key.GetTitle()+\"\\\"\"]\n write(LONG_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n if optDict['tree'] and isTreeKey(key):\n tree = key.ReadObj()\n recursifTreePrinter(tree,indent+2)", "def display_dict() -> None:\n for key in ascii_dict:\n print(key, ': ')\n for line in ascii_dict[key]:\n print(line)", "def _print_enum_opt(self, option, choices):\n for key in 
choices:\n if key == self.conf[option]:\n print(\"* %s\" % key)\n else:\n print(\" %s\" % key)", "def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")", "def print_car(car):\n for key, value in car.items():\n print(f\"{key}: {value}\")", "def printPicnic(itemsDict: dict, leftWidth: int, rightWidth: int) -> None:\n print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))\n for k, v in itemsDict.items():\n print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))", "def show_key_options(json_dict, backtrack):\n print(\"Keys available:\")\n for key in json_dict:\n print(key, end=\" \"*5)\n key = input(\"\\nEnter next key: \")\n step_into(json_dict, key, backtrack)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def print_option_set(option_set, leader):\n for option in option_set:\n labels = \",\".join(option['labels'])\n option_set = leader + labels + \" \"*(20-len(labels)) + \"- \" + option['description']\n print(option_set)", "def print_individual(individual : Dict[str, str], keys: List[str], individualsDict):\n ind_str = \"\"\n for index, key in enumerate(keys):\n if index != 0:\n ind_str += \", \"\n\n if key == 'name':\n\n #US47 twins special symbol\n twins = {}\n for id, i in individualsDict.items():\n family = i[\"child\"]\n birthday = i[\"birthday\"]\n\n if family+birthday in twins:\n twins[family+birthday] = twins[family+birthday].append(i['id'])\n else:\n twins[family+birthday] = [i['id']]\n\n flatList = []\n for twin_lists in twins.values():\n if len(twin_lists) > 1:\n flatList = flatList + twin_lists\n\n # US44: underline if dead\n if not individual[\"alive\"]:\n ind_str += \"\\u001b[4m\"\n # blue for boy, red for girl\n ind_str += \"\\033[1;34;40m\" if individual[\"gender\"] == \"M\" else \"\\033[1;35;40m\"\n ind_str += f\"name = {individual['name']}\\033[0;37;40m\" # reset color\n ind_str += \"\\u001b[0m\" # reset text decoration\n \n if individual['id'] in flatList:\n ind_str += u'\\1071'\n else:\n ind_str += f\"{key} = {individual[key]}\"\n\n if key == 'birthday':\n ind_str += format_date(individual['birthday'])\n\n print(ind_str)", "def print_options(val, cur_matches):\n print val\n\n #skip one to print none at end\n for i,v in enumerate(cur_matches[1:]):\n print \"[%i] %s : %s \"%(i+1, v[0], v[1])\n print \"[%i] %s : %s \" % (0, cur_matches[0][0], cur_matches[0][1])\n\n print \n print 'Choice?'", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)", "def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu", "def display(self):\r\n\t\tfor key, value in self.__dict__.items():\r\n\t\t\tprint(key.upper(), value, sep=': ')\r\n\r\n\t\tprint(\"\")", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n 
print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def printdict(input_dict):\n for key in input_dict:\n print key, \":\", input_dict[key]", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def show_values():\n dic_drg = {}\n dic_age = {}\n dic_sex = {}\n dic_sline = {}\n for tup in all_data:\n drg = tup[7]\n age = tup[9]\n sex = tup[10]\n sline = tup[14]\n\n dic_drg[drg] = 1\n dic_age[age] = 1\n dic_sex[sex] = 1\n dic_sline[sline] = 1\n\n print \"Age values\"\n for key in sorted(dic_age.keys()):\n print key\n\n print \"Sex values\"\n for key in sorted(dic_sex.keys()):\n print key\n\n print \"Service line values\"\n for key in sorted(dic_sline.keys()):\n if key is None or len(key) == 0:\n continue\n print \"'\" + key + \"',\",\n print\n\n print \"Drg values\"\n for key in sorted(dic_drg.keys()):\n if key is None or len(key) == 0:\n continue\n print\"'\" + key + \"',\",\n print", "def show(self, keys=None, sort_keys_function=None):\n output_keys = keys or self.keys\n if not self.items:\n print(\"No items to show\")\n else:\n for item in self.__get_items(sort_keys_function):\n for output_key in output_keys:\n print(\"{0:25}: {1!s}\".format(output_key, getattr(item, self.mapping[output_key])))\n print(\"-\" * 25)", "def build_choices(header, dictionary, after):\n out = f\"{header}\\n\"\n for i, (key, item) in enumerate(dictionary.items(), start=1):\n out += f\"{INDENT_STRING}{i}. {item}\\n\"\n out += after\n return out", "def print_pairs(self, d, level=0):\n for k, v in d.iteritems():\n if type(v) is dict:\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self.print_pairs(v, level + 1)\n elif k == \"output\":\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self._write('%s\\n' % v)\n else:\n self._write('%s%s : %s\\n' % (\"\\t\" * level, k.upper(), v))", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def print_verb_dict(verb):\n for keys in verb:\n print(f'{keys}: {verb[keys]}')", "def showd(d):\r\n return ' '.join([':%s %s' % (k,v)\r\n for k,v in\r\n sorted(d.items())\r\n if not \"_\" in k])" ]
[ "0.6518504", "0.6352941", "0.63204557", "0.6319985", "0.62665594", "0.61978984", "0.6115929", "0.6110398", "0.60612506", "0.59930515", "0.5966976", "0.59666437", "0.59666437", "0.59537727", "0.5947354", "0.59427744", "0.5914536", "0.5914091", "0.582117", "0.5819495", "0.5819495", "0.5787796", "0.57763624", "0.5764358", "0.57596225", "0.5741508", "0.5726192", "0.5722572", "0.5717987", "0.5707593" ]
0.7562387
0
Get a Producer queue instance
def getProducer(): # get the config and a producer config = ecommerce.config.getConfig() return ecommerce.queue.queue(config, queuePrefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue():\n\n return multiprocessing.Queue()", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def producer(self):\n return Producer(app=self.app, client=self.client)", "def get_producer(conf_settings=None, address=Config.INSIGHTS_KAFKA_ADDRESS): # pragma: no cover\n if conf_settings is None:\n conf_settings = {}\n conf = _get_producer_config(address, conf_settings)\n return ProducerSingleton(conf)", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def get_queue(self):\n return self.queue", "def get_queue(self):\n return self.queue", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def create(self):\n topic = self.__conn__.create_topic(self.__topic__)\n return topic.get_producer(*self.__args__, **self.__kargs__)", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def queue(self, sid):\r\n return queues.Queue(self, sid)", "def small_queue():\n que = Queue()\n que.enqueue(1)\n que.enqueue(2)\n que.enqueue(3)\n que.enqueue(4)\n que.enqueue(5)\n return que", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue" ]
[ "0.7450264", "0.7052122", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69648343", "0.69545984", "0.6784196", "0.6645031", "0.6645031", "0.66199046", "0.6527877", "0.6523349", "0.63335377", "0.6271913", "0.6227262", "0.6198443", "0.6166942" ]
0.8324251
0
Get the list of all entities of a given type from DB
def getEntityIds(type, subtype = None): # get a cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # decide the query to execute if type not in entityQueries: return [ ] # execute the query qparams = (type, ) if subtype is not None: qparams = (type, subtype) cursor.execute(entityQueries[type], qparams) # fetch the ids elist = [ ] row = cursor.fetchone() while row is not None: elist.append(int(row[0])) row = cursor.fetchone() cursor.close() return elist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def get_all(cls):\n return db_session.query(cls).order_by(cls.name).all()", "def get_entities(self, type, offset=0, limit=20):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities?type={}'.format(self.url, type, offset, limit)\n r = requests.get(url, headers=self.headers_with_link)\n return r.json()", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def get_all(class_name):\n result = class_name.query.all()\n return result", "def get_all(cls, order_by: Column = None):\n # Validate class before query\n cls.__class_validation()\n\n if order_by:\n entity_list = cls.query.order_by(order_by).all()\n else:\n entity_list = cls.query.all()\n\n if entity_list:\n return entity_list\n\n return None", "def fetch_all_sensor_types():\n query = db.session.query(\n TypeClass.id,\n TypeClass.sensor_type,\n )\n sensor_types = db.session.execute(query).fetchall()\n sensor_types = query_result_to_array(sensor_types)\n sensor_types = [st for st in sensor_types if is_valid_sensor_type(st[\"id\"])]\n return sensor_types", "def fetch_all(cls):\n return cls.query.all()", "def get_all_by_type(self, type):\n # Validation\n TrainerManager._str_validator(type)\n\n # Database Query\n session = self._db_session()\n if type == 'Regular Trainer':\n trainer_query = session.query(RegularTrainer).filter(\n RegularTrainer.type == \"Regular Trainer\").all()\n if type == 'Gym Leader':\n trainer_query = session.query(GymLeader).filter(\n GymLeader.type == \"Gym Leader\").all()\n session.close()\n\n return trainer_query", "def fetch_all(cls: Type[_T], session: Session, limit: int, offset: int) \\\n -> List[_T]:\n return Query(cls, session=session).limit(limit).offset(offset).all()", "def get_all_types():\n cnx, cursor = connect_db()\n query = \"\"\"select a.name, b.`order` from types a, types b\n where a.parent=b.guid\"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n result = pd.DataFrame(result, columns=['type', 'order'])\n cnx.close()\n return result", "def fetch_all_sensors(sensor_type):\n query = db.session.query(\n SensorClass.id,\n SensorClass.aranet_code,\n SensorClass.name,\n ).filter(SensorClass.type_id == sensor_type)\n sensors = db.session.execute(query).fetchall()\n sensors = query_result_to_array(sensors)\n sensors = {s[\"id\"]: s for s in sorted(sensors, key=lambda x: x[\"id\"])}\n return sensors", "def get_all_items(model, type):\n if(type == \"office\"):\n return model.get_all_offices()\n elif(type == \"party\"):\n return model.get_all_parties()\n return []", "def types():\n types = session.query(Type).all()\n return jsonify(types=[t.name for t in types])", "def find_objects_by_type():\n try:\n keyword = request.form[\"keyword\"]\n object_type = request.form[\"object_type\"]\n\n # Get entities based on the selection\n entities = g.user.get_api().get_by_object_types(keyword, object_type)\n\n # Parse response object into table data\n data = raw_entities_to_table_data(entities)\n\n # If no entities were found reutrn with failure state and message\n result = get_result_template()\n if 
len(data[\"data\"]) == 0:\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = 'No entities of type \"{TYPE}\" were found.'.format(\n TYPE=object_type\n )\n else:\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = {\"table_field\": data}\n return jsonify(result_decorator(result))\n\n except Exception as e:\n result = get_result_template()\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = str(e)\n return jsonify(result_decorator(result))", "def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}", "def types_query(owner_name):\n query = Products.query.with_entities(Products.type_name.label('Type'))\\\n .filter_by(owner_name=owner_name)\\\n .distinct()\n return query", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def get_records(table, id=None):\n try:\n my_class = load_entity(table)\n except LoaderError as e:\n abort(400, e)\n\n if id is not None:\n try:\n r = my_class[id]\n except ObjectNotFound:\n abort(404)\n return serialize_entity(r)\n\n records = select(r for r in my_class)\n return serialize_entity_collection(records)", "def getResourcesByEntitytype(entitytype, srcentty):\n # Distinction is implemented by python set\n cursor.execute(\n '''SELECT r1.value FROM resource as r1\n JOIN resource as r2 ON r1.content_id = r2.content_id\n JOIN entitytype as e1 ON r1.entitytype_id = e1.id\n JOIN entitytype as e2 ON r2.entitytype_id = e2.id\n JOIN content ON r1.content_id = content.id\n WHERE e1.name = %s\n AND e2.name = %s\n AND in_dump = True\n ''',\n (entitytype, srcentty,)\n )\n return {c['value'] for c in cursor}", "def entities(self) -> List[Entity]:\n return [field for field in self._fields.values() if isinstance(field, Entity)]", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def get_all(self, context, type_):\n types = None\n if type_ and isinstance(type_, basestring):\n types = type_.strip(\",\").split(\",\")\n\n try:\n db_resource_mgrs_data = self.db_api.get_all_resource_managers(\n context, types=types)\n\n _resource_mgrs_data = []\n for db_resource_mgr_data in db_resource_mgrs_data:\n _resource_mgrs_data.append(_make_response(\n db_resource_mgr_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resource managers' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resource_mgrs_data", "def all(cls):\n return dbsession.query(cls).all()", "def all(cls):\n return dbsession.query(cls).all()", "async def get_metadata_for_object_type(\n dbcon: DBConnection, object_type: str) -> Iterable[object_models.ObjectMetadata]:\n q = '''select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s'''\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, (object_type,))]", "def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias", "def list(self):\n if not self.model:\n raise NameError('database model has not been set.')\n\n with self.session() as session:\n query = self.get_query(session)\n data = query.all()\n return data", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "async def fetchall(entity, query: Union[ClauseElement, str], 
values: Dict = None) -> List[Mapping]:\n return await uvicore.db.fetchall(query=query, connection=entity.__connection__)" ]
[ "0.7030536", "0.6770714", "0.66363615", "0.6616682", "0.6602713", "0.6575097", "0.64512324", "0.6423671", "0.64117724", "0.6411315", "0.63493234", "0.634775", "0.63454515", "0.6322174", "0.62615913", "0.6218871", "0.6200281", "0.61640894", "0.6134082", "0.613314", "0.6114528", "0.6106322", "0.6104303", "0.60861796", "0.60861796", "0.60642195", "0.6038581", "0.6036301", "0.6004704", "0.5985883" ]
0.6784044
1
Mark the entities modified before a specific date as processed
def mark_processed_entities(entity_type, max_date): try: # get a connection and cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # execute the query cursor.execute(""" UPDATE Stage0_Delta SET FlagUpdated = 0 WHERE EntityType = ? AND FlagUpdated = 1 AND LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS') """, (entity_type, max_date) ) # commit changes conn.commit() except: conn.rollback() pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visitBefore(self, date):\n raise NotImplementedError()", "def is_before(self,other_date):", "def modified(self):\r\n\t\treturn self.last_modified > self.last_processed", "def modified(self):\n\t\treturn self.last_modified > self.last_processed", "def _update_modified_since(self, timestamp):\n pass", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now", "def set_modified_since(self, data):\n self.add_payload('modifiedSince', data)", "def is_modified_since(thing, action, date):\r\n from pylons import g\r\n\r\n prop = 'last_' + action\r\n if not hasattr(thing, prop):\r\n last_modified = make_last_modified()\r\n setattr(thing, prop, last_modified)\r\n thing._commit()\r\n else:\r\n last_modified = getattr(thing, prop)\r\n\r\n if not date or date < last_modified:\r\n return last_modified\r\n \r\n #if a date was passed in and it's equal to last modified\r\n return True", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified", "def test_modification_date(self):\n form_data = {'seo_title': 'New Title',\n 'seo_title_override:int': 1,\n 'form.submitted:int': 1}\n\n md_before = self.my_doc.modification_date\n self.publish(path=self.mydoc_path+'/@@seo-context-properties',\n basic=self.basic_auth, request_method='POST',\n stdin=StringIO(urllib.urlencode(form_data)))\n md_after = self.my_doc.modification_date\n\n self.assertNotEqual(md_before, md_after)", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def created_before(self, date: datetime):\n return self.created_search(date, search_type=\"before\")", "def preProcess(self, datum):\n pass", "def before_revision(self, before_revision):\n\n self._before_revision = before_revision", "def setModifiedDate(self, *args):\n return _libsbml.ModelHistory_setModifiedDate(self, *args)", "def change_modified_date(sbml):\n history = sbml.getModel().getModelHistory()\n if history:\n history.setModifiedDate(libsbml.Date(w3c_time()))\n # remove all but final modified date\n while history.getListModifiedDates().getSize() > 1:\n history.getListModifiedDates().remove(0)", "def mark_started(self):\n self.started = datetime.now()\n self.save()", "def modified(self):\n raise NotImplementedError", "def mark_preprocessed(self, processor):\n self.__preprocessed[processor] = True", "def modified(self, modified):\n\n self._modified = modified" ]
[ "0.6026466", "0.60223776", "0.6005824", "0.58785576", "0.5820918", "0.5775946", "0.5775946", "0.57705164", "0.56849986", "0.5605239", "0.5544186", "0.55204594", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.5399244", "0.5399244", "0.5380063", "0.5378017", "0.5366152", "0.5307051", "0.53008634", "0.5261901", "0.52558434", "0.5210573", "0.5206624" ]
0.6365167
0
A generator that can be used to iterate over all of the message handlers that belong to this instance.
def iter_message_handlers(self): for name in dir(self): attr = getattr(self, name) if isinstance(attr, MessageHandler): yield attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def get_handlers(self):\n\n # Get handlers \n logger.debug(\"%s: Returned %d handlers.\" % \\\n (self.__class__.__name__, len(self._handlers)))\n return self._handlers[:]", "def __iter__(self):\n return _iterEvents(self._eventHandlers)", "def get_handlers(self):\n return self._handlers", "def get_handlers(self):\n raise NotImplementedError()", "def _handlers(self):\n if not self.__handlers:\n handlers = {}\n for key in dir(self):\n # Underscores are protected\n if key.startswith('_'):\n continue\n attr = getattr(self, key)\n # Tree syntax\n if issubclass(type(attr), Handler) and attr != self:\n for name, handler in attr._handlers.iteritems():\n name = '%s.%s' % (key, name)\n handlers[name] = handler\n # Normal syntax\n elif hasattr(attr, '__call__'):\n handlers[key] = attr\n self.__handlers = handlers\n return self.__handlers", "def send(self, *args, **kw):\n result = []\n for handler in self.registry.values():\n result.append(handler(*args, **kw))\n return result", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "def u2handlers(self):\n return []", "def get_handlers(self):\n svs = []\n paths = self.get_paths()\n for p in paths:\n s = re.sub(r\"(?<={)\\w+}\", \".*\", p).replace(\"{\", \"\")\n o = re.sub(r\"(?<=<)\\w+\", \"\", s).replace(\"<\", \"\").replace(\">\",\"\").replace(\"&\", \"\").replace(\"?\", \"\")\n svs.append((o, self))\n\n return svs", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def event_handlers(self):\n if self._event_handlers is not None:\n return self._event_handlers\n\n # Get event handlers for self\n ordered = []\n unordered = []\n cls = type(self)\n for cls_name in dir(cls):\n cls_item = getattr(cls, cls_name, None)\n if isinstance(cls_item, HandlerDecorator):\n bound_handler = getattr(self, cls_name)\n if cls_item.priority is not None:\n ordered.append((cls_item, bound_handler))\n else:\n unordered.append((cls_item, bound_handler))\n ordered.sort(key=lambda h: h[0].priority)\n\n # get parent event handlers\n try:\n parent = self.parent.acquire.event_handlers\n except AttributeError:\n parent = []\n\n # Combine, cache and return\n handlers = [*ordered, *unordered, *parent]\n self._event_handlers = handlers\n return handlers", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers 
= []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()", "def signal_callbacks(self):\n for name in self.lookup_dict[self.__class__]:\n yield name, getattr(self, name)", "def get_handlers(self):\n # TODO(eric.cousineau): Consider just using `OrderedDict`.\n return map(self._handlers.get, self._frame_names)", "def get_registered_handlers(self):\n return list(self._registry.values())", "def handles(self) -> Union[Callable, Sequence]:\n return self._handles", "def get_command_handlers(self):\n\t\treturn self.command_handlers", "def _handlers(self) -> tuple:\n return self._classname2handlers[self.deco_class.__name__]", "def __iter__(self):\n while True:\n m = self.recv(timeout=1.0)\n if m is not None:\n yield m\n logger.debug(\"done iterating over bus messages\")", "def sender_iter(self):\n while 1:\n yield self.send_next()", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def event_handlers(self):\n if self.is_flow:\n return self._event_handlers\n\n try:\n return self._event_handlers\n except AttributeError:\n return self.flow._event_handlers", "def messageCollector():\n\ttempui = CatchallUI()\n\trealui = base.ui\n\ttry:\n\t\tbase.ui = tempui\n\t\tyield tempui\n\tfinally:\n\t\tbase.ui = realui", "def _get_instance_handlers ( self, name ):\n return [ ( getattr( self, method_name ), item_name )\n for method_name, item_name in\n self.__class__.__instance_traits__[ name ] ]", "def send_all(self, service, payload):\n for handler in self.partyline[service]:\n try:\n yield handler(payload)\n except HighAndDry:\n pass", "def _iterate_messages(self):\n if not self.targets:\n raise sex.SullyRuntimeError(\"No targets specified in session\")\n\n if not self.edges_from(self.root.id):\n raise sex.SullyRuntimeError(\"No requests specified in session\")\n\n self._reset_fuzz_state()\n\n for x in self._iterate_messages_recursive(this_node=self.root, path=[]):\n yield x", "def ReadMessageHandlerRequests(self):\n res = []\n leases = self.message_handler_leases\n for requests in self.message_handler_requests.values():\n for r in requests.values():\n res.append(r.Copy())\n existing_lease = leases.get(r.handler_name, {}).get(r.request_id, None)\n res[-1].leased_until = existing_lease\n\n return sorted(res, key=lambda r: r.timestamp, reverse=True)", "def messages(self):\n return list(iter(self))" ]
[ "0.75313586", "0.7092574", "0.689368", "0.6885732", "0.6611093", "0.64954853", "0.6490498", "0.6438109", "0.6379151", "0.6324771", "0.6322066", "0.62982225", "0.62764996", "0.6234379", "0.6224543", "0.62200075", "0.61538786", "0.60254633", "0.6023188", "0.60213643", "0.594284", "0.59243566", "0.5918119", "0.5899713", "0.5887125", "0.5881722", "0.5860365", "0.582422", "0.5808762", "0.57450217" ]
0.86009705
0
Adds the given service's message handlers to our managed message handlers.
def register_service(self, service): for message_handler in service.iter_message_handlers(): self.message_handlers[message_handler.name] = message_handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, service, handler):\n self.partyline.setdefault(service, []).append(handler)", "def register_websock_handlers(self, service, new_client, new_message, close_client):\n if service in self.websock_handlers:\n L.error(\"Error: service:\" + service + \" is already registered\")\n return False\n handlers = {\n \"new_client\":new_client,\n \"new_message\":new_message,\n \"close_client\":close_client\n }\n self.websock_handlers[service] = handlers\n return True", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def add_handler(self, handler, backtrack = False):\n\n # Add Handler\n self._handlers.append(handler)\n logger.debug(\"%s: handler %s added.\" % \\\n (self.__class__.__name__, handler.__name__))\n \n # Backtrack\n if backtrack:\n for message in self.get_waiting(): handler(message)\n logger.debug(\"%s: handler %s backtracked.\" % \\\n (self.__class__.__name__, handler.__name__))", "def addAllStatics(self, module=None):\n module = module or sys.modules[self.__module__]\n\n servicehandler_classes = inspect.getmembers(module, is_ServiceHandler)\n for servicehandler in servicehandler_classes:\n self.add(servicehandler[1])", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def add_handler(self, handler):\n pass", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def bind(self, svc, svc_ref):\n with self._lock:\n if ORDER_HANDLER in svc_ref.get_property(pelix.OBJECTCLASS):\n targets = svc_ref.get_property(ORDER_TARGETS)\n if isinstance(targets, (list, tuple)):\n for target in targets:\n self._target_handlers.setdefault(target, []).append(svc)\n\n else:\n self._target_handlers.setdefault(str(targets), []).append(svc)", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "async def async_service_handler(service):\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)", "def _register_services(self) -> None:\n\n for isr in self.immediate_services_with_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n isr_instance = isr()\n for handler_type in isr.message_handler_types():\n # for each explicitly supported type, add it to the router\n 
self.immediate_msg_with_reply_router[handler_type] = isr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_with_reply_router[\n handler_type_subclass\n ] = isr_instance\n\n for iswr in self.immediate_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n iswr_instance = iswr()\n for handler_type in iswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.immediate_msg_without_reply_router[handler_type] = iswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_without_reply_router[\n handler_type_subclass\n ] = iswr_instance\n\n for eswr in self.eventual_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n eswr_instance = eswr()\n for handler_type in eswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.eventual_msg_without_reply_router[handler_type] = eswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.eventual_msg_without_reply_router[\n handler_type_subclass\n ] = eswr_instance\n\n # Set the services_registered flag to true so that we know that all services\n # have been properly registered. This mostly exists because someone might\n # accidentally delete (forget to call) this method inside the __init__ function\n # of a sub-class of Node.\n self.services_registered = True", "def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DumpQuorumMessage, _dumpquorumhandler)", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def registerMessageHandler(self, message_handler, message_priority_list):\n if isinstance(message_handler, MessageHandler):\n for key in message_priority_list:\n rule = (message_priority_list[key], message_handler)\n self.message_handlers[key].append(rule)\n self.message_handlers[key].sort() # Keep priority order\n else:\n self.logger.critical(\n \"MessageHandler registration failed. 
Object \" +\n repr(message_handler) +\" is invalid type.\")\n raise TypeError(\"Only MessageHandlers can be registered!\")\n self.logger.debug(\"MessageHandler '\" + str(message_handler) +\n \"' registered to the message bus.\")", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def register_handlers(dp, di_container: di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def addHandler(self, fn):\n self.handlers.append(fn)", "def set_added_handler(self, handler):\n self._added_handler = handler", "def add_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.add_service(self, service=service)", "def add_package_handler(self, package_name, cls):\n for module in messages.MESSAGES:\n if self._fuzzy_module_name_eq(module, package_name):\n for name in module.DESCRIPTOR.message_types_by_name:\n self.add_handler(name, getattr(cls, 'on_' + name.lower()))", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "async def reload_service_handler(service: ServiceCall) -> None:\n auto = [e for e in component.entities if not e.user_defined]\n\n if (conf := await component.async_prepare_reload()) is None:\n return\n await _async_process_config(hass, conf)\n\n await component.async_add_entities(auto)\n\n await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)", "def register_handler(self, handler):\n if handler.key in self.handlers.keys():\n raise ValueError(f'Key {handler.key} already registered')\n self.handlers[handler.key] = handler", "def register_handler(self, topic, handler):\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def media_player_service_handler(service):\n target_players = component.extract_from_service(service)\n\n method = SERVICE_TO_METHOD[service.service]\n\n for player in target_players:\n getattr(player, method)()\n\n if player.should_poll:\n player.update_ha_state(True)" ]
[ "0.6415957", "0.63830936", "0.60461724", "0.6046069", "0.6006367", "0.58971536", "0.5866512", "0.5834788", "0.5806009", "0.5753481", "0.5696065", "0.5631438", "0.56120425", "0.5598932", "0.55874574", "0.55768156", "0.55757207", "0.55562836", "0.5551294", "0.5539715", "0.5536428", "0.5506033", "0.550355", "0.54882044", "0.5427758", "0.5393952", "0.53927433", "0.53868055", "0.5379734", "0.53586674" ]
0.8374331
0
Invokes the correct message handler for the given message.
def handle_message(self, sender, message): self.logger.debug('handle_message(%r, %r)', sender, message.handler) message_handler = self.message_handlers.get(message.handler) if message_handler is None: self.logger.warning("sender=%r, No handler found: '%s'", sender, message.handler) return message_handler(sender, message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def _handle_message(self, msg):\n self.event('message', msg)", "def handle(self, message: Message) -> None:\n self.handled_message = message", "def process_message(self, message):\n processors = {\n \"^org.chicago.cta.stations.\": self._handle_station,\n \"^org.chicago.cta.arrivals.\": self._handle_arrival,\n \"org.chicago.cta.turnstiles\": self._handle_turnstiles\n }\n processor = processors.get(message.topic, False)\n if processor:\n processor(message)\n else:\n logger.debug(\n \"unable to find handler for message from topic %s\", message.topic\n )", "def handle(self, message: InternalMessage) -> None:\n if isinstance(message, TransactionMessage):\n self._handle_tx_message(message)\n elif isinstance(message, StateUpdateMessage):\n self._handle_state_update_message(message)", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def _PushHandlerMessage(self, message):\n\n # We only accept messages of type MESSAGE.\n if message.type != rdf_flows.GrrMessage.Type.MESSAGE:\n raise ValueError(\"Unexpected message type: %s\" % type(message))\n\n if not message.session_id:\n raise ValueError(\"Message without session_id: %s\" % message)\n\n # Assume the message is authenticated and comes from this client.\n message.source = self.client_id\n\n message.auth_state = \"AUTHENTICATED\"\n session_id = message.session_id\n\n handler_name = message_handlers.session_id_map.get(session_id, None)\n if handler_name is None:\n raise ValueError(\"Unknown well known session id in msg %s\" % message)\n\n logging.info(\"Running message handler: %s\", handler_name)\n handler_cls = handler_registry.handler_name_map.get(handler_name)\n handler_request = rdf_objects.MessageHandlerRequest(\n client_id=self.client_id,\n handler_name=handler_name,\n request_id=message.response_id,\n request=message.payload)\n\n handler_cls().ProcessMessages([handler_request])", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # 
Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()", "def message_callback(self, message):\n message_data = json.loads(message)\n\n if message_data.get('command') == 'error':\n return self.command_error(message_data)\n\n if 'device_type' in message_data and not message_data['device_type'].startswith(self.device_filter):\n return\n\n # Try to find a matching command and execute it\n command_name = message_data['command']\n command_data = message_data.get('data', {})\n device_name = message_data.get('name')\n\n command_handler_name = 'command_{}'.format(command_name)\n if not hasattr(self, command_handler_name):\n logging.info(\"{} does not support command {}\".format(\n self,\n command_name\n ))\n return\n\n command_handler = getattr(self, command_handler_name)\n return command_handler(device_name, command_data)", "def handle_message(self, msg):\n\n if msg.error != None:\n return\n else:\n try:\n method = self.get_service_method(msg.method_name)\n params = getattr(msg, 'params', None)\n msg.result = self.execute_method(method, params)\n except (MethodNotFoundError, InvalidParamsError, ServerError), ex:\n logging.error(ex)\n msg.error = ex\n except Exception, ex:\n logging.error(ex)\n ex = InternalError(\"Error executing service method\")\n ex.data = ''.join(traceback.format_exception(*sys.exc_info()))\n msg.error = ex", "def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))", "def handle(self, message):", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def handle_message(self, validated_message: dict):\n self.logger.debug(f'Sensor received message {validated_message}')\n if (validated_message['messageType'] !=\n model.MessageTypes.Control.value):\n self.logger.debug(\n 'Sensor ignoring because messageType was not control'\n )\n return\n if validated_message['messageBody']['target'] != 
self.component_id:\n self.logger.debug(\n 'Sensor ignoring because not targeted at me'\n )\n return\n\n subtype = validated_message['messageSubtype']\n try:\n self.logger.debug(f'Dispatching message with subtype {subtype}')\n self.message_handler_table[subtype](validated_message)\n except KeyError:\n self.logger.warning(f'No handler for with subtype {subtype}')\n pass", "def execute_message_received(self, message_received):\n pass", "def call(self, message: Message) -> None:\n self.fn(message)", "def on_message(client, userdata, msg):\n TOPIC_DISPATCH_DICTIONARY[msg.topic][\"method\"](msg)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def handle_message(self, message):", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)" ]
[ "0.7761963", "0.72658277", "0.7105738", "0.7102408", "0.7060995", "0.7036604", "0.70310044", "0.7029397", "0.699162", "0.69913566", "0.698211", "0.6949379", "0.69273627", "0.6888938", "0.68215775", "0.6818507", "0.67997396", "0.6774335", "0.67683744", "0.6749502", "0.6715756", "0.6680169", "0.665071", "0.66503346", "0.6640639", "0.6629455", "0.66195524", "0.660921", "0.659287", "0.6557109" ]
0.73108006
1
Convert a time.struct_time as returned by feedparser into a
def _convert_struct_time_to_dt(stime): return date.fromtimestamp(mktime(stime))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def _convert_struct_time_to_dt(stime):\n\n dt = datetime.datetime.fromtimestamp(mktime(stime))\n\n return dt.date()", "def unmarshall_time(tyme):\r\n return datetime.datetime(day=tyme['day'],\r\n month=tyme['month'],\r\n year=tyme['year'],\r\n hour=tyme['hour'],\r\n minute=tyme['minute'],\r\n second=tyme['second'],\r\n microsecond=tyme['microsecond'])", "def unpack_time(s, type='I'):\n\ttry:\n\t\t(l,), s = unpack(\"!\"+type, s)\n\texcept TypeError, e:\n\t\traise TypeError(\"Problem unpacking time: %s\" % e)\n\n\tif l < 0:\n\t\treturn None\n\treturn datetime.fromtimestamp(l), s", "def parse_time(s):\n return time.gmtime(float(s))", "def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None", "def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)", "def parse_time_record(self, record):\n\n time_record = TIME_RECORD_MATCHER.match(record)\n if not time_record:\n time_data = None\n else:\n time_data = struct.unpack(TIME_FORMAT, \n time_record.group(0)[0:TIME_RECORD_SIZE])\n\n return time_data", "def parse_time_to_SAML(time):\n data = datetime.utcfromtimestamp(float(time))\n return data.strftime('%Y-%m-%dT%H:%M:%SZ')", "def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", 
\"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)", "def time(self):\n return parse_time(self['timestamp'])", "def _time_to_date(parsed_time):\n if not parsed_time:\n return parsed_time\n return datetime.fromtimestamp(calendar.timegm(parsed_time), tz=timezone.utc)", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def time_convert(timestr):\n \n try:\n # Analyse given time str to seperate elements.\n struct_time = time.strptime(timestr[:-4], \"%a, %d %b %Y %H:%M:%S\")\n # Convert given time by secend unit.\n t = time.mktime(struct_time) \n # Re-construct time to isotime format.\n isot = time.strftime(\"%Y-%m-%d\", time.gmtime(t))\n return isot\n \n except:\n return ''", "def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )", "def __init__(self, struct_time):\r\n\t\tself.struct_time = 
struct_time\r\n\t\tself.year = struct_time[0]\r\n\t\tself.mon = self.set_month(struct_time[1])\r\n\t\tself.day = struct_time[2]\r\n\t\tself.hour = struct_time[3]\r\n\t\tself.min = struct_time[4]\r\n\t\tself.wday = self.set_week_day(struct_time[6])\r\n\t\tself.day_or_night = self.set_day_state(struct_time[8])", "def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg", "def _serialize_time(val):\n return val.isoformat()", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)", "def parse_gerrit_time(value):\n parts = value.split('.')\n dt = datetime.datetime.strptime(parts[0], GERRIT_TIMESTAMP_FMT)\n if len(parts) > 1:\n dt += datetime.timedelta(\n microseconds=int(float('0.%s' % parts[1]) * 1000000.0))\n return dt", "def parse_timestamp(timestamp):\n if not timestamp or timestamp == '0000-00-00T00:00:00Z':\n return struct_time((0, 0, 0, 0, 0, 0, 0, 0, 0))\n return strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')", "def _parse_timestamp(self, api_time):\n return (\n pendulum.parse(api_time)\n if api_time is not None\n else pendulum.from_timestamp(-1)\n )", "def _ParseTimeElements(self, time_elements_structure):\n try:\n year, month, day_of_month, hours, minutes, seconds = (\n time_elements_structure)\n\n # Ensure time_elements_tuple is not a pyparsing.ParseResults otherwise\n # copy.deepcopy() of the dfDateTime object will fail on Python 3.8 with:\n # \"TypeError: 'str' object is not callable\" due to pyparsing.ParseResults\n # overriding __getattr__ with a function that returns an empty string\n # when named token does not exist.\n time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=time_elements_tuple)\n\n # APT History logs store date and time values in local time.\n date_time.is_local_time = True\n\n return date_time\n\n except (TypeError, ValueError) as exception:\n raise errors.ParseError(\n 'Unable to parse time elements with error: {0!s}'.format(exception))", "def rfc3339nano_to_datetime(my_time, time_dash='_'):\n my_time = my_time.replace('_', ':') # Replace undercores with colons\n my_time = re.sub(r\"\\.\\d*\", \"\", my_time) # Strip nanoseconds\n return datetime.datetime.strptime(my_time, f\"%Y-%m-%dT%H:%M:%S%z\") # Parse string to datetime", "def _datetime2et(time: datetime) -> float:\n if isinstance(time, float):\n return time\n if not isinstance(time, datetime):\n raise TypeError(\"Time must be a float or a datetime object.\")\n return spy.str2et(time.isoformat())", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)" ]
[ "0.7229315", "0.67260563", "0.6565747", "0.6513471", "0.63544554", "0.6352278", "0.63428724", "0.62921166", "0.6273469", "0.6266583", "0.62201977", "0.62145513", "0.6209023", "0.61696506", "0.6159586", "0.61529684", "0.61013764", "0.608812", "0.59947324", "0.5987127", "0.5983697", "0.59629583", "0.59613794", "0.59556013", "0.59532315", "0.5933886", "0.59100986", "0.5908248", "0.58864784", "0.5873513" ]
0.7049241
1
Use feedparser to parse PyBites RSS feed. Return a list of Entry namedtuples (date = date, drop time part)
def get_feed_entries(feed=FEED) -> list: f = feedparser.parse(feed) entry_list = [] for entry in f.entries: date = _convert_struct_time_to_dt(entry["published_parsed"]) title = entry["title"] link = entry["link"] tags = [tag["term"].lower() for tag in entry["tags"]] entry_list.append(Entry(date=date, title=title, link=link, tags=tags)) return entry_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feed_entries(feed=FEED):\n d = feedparser.parse(feed)\n entries = d.entries\n \n all_entries =[]\n for entry in entries:\n title = entry.title\n link = entry.link\n date = entry.published_parsed\n tags = entry.tags\n tags = [t.get('term').lower() for t in tags]\n\n date = _convert_struct_time_to_dt(date)\n\n\n entry = Entry(date,title,link,tags)\n all_entries.append(entry)\n\n return all_entries", "def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def rss_fetch():\n items = {}\n\n def add_item(pubDate, title, link):\n nonlocal items\n idx = float(parsedate_to_datetime(pubDate).timestamp())\n while idx in items:\n idx = idx + 0.1\n dbg(\"Adding item: %11.1f \\\"%s\\\" %s\" % (idx, title, link))\n items[idx] = {}\n items[idx]['title'] = title\n items[idx]['link'] = link\n\n state = \"\" # state parser is in (\"\", \"item\", \"title\", \"link\", \"pubDate\")\n title = \"\" # Currently parsing this title.\n link = \"\" # \" \" \" link\n pubDate = \"\" # \" \" \" pubDate (index)\n\n def start_element(name, attrs):\n nonlocal state\n nonlocal title\n nonlocal link\n nonlocal pubDate\n dbg(\"Start: %s %s %s\" %(name, str(attrs), str((state, title, link, pubDate))))\n if state == \"\":\n if name == \"item\":\n state = \"item\"\n elif state == \"item\":\n if name == \"title\":\n state = \"title\"\n if title:\n prn(\"Two titles?\")\n sys.exit(1)\n elif name == \"link\":\n state = \"link\"\n if link:\n prn(\"Two links?\")\n sys.exit(1)\n elif name == \"pubDate\":\n state = \"pubDate\"\n if pubDate:\n prn(\"Two pubDates?\")\n sys.exit(1)\n\n\n def end_element(name):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"End: %s %s\" % (name, str((state, title, link, pubDate))))\n if state == \"item\":\n if name == \"item\":\n if title == \"\":\n prn(\"No title at end item.\")\n sys.exit(1)\n if link == \"\":\n prn(\"No link at end item.\")\n sys.exit(1)\n if pubDate == \"\":\n prn(\"No pubDate at end item.\")\n sys.exit(1)\n else:\n add_item(pubDate, title, link)\n state = \"\"\n title = \"\"\n link = \"\"\n pubDate = \"\"\n elif state == \"title\":\n if name == \"title\":\n state = \"item\"\n elif state == \"link\":\n if name == \"link\":\n state = \"item\"\n elif state == \"pubDate\":\n if name == \"pubDate\":\n state = \"item\"\n\n def char_data(data):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"Data: %s %s)\" % (str(data), str((state, title, link, pubDate))))\n if state == \"title\":\n title = title + data\n elif state == \"link\":\n link = link + data\n elif state == \"pubDate\":\n pubDate = pubDate + data\n\n\n p = xml.parsers.expat.ParserCreate(\"UTF-8\")\n\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n with urllib.request.urlopen('https://news.ycombinator.com/rss') as f:\n xml_file = b\"\"\n while 
True:\n r = f.read(255)\n if r:\n xml_file = xml_file + r\n else:\n break\n\n try:\n p.Parse(xml_file.decode(\"UTF-8\"), True)\n except:\n dbg(\"Writing fetched RSS feed to file...\")\n err_f = open(parse_error_output_file, \"ab\")\n err_f.write(b\"GET URL: \")\n err_f.write(f.geturl().encode(\"UTF-8\"))\n err_f.write(b\"\\nReturn Code: \")\n err_f.write((\"%d\\n\" % (f.getcode(), )).encode(\"UTF-8\"))\n err_f.write(b\"Meta Info:\\n\")\n err_f.write(f.info().as_bytes(unixfrom=True))\n err_f.write(b\"XML output:\\n\")\n err_f.write(xml_file)\n err_f.close()\n dbg(\"Done.\")\n raise\n\n return items", "def parse_rss(database, feed, depth=1):\n # Get the updates article count, and article urls and publish dates.\n rss_a = rss_feed(feed)\n \n # Get all (article urls, publish dates) pairs\n articles = []\n pairs = rss_a[1].items()\n for url, pubdate in pairs: \n articles += crawl_url(database, url, date=pubdate, depth=depth)\n \n return articles", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n \r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def feed2fields(file):\r\n import feedparser\r\n d = feedparser.parse(file)\r\n for entry in d.entries:\r\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\r\n if hasattr(entry, \"updated_parsed\") else None)\r\n author = entry.author if hasattr(entry, \"author\") else None\r\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\r\n\r\n slug = slugify(entry.title)\r\n kind = 'article'\r\n yield (entry.title, entry.description, slug, date, author, [], tags,\r\n kind, \"html\")", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check 
URL(is RSS?) and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = _char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def fetch_feeds(self):\n feed_list = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n rss_title = rss.get('title', '-')\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n feed_list.append({\n 'title':rss_title,\n 'href':rss_href,\n 'status': feed.get('status', 400),\n 'updated': feed.get('updated', None),\n 'updated_parsed': feed.get('updated_parsed', None),\n 'encoding': feed.get('encoding', None),\n 'bozo': feed.get('bozo', None),\n 'headers': feed.get('headers', {}),\n 'etag': feed.get('etag', None),\n 'version': feed.get('version', None),\n 'entries': feed.get('entries', []),\n 'namespaces': feed.get('namespaces', None)\n })\n\n return feed_list", "def get_rss_feed(feed_key):\n\n if rss_feeds[feed_key]['updated'] is None:\n # Update Cache\n entries = update_cache(feed_key)\n elif (datetime.datetime.today() - rss_feeds[feed_key]['updated']).seconds > (60 * 5):\n # Update Cache\n entries = update_cache(feed_key)\n else:\n # Read Cache\n entries = get_cache(feed_key)\n\n return entries", "def get_feed_entries(helper, name, start, stats):\n feed_url = helper.get_arg('feed_url')\n feed_creds = helper.get_arg('credentials')\n feed_headers = {}\n # If auth is specified, add it as a header.\n if feed_creds is not None:\n auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])\n auth = base64.encodestring(auth).replace('\\n', '')\n feed_headers['Authorization'] = 'Basic {0}'.format(auth)\n\n # Pull events as json.\n resp = helper.send_http_request(\n url=feed_url,\n method='GET',\n parameters={'v': 'json', 'tr': 1},\n headers=feed_headers,\n verify=VERIFY_CERTIFICATE,\n )\n\n # Raise exceptions on problems.\n resp.raise_for_status()\n feed_entries = resp.json()\n\n # Return the normalized events to be saved to the kv store.\n return normalized(name, feed_entries, start)", "def download_feed_return_objects(rss_url):\r\n try:\r\n feed_obj = rss_exists(rss_url)\r\n except:\r\n yield None\r\n return\r\n\r\n feed_obj_found = False\r\n feed_parser_results, success = get_rss(rss_url)\r\n\r\n if feed_parser_results is None:\r\n 
error_reporter.captureMessage(u'Feed Parser results is None', **dict(rss_url=rss_url))\r\n yield None\r\n return\r\n\r\n if feed_obj is None:\r\n feed_obj = create_new_feed(feed_parser_results, rss_url)\r\n else:\r\n feed_obj_found = True\r\n\r\n feed_id = feed_obj.id\r\n feed_obj.title = feed_parser_results.get(\"title\", \"\") or \"\"\r\n max_length_field(feed_obj, 'title', 100)\r\n\r\n feed_obj.status_code = feed_parser_results.get(\"status\", \"\") or 200\r\n feed_obj.status = find_feed_status_from_scode(feed_obj)\r\n\r\n feed_obj.etag = cut_clean_etag(feed_parser_results.get(\"etag\", \"\"))\r\n\r\n updated_date = feed_parser_results.get(\"updated_parsed\")\r\n feed_obj.updated = dt.fromtimestamp(mktime(updated_date)) if updated_date is not None else dt.utcnow()\r\n #\tfeed_obj.published = dt.fromtimestamp(mktime(published_date)) if published_date is not None else None\r\n feed_obj.last_check = dt.utcnow()\r\n\r\n # We could be creating a new feed, or updating the existing one.\r\n yield feed_obj\r\n rss_posts = []\r\n\r\n for feed_article in feed_parser_results.get(\"entries\", []):\r\n ptime = feed_article.get(\"published_parsed\", None)\r\n post_date = dt.fromtimestamp(mktime(ptime)) if ptime is not None else dt.utcnow()\r\n #\t\tprint \"%r\" % post\r\n p = Post(\r\n id=uuid.uuid1(),\r\n title=feed_article.get(\"title\", \"\"),\r\n author=feed_article.get(\"author\", \"\"),\r\n href=feed_article.get(\"href\", \"\"),\r\n post_id=feed_article.get(\"id\", \"\"),\r\n published_at=post_date,\r\n feed_id=feed_id\r\n )\r\n\r\n p.original_title = max_length_field(p, 'title', 200)\r\n p.original_author = max_length_field(p, 'author', 200)\r\n\r\n p.content_html = feed_article.get(\"content\", \"\") or \"\"\r\n\r\n if feed_article.has_key(\"media_content\"):\r\n media_contents = feed_article.get(\"media_content\", []) or []\r\n if media_contents is not None and (not isinstance(media_contents, basestring)) and isinstance(\r\n media_contents, collections.Iterable):\r\n p.media = [media.get(\"url\") for media in media_contents]\r\n\r\n hasHash = False\r\n\r\n if feed_article.has_key(\"feedburner_origlink\"):\r\n p.original_link = feed_article.get(\"feedburner_origlink\", \"\")\r\n if non_empty_str(p.original_link):\r\n p.link_hash = url_hash(safe_str(p.original_link))\r\n hasHash = True\r\n\r\n if feed_article.has_key(\"link\"):\r\n p.href = feed_article.get(\"link\", \"\")\r\n if not hasHash and non_empty_str(p.href):\r\n p.link_hash = url_hash(safe_str(p.href))\r\n hasHash = True\r\n\r\n if not hasHash:\r\n print \"Post don't have any hash\"\r\n\r\n p.title_hash = url_hash(safe_str(p.title)) if non_empty_str(p.title) else \"\"\r\n p.post_id_hash = url_hash(safe_str(p.post_id)) if non_empty_str(p.post_id) else \"\"\r\n\r\n if feed_article.has_key(\"tags\"):\r\n if isinstance(feed_article['tags'], collections.Iterable):\r\n p.tags = [pst.get(\"term\") for pst in feed_article['tags']]\r\n\r\n rss_posts.append(p)\r\n\r\n has_posts = len(rss_posts) > 0\r\n post_id_hashes = [p.post_id_hash for p in rss_posts]\r\n #\tpost_title_hashes = [p.title_hash for p in rss_posts]\r\n post_link_hashes = [p.link_hash for p in rss_posts]\r\n\r\n found_posts_id_hashes = []\r\n found_posts_link_hashes = []\r\n\r\n if feed_obj_found and has_posts:\r\n existing_posts = find_existing_posts(feed_id, post_id_hashes, post_link_hashes)\r\n\r\n for ex_post_id_hash, ex_link_hash in existing_posts:\r\n found_posts_id_hashes.append(ex_post_id_hash)\r\n found_posts_link_hashes.append(ex_link_hash)\r\n\r\n has_existing_posts = 
len(found_posts_id_hashes) > 0 or len(found_posts_link_hashes) > 0\r\n\r\n new_post_count = 0\r\n if has_posts:\r\n for rss_post in rss_posts:\r\n should_skip = False\r\n\r\n if has_existing_posts:\r\n if non_empty_str(rss_post.post_id_hash) and rss_post.post_id_hash in found_posts_id_hashes:\r\n should_skip = True\r\n elif rss_post.link_hash in found_posts_link_hashes:\r\n should_skip = True # \"Link Hash found in existing records\"\r\n\r\n if not should_skip:\r\n new_post_count += 1\r\n yield rss_post\r\n\r\n feed_history = FeedHistory(id=uuid.uuid1(),\r\n feed_id=feed_obj.id,\r\n timestamp=dt.utcnow(),\r\n status=feed_obj.status_code,\r\n post_count=new_post_count,\r\n etag=feed_obj.etag)\r\n yield feed_history", "def parseWebFeed(data,date_object):\n\twebResult = []\n\tif data:\n\t\tfor repo in data['entries']:\n\t\t\tentryDate = repo['time'].split('at')[0]\n\t\t\tentryDate = entryDate.lstrip(\"0\").replace(\" 0\", \" \").strip()\n\t\t\tif entryDate == date_object:\n\t\t\t\ttext = repo['text']\n\t\t\t\tsentiment = repo['sentiment']\n\t\t\t\ttime = repo['time']\n\t\t\t\ttime = dateutil.parser.parse(time).isoformat(' ').split('+')[0] \n\t\t\t\ttime = datetime.datetime.strptime( time, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\t\titem = copy.deepcopy(templateResult)\n\t\t\t\titem['message'] = text + \" , sentiment: \" + sentiment\n\t\t\t\titem['datetime'] = time\n\t\t\t\titem['source'] = 'Web EndPoint'\n\t\t\t\twebResult.append(item)\n\treturn webResult", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def get_rss(self):\r\n rssfiles = []\r\n \r\n rssfiles.append(feedparser.parse(self.url))\r\n return rssfiles", "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed", "def _parse_feed(self,feed): \n meta=[]\n for entry in feed:\n item_meta=self._parse_entry(entry)\n item_meta['video-id']='0'\n meta.append(item_meta)\n self._logger.info('%s videos were founded and parsed at Megavideo',len(meta)) \n return meta", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def get_rss_infos():\n\n url_rss_lib = \"http://www.liberation.fr/rss\"\n soup = utils.recovery_flux_url_rss(url_rss_lib)\n\n rss_items = soup.find_all(\"li\")\n\n rss_list = []\n\n link_rss = []\n\n for ri in rss_items:\n if ri.get(\"class\") == ['rss-item']:\n rss_list.append(ri.a.get('href'))\n\n for rl in rss_list:\n soup = utils.recovery_flux_url_rss(rl)\n entre = soup.find_all('entry')\n for e in entre:\n link_rss.append(e.link.get('href'))\n\n return link_rss", "def list_feed(self):\n entities = []\n entities_j = self._get('strings/tags/module:inventory,feed:*')\n if entities_j and entities_j['feed']:\n for entity_j in entities_j['feed']:\n entities.append(Feed(entity_j, CanonicalPath('/f;{}'.format(entity_j))))\n return entities", "def request_rss(self, url):\n return feedparser.parse(url)", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = 
ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def parse_shaarli_rss_export(rss_file):\n\n rss_file.seek(0)\n entries = rss_file.read().split('<entry>')[1:]\n for entry in entries:\n # example entry:\n # <entry>\n # <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>\n # <link href=\"https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html\" />\n # <id>https://demo.shaarli.org/?cEV4vw</id>\n # <published>2019-01-30T06:06:01+00:00</published>\n # <updated>2019-01-30T06:06:01+00:00</updated>\n # <content type=\"html\" xml:lang=\"en\"><![CDATA[<div class=\"markdown\"><p>&#8212; <a href=\"https://demo.shaarli.org/?cEV4vw\">Permalink</a></p></div>]]></content>\n # </entry>\n\n trailing_removed = entry.split('</entry>', 1)[0]\n leading_removed = trailing_removed.strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]\n\n title = str_between(get_row('title'), '<title>', '</title>').strip()\n url = str_between(get_row('link'), '<link href=\"', '\" />')\n ts_str = str_between(get_row('published'), '<published>', '</published>')\n time = datetime.strptime(ts_str, \"%Y-%m-%dT%H:%M:%S%z\")\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def parse_rss_export(rss_file):\n\n rss_file.seek(0)\n items = rss_file.read().split('<item>')\n items = items[1:] if items else []\n for item in items:\n # example item:\n # <item>\n # <title><![CDATA[How JavaScript works: inside the V8 engine]]></title>\n # <category>Unread</category>\n # <link>https://blog.sessionstack.com/how-javascript-works-inside</link>\n # <guid>https://blog.sessionstack.com/how-javascript-works-inside</guid>\n # <pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>\n # </item>\n\n trailing_removed = item.split('</item>', 1)[0]\n leading_removed = trailing_removed.split('<item>', 1)[-1].strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r for r in rows if r.strip().startswith('<{}>'.format(key))][0]\n\n url = str_between(get_row('link'), '<link>', '</link>')\n ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %z\")\n title = str_between(get_row('title'), '<![CDATA[', ']]').strip() or None\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def getFeed(self):\n\n entries_xml = []\n\n for entry in self.middleware.entries:\n request = entry['request']\n response = entry.get('response')\n begin = time.localtime(request['begin'])\n entry_id = self._generateEntryTagURI(entry)\n entry_title = '%s %s ' % (request['method'], request['url'])\n\n short_url = request['url']\n max_url_len = 40\n if len(short_url) > max_url_len:\n prefix = short_url[:9]\n suffix = short_url[-max_url_len+9:]\n short_url = prefix + '...' 
+ suffix\n entry_title = '%s %s ' % (request['method'], short_url)\n\n # Make the <rz:cgi_variable> nodes into a string\n cgivars = \"\"\n for k,v in request['cgi_variables']:\n newv = escape(str(v))\n s = cgi_variable_fmt % (k, newv)\n cgivars = cgivars + s\n\n # Make the <rz:cgi_variable> nodes into a string\n wsgivars = \"\"\n for k,v in request['wsgi_variables']:\n newv = escape(str(v))\n s = wsgi_variable_fmt % (k, newv)\n wsgivars = wsgivars + s\n\n # Make the <rz:request> node\n rzrequest = rzrequest_fmt % {\n 'begin': request['begin'],\n 'cgi_variables': cgivars,\n 'wsgi_variables': wsgivars,\n 'method': request['method'],\n 'url': request['url'],\n 'body': escape(request['body']),\n }\n\n if response is not None:\n # Make the <rz:request> node\n headers = ''\n for k,v in response['headers']:\n newv = escape(str(v))\n s = header_fmt % (k, newv)\n headers = headers + s\n\n rzresponse = rzresponse_fmt % {\n 'begin': response['begin'],\n 'end': response['end'],\n 'content-length': response['content-length'],\n 'headers': headers,\n 'status': response['status'],\n 'body': escape(response['body']),\n }\n else:\n rzresponse = ''\n\n\n # Make the atom:entry/atom:content node\n content = contentfmt % {\n 'logentry_id': entry_id,\n 'rzrequest': rzrequest,\n 'rzresponse': rzresponse,\n }\n\n entry_xml = entryfmt % {\n 'entry_id':entry_id,\n 'entry_title':escape(entry_title),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', begin),\n 'summary':escape(pprint.pformat(entry)),\n 'content':content,\n }\n entries_xml.append(entry_xml)\n\n now = time.time()\n\n body = feedfmt % {\n 'title':'repoze.debug feed for pid %s' % self.middleware.pid,\n 'entries':'\\n'.join(entries_xml),\n 'feed_id':self._generateFeedTagURI(now, self.middleware.pid),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(now)),\n }\n\n resp = Response(content_type='application/atom+xml', body=body)\n return resp" ]
[ "0.7831155", "0.729255", "0.7232448", "0.67960274", "0.674423", "0.6664003", "0.66603625", "0.6557846", "0.6554401", "0.65351653", "0.65075", "0.64848477", "0.647246", "0.6385361", "0.63827616", "0.63343084", "0.6323492", "0.6318049", "0.6304754", "0.6304091", "0.62566954", "0.6235731", "0.6212814", "0.618577", "0.6166525", "0.6085335", "0.60850614", "0.6077082", "0.60504395", "0.603122" ]
0.7714306
1
Check if search matches any tags as stored in the Entry namedtuple (case insensitive, only whole, not partial string matches).
def filter_entries_by_tag(search, entry) -> bool:
    tags = entry.tags
    search_words = search.strip().translate(str.maketrans("&|", "  ")).split()
    if "&" in search:
        search_type = "AND"
    else:
        search_type = "OR"
    for word in search_words:
        if word.lower() in tags:
            if search_type == "OR":
                return True
        elif search_type == "AND":
            return False
    if search_type == "OR":
        return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_entries_by_tag(search, entry):\n \n entry_tags = entry.tags\n if '&' in search:\n splits = search.split('&')\n\n return all(split.lower() in entry_tags for split in splits)\n elif '|' in search:\n splits = search.split('|')\n return any(split.lower() in entry_tags for split in splits)\n else:\n return search.lower() in entry_tags", "def match(self, name, tags):\n return name.lower() in tags", "def find(self, search):\n if type(search) == str:\n search = [search]\n\n for s in search:\n if self.text.lower().find(s.lower()) != -1:\n return True\n\n return False", "def test_search_tags(self):\n page = self.page1\n page.search_tags = \"Chutes, Ladders\"\n page.save_revision().publish()\n taglist = page.clean_search_tags\n for name in [\"Chutes\", \"Ladders\"]:\n self.assertIn(name, taglist)", "def match(self, name, tags):\n name, tags = self.get_compiled(name, tags)\n \n def index_of_letter(l):\n return ord(l) - ord('a')\n \n true_val, false_val = name\n \n if true_val:\n return index_of_letter(true_val) in tags\n else:\n return index_of_letter(false_val) not in tags", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def hasname(self, tag: str) -> bool:\n for key in self.formal_names:\n if key in tag.lower():\n return True\n\n # Exit case if key -> value not in mapping \n return False", "async def search(self, ctx: \"IceTeaContext\", *, query):\n response_list = await ctx.guild_data.search_tags(query)\n if len(response_list) > 0:\n response_message = \"\\n\".join([tag.title for tag in response_list])\n await ctx.send(f\"Found these tags:\\n{response_message}\")\n else:\n await ctx.send(\"No similar tags found\")", "def filter_search_results_entries(tag, form_type, description_text):\n desc_re = re.compile(description_text, re.I)\n form_re = re.compile(form_type, re.I)\n try:\n return (tag.parent.name == 'td' and\n tag.name == 'a' and\n tag['id'] == 'documentsbutton' and\n tag.parent.parent.find(string=form_re) and\n tag.parent.parent.find(string=desc_re))\n except:\n return False", "def test_case_insensitive(self):\r\n # Generate demo tag into the system\r\n tags = [make_tag() for i in range(5)]\r\n [DBSession.add(t) for t in tags]\r\n\r\n test_str = tags[0].name[0:4].upper()\r\n suggestions = TagMgr.complete(test_str)\r\n self.assertTrue(\r\n tags[0] in suggestions,\r\n \"The sample tag was found in the completion set\")", "def test_tag_search(self):\n url = reverse_lazy('tag-list') + '?search={}'.format('testtag')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data['results']\n self.assertEqual(len(results), 3)\n\n for tag in ('testtag1', 'testtag3'):\n result = list(filter(lambda it: it['title'] == tag, results))\n self.assertEqual(len(result), 1)\n result = result[0]\n\n self.assertEqual(len(result['posts']), 3)", "def exact_search(string, row):\n clear_screen()\n found = False\n for item in row:\n if string.lower() in item[\"Task\"].lower() \\\n or string.lower() in item[\"Notes\"].lower():\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)", "def search(self, term):", "def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 
'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)", "def search(self, word):", "def text_search():\n existing_fields = self.attr_name_map[object_class]\n text = \"%{}%\".format(exp[\"text\"])\n p = lambda f: f.ilike(text)\n return or_(*(\n with_key(field, p)\n for field in fields\n if field in existing_fields\n ))", "def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def match(self, filter_text):\n return filter_text.lower() in self.name.lower() or \\\n filter_text.lower() == self.isbn.lower() or \\\n filter_text.lower() in (str(tag).lower() for tag in self.tags)", "def make_query(term):\n def search(text):\n s=term.lower()\n if s in text.lower():\n return True\n return False\n return search", "def search(self, search):\n raise NotImplementedError", "def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict", "def __contains__(self, query):\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n if query in self._words:\n return True\n elif query.lower() in self._words:\n return True\n else:\n return False", "def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass", "def has_hashtag(self, tag_list, **kwargs):\n lowlist = [tag.lower() for tag in tag_list]\n alllower = ('case_sensitive' in kwargs and not kwargs['case_sensitive'])\n for ht in self.original.entities['hashtags']:\n lowht = ht['text'].lower()\n if alllower and lowht in lowlist or '#' + lowht in lowlist:\n return True\n if ht['text'] in tag_list or '#' + ht['text'] in tag_list:\n return True\n return False", "def search_all(self, word_list):\n return [k for k,v in self.data_values.iteritems() \n if all(w.lower() in v.lower() for w in word_list)]", "def search(self, find_val):\n return False", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def search(self, tokens: List[str]) -> bool:\n item = \"\".join(tokens)\n if item in self._masked_items:\n return 
False\n\n cur = self._root\n for token in tokens:\n if token not in cur.children:\n return False\n cur = cur.children[token]\n\n return cur.is_term", "def search_any(self, word_list):\n # Same as search_all except uses the built-in any()\n return [k for k,v in self.data_values.iteritems() \n if any(w.lower() in v.lower() for w in word_list)]" ]
[ "0.7962715", "0.72779536", "0.6499833", "0.62624013", "0.62382317", "0.6214999", "0.6140391", "0.6119588", "0.6078616", "0.60098", "0.59635174", "0.5918138", "0.5911922", "0.58637714", "0.5848267", "0.5831237", "0.5806548", "0.5795138", "0.57761455", "0.57537323", "0.57031983", "0.56951916", "0.56755763", "0.5674097", "0.56596816", "0.5651486", "0.5640643", "0.5638073", "0.5636836", "0.56138223" ]
0.75187373
1
Gather the top 10 words by highest (descending) likelihoods for each class
def top10_likelihoods(likelihoods, vocab, classes):
    resultDict = {}
    for cls in classes:
        results = []
        for word in vocab:
            results.append((word, likelihoods[cls][word]))
        resultDict[cls] = results
    # Sort and return top 10 for each class
    for key in resultDict:
        results = resultDict[key]
        resultDict[key] = map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]
    return resultDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top10_odds_ratio(likelihoods, vocab, classes):\r\n results = []\r\n for word in vocab:\r\n highestOddsRatio = None\r\n for c1 in classes:\r\n for c2 in classes:\r\n # Skip self TODO: Is this right?\r\n # if c1 == c2:\r\n # continue\r\n oddsRatio = odds_ratio(likelihoods, c1, c2, word)\r\n if oddsRatio > highestOddsRatio or highestOddsRatio == None:\r\n highestOddsRatio = oddsRatio\r\n results.append((word, highestOddsRatio))\r\n # Sort and return top 10\r\n return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def print_top10(vectorizer, clf, class_labels):\n feature_names = vectorizer.get_feature_names()\n for i, class_label in enumerate(class_labels):\n top10 = np.argsort(clf.coef_[i])[-15:]\n print(\"%s: %s\" % (class_label,\n \" \".join(feature_names[j] for j in top10)))", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def top10(self) -> List[Word]:\n return self._top10", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n# predictedValues = clf.predict(X_test)\n# predictedProbabilities = clf.predict_proba(X_test)\n# missClassifiedDocs = []\n# for index in range(len(predictedValues)):\n# if predictedValues[index] != test_labels[index]:\n# entry = dict()\n# entry['truth'] = test_labels[index]\n# entry['predicted'] = predictedValues[index]\n# entry['proba'] = predictedProbabilities[index][entry['predicted']]\n# entry['document'] = test_docs[index]\n# missClassifiedDocs.append(entry)\n# missClassifiedDocs = sorted(missClassifiedDocs, key=lambda x: -x['proba'])[:n]\n# for docEntry in 
missClassifiedDocs:\n# print('')\n# print('truth=' + str(docEntry['truth']) + ' predicted=' + str(docEntry['predicted']) + ' proba=' + str(docEntry['proba']))\n# print(str(docEntry['document']))\n to_predict = clf.predict(X_test)\n diff = np.where( to_predict != test_labels)[0] \n\n predict_prob = clf.predict_proba(X_test)\n wrong_predict = predict_prob[diff]\n\n keys1 = np.argsort(np.amax(wrong_predict, axis = 1))[::-1][:n] \n \n for i in range(0, n):\n doc_keys = diff[keys1[i]]\n truth=str(test_labels[doc_keys])\n predicted=str( to_predict[doc_keys])\n probab=str(np.max(predict_prob[doc_keys]))\n print('truth='+truth+' predicted='+predicted+' proba='+probab)\n print(test_docs[doc_keys]+'\\n')", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n ###TODO\n #print('test_labels =',test_labels) \n\n #step 1 -> find missclassified\n predicted = clf.predict(X_test)\n \n #print('predicted = ',predicted)\n #acc = accuracy_score(test_labels, predicted)\n #print('acc = ',acc )\n \n misclassified = np.where(predicted != test_labels)\n \n #print('misclassified = ',misclassified)\n #print('misclassified = ',misclassified[0])\n #print('misclassified = ',misclassified[0][0])\n\n #step 2 -> find predicted probabilities\n probab = clf.predict_proba(X_test)\n \n #print('probab = ',probab)\n \n #step 3 -> collect all misclassified docs with all required info\n misclassified_docs = []\n \n for i in misclassified[0]:\n #print(i)\n misclassified_docs.append( ( test_labels[i], predicted[i], probab[i][predicted[i]], test_docs[i] ) ) \n\t\t\n #step 4 -> sort in descending order of the predicted probability for the incorrect class \t\n sorted_docs = sorted(misclassified_docs,key=lambda x:(-x[2]))[:n]\n\n #step 5 -> print all value\n for doc in sorted_docs :\n print('\\n',\"truth=\",doc[0],\" predicted=\",doc[1],\" proba=\",doc[2])\n print(str(doc[3])) #.encode(\"utf-8\")", "def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', '.join(top_n_words))\n return top_words", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: tup[1]))[:self._top_values]", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def format_top_n(self, n=10):\n output = []\n for t, c in self._freq.most_common(n):\n files_, sents_ = self.fetch_index(t)\n word = t + ' (' + str(c) + ')'\n output.append([word, ','.join(files_), \"\\n\".join(sents_)])\n\n return output", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx 
= np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty", "def class_conditional_word_dist(self, Mprint=20):\n self.class_word_dist = np.array(np.vstack([self.data[self.labels == ci, :].sum(0)/self.data[self.labels == ci, :].sum() for ci in np.unique(self.labels)])) # num of classes x num of words\n self.labels_word = self.class_word_dist.argmax(0)\n for i in range(self.class_word_dist.shape[0]):\n print('top {} frequent words in class {}'.format(Mprint, i))\n idx = np.argsort(self.class_word_dist[i, :])[::-1][:Mprint]\n for j in range(Mprint):\n print(' {:3d}: {:10s} {:.4f}'.format(j, self.vocab[idx[j]], self.class_word_dist[i, idx[j]]))", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for 
word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()", "def print_top10(vectorizer, clf):\n feature_names = vectorizer.get_feature_names()\n indices=np.argsort(clf.coef_)[0][-10:]\n for i in range(10):\n print(feature_names[indices[i]])", "def display_topics2(model, feature_names, n_top_words=25):\n word_dict = {};\n for topic_idx, topic in enumerate(model.components_):\n word_dict[\"Topic%d\" % (topic_idx)] = [feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]\n return pd.DataFrame(word_dict).T", "def get_top_words(tfidf_dict: dict, n_words=10):\n header = ['year', 'term', 'tf-idf']\n dfs = []\n for each_year, tfidf_scores in tfidf_dict.items():\n df_list = []\n for term_score in tfidf_scores:\n df_list.append([each_year, term_score[0], float(term_score[1])])\n yr_df = pd.DataFrame(df_list, columns=header)\n yr_df = yr_df.sort_values(by=['tf-idf'], ascending=False)\n if n_words < len(tfidf_scores):\n yr_df = yr_df.iloc[:n_words].reset_index(drop=True)\n dfs.append(yr_df)\n else:\n raise ValueError('input of n_words is more than the words in data!')\n\n df_out = pd.concat(dfs)\n\n return df_out", "def top_sentences(query, sentences, idfs, n):\n tf_idfs = []\n for sentence, words in sentences.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = (1 if word in words else 0)\n tf_idf += idf * tf\n t = (sentence, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=sorter)\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]", "def get_top_keywords(entries):\n # Extract text for processing\n\n 
raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]" ]
[ "0.7205802", "0.70088744", "0.6974292", "0.6939049", "0.68271315", "0.67730814", "0.6618862", "0.66091466", "0.64232916", "0.6413391", "0.639833", "0.63636", "0.6353295", "0.63519716", "0.6349507", "0.63440084", "0.6335887", "0.63027114", "0.6243625", "0.623767", "0.6233897", "0.62330055", "0.622652", "0.6224103", "0.62226653", "0.6205877", "0.6198221", "0.6185104", "0.61655253", "0.61471224" ]
0.7892108
0
Gather the top 10 words by highest (descending) odds ratios
def top10_odds_ratio(likelihoods, vocab, classes):
    results = []
    for word in vocab:
        highestOddsRatio = None
        for c1 in classes:
            for c2 in classes:
                # Skip self TODO: Is this right?
                # if c1 == c2:
                # continue
                oddsRatio = odds_ratio(likelihoods, c1, c2, word)
                if oddsRatio > highestOddsRatio or highestOddsRatio == None:
                    highestOddsRatio = oddsRatio
        results.append((word, highestOddsRatio))
    # Sort and return top 10
    return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty", "def top10(self) -> List[Word]:\n return self._top10", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def format_top_n(self, n=10):\n output = []\n for t, c in self._freq.most_common(n):\n files_, sents_ = self.fetch_index(t)\n word = t + ' (' + str(c) + ')'\n output.append([word, ','.join(files_), \"\\n\".join(sents_)])\n\n return output", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: 
tup[1]))[:self._top_values]", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def test_get_top_n_words_incorrect_numbers(self):\n expected = []\n actual = get_top_n_words({}, -1)\n self.assertEqual(expected, actual)\n actual = get_top_n_words({'happy': 2}, 0)\n self.assertEqual(expected, actual)", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', '.join(top_n_words))\n return top_words", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n 
lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))", "def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def topn_similarity(word_vecs, word, n):\n vec = word_vecs[word]\n sim = dict()\n for w in word_vecs:\n if w != '<TOP>' and w != '<BOT>':\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))\n sim[w] = 1 - spatial.distance.cosine(vec, word_vecs[w])\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))/(mod(vec)*mod(np.transpose(word_vecs[w])))\n dd = OrderedDict(sorted(sim.items(), key=lambda x: x[1], reverse=True))\n return list(dd.items())[1:n+1]", "def prepare_words(self, top_words, total_count):\r\n list_to_return = []\r\n percents = 0\r\n for num, word_tuple in enumerate(top_words.iteritems()):\r\n if num == len(top_words) - 1:\r\n percent = 100 - percents\r\n else:\r\n percent = round(100.0 * word_tuple[1] / total_count)\r\n percents += percent\r\n list_to_return.append(\r\n {\r\n 'text': word_tuple[0],\r\n 'size': word_tuple[1],\r\n 'percent': percent\r\n }\r\n )\r\n return list_to_return", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += 
idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences" ]
[ "0.7232739", "0.6893036", "0.67285866", "0.6680685", "0.66617835", "0.66482335", "0.6614465", "0.6595784", "0.6562109", "0.645501", "0.64387554", "0.6425363", "0.63707566", "0.6370042", "0.6349563", "0.6348198", "0.6341136", "0.6331591", "0.63161516", "0.6312066", "0.6284538", "0.6275941", "0.6267955", "0.6251291", "0.62424177", "0.62385476", "0.6232322", "0.61934334", "0.61820835", "0.6181658" ]
0.7499493
0
Estimate the priors for a class
def calculate_priors(trainingLabels): sum = 0 priors = {} totalSamples = len(trainingLabels) classes = set(trainingLabels) for cls in classes: numCls = len(filter(lambda x: x == cls, trainingLabels)) sum += numCls priors[cls] = float(numCls) / float(totalSamples) # Sanity check: valid partitioning assert(sum == totalSamples) return priors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def priors(self):\n\n return self._priors", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def checkPriors(para):\n\t\n\t# extract parameters\n\tA = para[0]\n\tw = para[1]\n\tp = para[2]\n\t\n\t# check them\n\tif (A<0.01 or A>10.0): A = s.uniform.rvs(0.01,10.)\n\t\n\tif (w<0.01 or w>10.0): w = s.uniform.rvs(0.01,10.)\n\t\t\n\tif ( p<0. or p>2*np.pi): p = s.uniform.rvs(0.0,2*np.pi)\n\t\n\treturn np.array([A,w,p])", "def classify(priors, likelihoods, testData, classes):\r\n results = []\r\n for document in testData:\r\n bestClass = None\r\n bestProb = None\r\n currentProb = 0.0\r\n for cls in classes:\r\n prior = priors[cls]\r\n currentProb = log(prior)\r\n lhoods = likelihoods[cls]\r\n for (word, count) in document:\r\n if word in lhoods:\r\n currentProb += log(lhoods[word])\r\n else:\r\n currentProb += log(lhoods[None])\r\n if currentProb > bestProb or bestClass == None:\r\n bestProb = currentProb\r\n bestClass = cls\r\n results.append(bestClass)\r\n return results", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def _estimate_priors(self):\n\n # Estimate the log UMI count turning point between cells and 'empties'.\n self.priors['log_counts_crossover'] = \\\n np.mean(np.log1p([self.priors['cell_counts'],\n self.priors['empty_counts']])).item()\n\n # Estimate prior for the scale param of LogNormal for d.\n if self.model_name != \"simple\":\n self.priors['d_std'] = (np.log1p(self.priors['cell_counts'])\n - self.priors['log_counts_crossover']) / 5\n else:\n self.priors['d_std'] = 0.2 # This is a reasonable prior in log space.\n\n # Priors for models that include empty droplets:\n if self.model_name != \"simple\":\n # Estimate fraction of trimmed dataset that contains cells.\n # cell_prob = self.priors['n_cells'] / self.analyzed_barcode_inds.size\n cell_prob = (1 - self.fraction_empties) \\\n * (self.priors['n_cells'] / self.analyzed_barcode_inds.size)\n self.priors['cell_prob'] = cell_prob\n\n assert cell_prob > 0, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be > 0, \" \\\n f\"but is {cell_prob}.\"\n\n assert cell_prob <= 1, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be at most 1, \" \\\n f\"but is {cell_prob}.\"\n\n # Turn cell probability into logit.\n self.priors['cell_logit'] = np.log(cell_prob / (1 - cell_prob)).item()\n\n # Estimate the ambient gene expression profile.\n self.priors['chi_ambient'], self.priors['chi_bar'] = \\\n estimate_chi_from_dataset(self)", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", 
"def __init__(self, N=40):\n self._primes = []\n self.find_primes(N)", "def make_priors(self):\r\n if self.last_img_size != (self.target_size, self.target_size):\r\n prior_data = []\r\n\r\n for conv_w, conv_h, scale in zip(self.conv_ws, self.conv_hs, self.scales):\r\n for i in range(conv_h):\r\n for j in range(conv_w):\r\n # +0.5 because priors are in center-size notation\r\n cx = (j + 0.5) / conv_w\r\n cy = (i + 0.5) / conv_h\r\n\r\n for ar in self.aspect_ratios:\r\n ar = np.sqrt(ar)\r\n\r\n w = scale * ar / self.target_size\r\n h = scale / ar / self.target_size\r\n\r\n # This is for backward compatability with a bug where I made everything square by accident\r\n h = w\r\n\r\n prior_data += [cx, cy, w, h]\r\n\r\n self.priors = np.array(prior_data).reshape(-1, 4)\r\n self.last_img_size = (self.target_size, self.target_size)\r\n return self.priors", "def __init__(self, num_class):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_f1score = 0.0\n self.sum_f2score = 0.0\n self.sum_loss = 0.0\n self.num_examples = 0", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def probabilities(self):\n raise NotImplementedError", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def p(self) -> Probability:\n ...", "def calc_priors(self, prior_U, method='inverse'):\n if self.Pchance is None:\n raise IOError(\"Set Pchance before calling this method\")\n\n # TODO -- Move this into Bayesian\n if prior_U < 0.:\n self.prior_U = np.product(self.candidates['P_c'])\n else:\n self.prior_U = prior_U\n\n # Raw priors\n self.raw_prior_Oi = bayesian.raw_prior_Oi(method, self.candidates[self.filter].values,\n Pchance=self.Pchance,\n half_light=self.candidates.half_light.values)\n\n # Normalize\n self.prior_Oi = bayesian.renorm_priors(self.raw_prior_Oi, self.prior_U)\n\n # Add to table\n self.candidates['P_O'] = self.prior_Oi", "def determineClasses(self, particles):\n\t\tapDisplay.printMsg(\"sorting refineparticledata into classes\")\n\t\tt0 = time.time()\n\t\tclasses={}\n\t\tclass_stats={}\n\t\tquality=numpy.zeros(len(particles))\n\t\tfor partnum in range(len(particles)):\n\t\t\tquality[partnum] = particles[partnum]['quality_factor']\n\t\t\tkey = (\"%.3f_%.3f\"%(particles[partnum]['euler1'], particles[partnum]['euler2']))\n\t\t\tif key not in classes.keys():\n\t\t\t\tclasses[key]={}\n\t\t\t\tclasses[key]['particles']=[]\n\t\t\t\tclasses[key]['euler1'] = particles[partnum]['euler1']\n\t\t\t\tclasses[key]['euler2'] = particles[partnum]['euler2']\n\t\t\t\t#classes have no inplane rotation\n\t\t\t\tclasses[key]['euler3'] = 0.0 #particles[partnum]['euler3']\n\t\t\tclasses[key]['particles'].append(particles[partnum])\n\t\tclass_stats['meanquality']=quality.mean()\n\t\tclass_stats['stdquality']=quality.std()\n\t\tclass_stats['max']=quality.max()\n\t\tclass_stats['min']=quality.min()\n\t\tapDisplay.printMsg(\"sorted %d particles into %d classes\"%(len(particles), len(classes)))\n\t\t### print stats\n\t\tprint 
\"-- quality factor stats --\"\n\t\tprint (\"mean/std :: \"+str(round(class_stats['meanquality'],2))+\" +/- \"\n\t\t\t+str(round(class_stats['stdquality'],2)))\n\t\tprint (\"min/max :: \"+str(round(class_stats['min'],2))+\" <> \"\n\t\t\t+str(round(class_stats['max'],2)))\n\t\tapDisplay.printMsg(\"finished sorting in \"+apDisplay.timeString(time.time()-t0))\n\t\treturn classes, class_stats", "def carbon_prime(C,p,p0):\r\n \r\n if p > p0:\r\n return C\r\n else:\r\n return .03", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n # Threshold between unfair and very unfair.\n tsh_unfair = 0.4\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_unfair:\n # We are much higher than fair.\n cls = 4\n elif -1 * tsh_unfair <= dif < -1 * tsh_fair:\n # We are not that much higher than fair.\n cls = 3\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 2\n elif tsh_fair < dif <= tsh_unfair:\n # We are not that much lower than fair.\n cls = 1\n elif tsh_unfair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob", "def decision(self, neighbors=None):\n if not neighbors:\n return sorted(self.class_prb.items(), key=lambda n: n[1],\n reverse=True)\n\n else:\n n = len(neighbors)\n prb = {}\n for label in self.labels:\n prb[label] = 0.0\n for kdnode, dist in neighbors:\n index = self.train_data.index(kdnode.data)\n prb[self.train_label[index]] += 1\n for label in self.labels:\n prb[label] = prb[label] / n\n return sorted(prb.items(), key=lambda n: n[1], reverse=True)", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1, bound + 1):\n if is_prime(n):\n k += 1\n v.append((n, k / n))\n return plot_step_function(v, **args)", "def prob(self, tple, class_counts, feature_counts):\n feats = self.dataset.input_features\n unnorm = [prod(feature_counts[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))\n /(class_counts[c]**(len(feats)-1))\n for c in range(self.num_classes)]\n thesum = sum(unnorm)\n return [un/thesum for un in unnorm]", "def ComparePriors():\n dataset = [60]\n high = 1000\n\n thinkplot.Clf()\n thinkplot.PrePlot(num=2)\n\n constructors = [Train, Train2, Train3]\n labels = ['uniform', 'power law', 'many companies']\n\n for constructor, label in zip(constructors, labels):\n suite = MakePosterior(high, dataset, constructor)\n suite.name = label\n thinkplot.Pmf(suite)\n\n thinkplot.Save(root='train4',\n xlabel='Number of trains',\n ylabel='Probability')", "def sort_priors(self):\n return", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def propose(self):\n\n p = type(self)(self.n, alpha=self.alpha)\n\n return p, p.compute_prior() - self.compute_prior()" ]
[ "0.66456854", "0.6316928", "0.6273953", "0.60905004", "0.6055876", "0.60154176", "0.5993265", "0.59645927", "0.59377426", "0.5896327", "0.58948433", "0.58720356", "0.58248436", "0.5820803", "0.58123535", "0.5798083", "0.57771003", "0.5732761", "0.5723448", "0.57202864", "0.5715706", "0.5703063", "0.56836087", "0.5680424", "0.5671575", "0.56651324", "0.5663909", "0.56608474", "0.56446904", "0.5637236" ]
0.699718
0
Calculate the likelihoods for multinomial
def calculate_likelihoods_multinomial(data, labels, vocab): likelihoods = {} counts = {} words = {} classes = set(labels) vocabLen = len(vocab) for cls in classes: # Initialize counts[cls] = {} words[cls] = 0 # Perform counts line = 0 for doc in data: cls = labels[line] wordCounts = counts[cls] for (word, count) in doc: if word not in wordCounts: wordCounts[word] = 0 wordCounts[word] += count words[cls] += count line += 1 # Compute likelihoods for cls in counts: wordCounts = counts[cls] likelihoods[cls] = {} wordsInClass = words[cls] for word in wordCounts: likelihoods[cls][word] = laplace_smooth(wordCounts[word], wordsInClass, vocabLen) # Add all training words: for word in vocab: if word not in likelihoods[cls]: likelihoods[cls][word] = laplace_smooth(0, wordsInClass, vocabLen) # Special laplace smoothing for words not found in training data likelihoods[cls][None] = laplace_smooth(0, wordsInClass, vocabLen) return likelihoods
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multinomial_likelihood(m_true, alpha, alpha0, m_probs):\n\n ll = tf.reduce_sum(input_tensor=m_true * (tf.math.log(alpha0) - tf.math.log(alpha)), axis=1, keepdims=True)\n ll = tf.reduce_mean(input_tensor=ll)\n return ll", "def multinomial_nll(true_counts, logits):\n counts_per_example = tf.reduce_sum(true_counts, axis=-1)\n dist = tfp.distributions.Multinomial(total_count=counts_per_example,\n logits=logits)\n return (-tf.reduce_sum(dist.log_prob(true_counts)) / \n tf.cast(tf.shape(true_counts)[0], dtype=tf.float32))", "def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()", "def calculate_log_p_multinomial(self, n_counts, nbins, batch_size):\n\n n_counts = tf.cast(n_counts, tf.float32)\n nbins = tf.cast(nbins, tf.float32)\n batch_size = tf.cast(batch_size, tf.float32)\n\n term_a = tf.lgamma(batch_size + 1)\n term_b = tf.reduce_sum(tf.lgamma(n_counts + 1))\n term_c = batch_size * tf.log(nbins)\n\n return term_a - term_b - term_c", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def likelihood(self):\n \n raise NotImplementedError()", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def regularized_multinomial_likelihood(m_true, alpha, alpha0, m_probs, global_step, annealing_step=1000, max_lambda=1.0):\n\n ll = multinomial_likelihood(m_true, alpha, alpha0, m_probs)\n kl = kullback_leibler_dirichlet(m_true, alpha)\n lamb = tf.cast(tf.minimum(max_lambda, global_step / annealing_step), dtype=tf.float32)\n loss = ll + lamb * kl\n return loss", "def likelihood(self, data, hypo):\n tagged, n, k = data\n if hypo < tagged + n - k:\n return 0\n\n p = tagged / hypo\n like = thinkbayes.eval_binomial_pmf(k, n, p)\n return like", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def 
calculate_likelihoods_bernoulli(data, labels, vocab):\r\n classes = set(labels)\r\n likelihoods = {}\r\n # Calculate likelihood for each class\r\n for cls in classes:\r\n documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls]\r\n numDocsInClass = len(documentsInClass)\r\n results = {}\r\n for word in vocab:\r\n numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass))\r\n # Binary variable-- either present or not present\r\n results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)\r\n # Special laplace smoothing for words not found in training data\r\n results[None] = laplace_smooth(0, numDocsInClass, 2)\r\n likelihoods[cls] = results\r\n return likelihoods", "def multinomial(rng, logits, num_samples):\n # NOTE(tycai): Currently, tf.multinomial uses CDF for non-XLA CPU only.\n # We may want to switch to the Gumbel trick as used in XLA.\n if len(logits.shape) > 2 or not logits.shape:\n raise ValueError(\"Logits must be rank-1 or rank-2.\")\n probs = jax.nn.softmax(logits)\n probs = jnp.cumsum(probs, axis=-1)\n # Special-case num_samples == 1 due to TPU padding, as in TF2XLA.\n # https://github.com/tensorflow/tensorflow/blob/b1608511d5a50d05825c4025b0c347e8689a241f/tensorflow/compiler/tf2xla/kernels/categorical_op.cc#L79\n if num_samples == 1:\n a = jax.random.uniform(rng, logits.shape[:-1] + (1,))\n out = jnp.argmin(a > probs, axis=-1)\n return out[..., None]\n else:\n a = jax.random.uniform(rng, (num_samples,) + logits.shape[:-1] + (1,))\n out = jnp.argmin(a > probs, axis=-1)\n return jnp.transpose(out)", "def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + 
np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... 
to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def _learn_global_mixture_weights(alpha, multinomials, val_data, num_em_iter=100, tol=0.001):\n num_comp = len(multinomials)\n if np.any(alpha <= 1):\n raise ValueError('alpha values have to be bigger than 1')\n\n for i, mult in enumerate(multinomials):\n if np.any(np.abs(np.sum(mult, axis=1) - 1) > 0.001):\n raise ValueError('component %d param is not a proper multinomial -- all rows must sum to 1' % i)\n\n if type(alpha) == float or type(alpha) == int:\n alpha = np.ones(num_comp) * alpha * 1.\n\n # Creating responsibility matrix and initializing it hard assignment on random\n log_like_tracker = [-np.inf]\n pi = np.ones(num_comp) / num_comp\n start = time.time()\n em_iter = 0\n for em_iter in xrange(1, num_em_iter + 1):\n # Evey 5 iteration we will compute the posterior log probability to see if we converged.\n if em_iter % 2 == 0:\n\n event_prob = _data_prob(pi, multinomials, val_data)\n event_prob = np.sum(event_prob, axis=0) # prob\n\n # The data likelihood was computed for each location, but it should be in the power of the number\n # of observations there, or a product in the log space.\n data_likelihood = np.log(np.array(event_prob)) * val_data[:, 2]\n\n prior_probability = dirichlet.logpdf(pi, alpha=alpha)\n log_likelihood = np.sum(data_likelihood + prior_probability) / np.sum(val_data[:, 2])\n\n if np.abs(log_likelihood - log_like_tracker[-1]) < tol:\n log.debug('[iter %d] [Reached convergence.]' % em_iter)\n break\n\n log.debug('[iter %d] [Likelihood: [%.4f -> %.4f]]' % (em_iter, log_like_tracker[-1], log_likelihood))\n log_like_tracker.append(log_likelihood)\n\n # E-Step\n\n resp = _data_prob(pi, multinomials, val_data)\n\n if np.all(resp == 0):\n raise ValueError('0 mix probability')\n\n resp = np.array(resp).T\n resp = normalize(resp, 'l1', axis=1)\n\n resp = np.multiply(resp, val_data[:, 2][:, np.newaxis])\n pi = np.sum(resp, axis=0)\n pi += alpha - 1\n pi /= np.sum(pi)\n\n total_time = time.time() - start\n log.debug('Finished EM. 
Total time = %d secs -- %.3f per iteration' % (total_time, total_time / em_iter))\n\n data_log_like = _data_prob(pi, multinomials, val_data)\n data_log_like = np.sum(data_log_like, axis=0)\n ll = np.sum(np.log(np.array(data_log_like)) * val_data[:, 2]) / np.sum(val_data[:, 2])\n return pi, ll", "def likelihood(self, w, class_words):\n return log((class_words.count(w) + 1)/(len(class_words) + self.N))", "def log_prob(self):", "def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)", "def likelihood_ratio(cls, *marginals):\n cont = cls._contingency(*marginals)\n return (cls._n *\n sum(obs * _ln(float(obs) / (exp + _SMALL) + _SMALL)\n for obs, exp in zip(cont, cls._expected_values(cont))))", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.multinomial((4,4), 1, [0.1]*10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def _LL(state, effects, observed_frequencies) -> float:\n observed_frequencies = np.array(observed_frequencies)\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n return sum(np.log10(predicted_probs) * observed_frequencies)", "def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll" ]
[ "0.7405791", "0.71270245", "0.6805791", "0.67948574", "0.6551636", "0.6476551", "0.64614797", "0.6425553", "0.6342785", "0.63420993", "0.6332708", "0.633066", "0.6330074", "0.6298897", "0.62908417", "0.62799925", "0.62471", "0.62063324", "0.61783415", "0.6177655", "0.61492676", "0.6103034", "0.60990226", "0.60570467", "0.60409313", "0.601715", "0.6015817", "0.6010898", "0.6003754", "0.59715736" ]
0.7152682
1
Extract the known vocabulary from our training data
def get_vocab(trainingData): return set(reduce(lambda x,y: x+y, map(lambda x: map(lambda y: y[0], x), trainingData), []))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def known(words):\n return [w for w in words if w in tokenizer.vocab] #change vocab file?", "def get_vocab(train_data, valid_data, test_data):\n \n print(\"-----------------------------------------------\")\n print(\"Constructing Vocabulary of Words and Characters\")\n print(\"-----------------------------------------------\")\n\n with open(train_data,'r') as f:\n train_corpus = f.readlines()\n f.close()\n\n with open(valid_data,'r') as f:\n valid_corpus = f.readlines()\n f.close()\n\n with open(test_data,'r') as f:\n test_corpus = f.readlines()\n f.close()\n\n word_vocab = {}\n char_vocab = {}\n max_len = 0\n\n word_vocab, char_vocab, max_len = make_vocab(train_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(valid_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(test_corpus, word_vocab, char_vocab, max_len)\n\n char_vocab['<SOT>'] = len(char_vocab)+1 \n char_vocab['<EOT>'] = len(char_vocab)+1\n\n print(\"Word Vocabulary Size : %d\"%len(word_vocab))\n print(\"Character Vocabulary Size : %d\"%len(char_vocab))\n print(\"Max Length of Word - 2 : %d\"%max_len)\n\n return word_vocab, char_vocab, max_len", "def vocabulary(self):\n return self._vocabulary", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def load_target_vocab(self):\n vocab = [line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def get_vocabulary(corpus,\n initial_vocab={\n '<unk>': 0,\n '<sssss>': 1\n },\n vocabsize=0):\n vocab = copy.copy(initial_vocab)\n word_count = Counter()\n for text in corpus:\n for w in text.split(' '):\n word_count[w] += 1\n\n # if vocabulary size is specified, most common words are selected\n if vocabsize > 0:\n for w in word_count.most_common(vocabsize):\n if 
w[0] not in vocab:\n vocab[w[0]] = len(vocab)\n if len(vocab) >= vocabsize:\n break\n else: # all observed words are stored\n for w in word_count:\n if w not in vocab:\n vocab[w] = len(vocab)\n return vocab", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def load_vocab(self):\n\n if self.vocabulary_path: \n # For now, the file format is derived from the file extension.\n if self.vocabulary_path.endswith('csv'):\n self.logger.info(\"Filter spymaster vocabulary by csv-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n reader = csv.reader(fin)\n header = next(reader)\n for row in reader:\n word = row[1].lower()\n self.update_vocab(word) \n elif self.vocabulary_path.endswith('txt'):\n self.logger.info(\"Filter spymaster vocabulary by txt-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n for line in fin:\n word = line.strip()\n self.update_vocab(word)\n else:\n raise ValueError(\"Unknown file format for filter spymaster vocabulary.\") \n else:\n self.logger.info(\"Load spymaster vocabulary from gensim.models.KeyedVectors.\")\n self.vocab = self.model.vocab\n self.vocab_size = len(self.vocab)\n\n self.logger.info(\"Spymaster vocabulary size is {}\".format(self.vocab_size))", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)", "def get_input_vocab():\n vocab = set()\n vocab.update(list(string.ascii_letters))\n vocab.update(list(string.digits))\n vocab.update(list(string.punctuation))\n vocab.update(list(string.whitespace))\n vocab.update(['<unk>', '<pad>'])\n return dict(zip(sorted(vocab), list(range(len(vocab)))))", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def vocab(self):\n num_words = -1\n if not self._vocab:\n c = self._conn.cursor()\n c.execute('select feature, censored, word_id from vocab')\n\n d = {}\n for ww, cc, ii in c:\n d[ii] = ww\n d[ww] = ii\n if cc == 1:\n self._censored.add(ww)\n num_words = max(ii, num_words)\n\n logger.info(\"Loaded vocab with %i words; %i censored\" % 
\\\n (len(d) / 2, len(self._censored)))\n\n # Add the start symbol\n if not START_SYMBOL in d:\n d[START_SYMBOL] = num_words + 1\n d[num_words + 1] = START_SYMBOL\n\n logger.info(\"Retrieved %i words\" % num_words)\n self._vocab = d\n\n return self._vocab", "def getVocabularyDict(vocabulary: dict, training_feature: TrainingFeature):\n vocab = {}\n index = 0\n if training_feature.FEATURE_DROP_FREQUENT_WORDS:\n print(\"Select vocabdict with drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key= lambda x: x[1])\n print(\"Total length: \", len(array))\n length = len(array)\n array = array[int(length * 0.75): int(length * 1.0)][0:training_feature.VOCAB_SIZE]\n for (k , _) in array:\n vocab.setdefault(k, index)\n index += 1\n else:\n print(\"Select vocabdict with non_drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key=lambda x: x[1])\n length = len(array)\n print(\"Total length: \", length)\n array = array[-training_feature.VOCAB_SIZE:]\n for (k, _) in array:\n vocab.setdefault(k, index)\n index += 1\n # for (k, v) in vocabulary.items():\n # if v > 50:\n # vocab.setdefault(k, index)\n # index += 1\n print(\"VocabDict length: \", len(vocab))\n # print(vocab)\n return vocab", "def _get_vocabulary(connection):\n print('---Getting vocabulary---')\n vocabulary = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM words;\")\n res = cursor.fetchall()\n num_words = 0\n for word in res:\n vocabulary[word[0]] = num_words\n num_words += 1\n return vocabulary", "def initialize_vocabulary(self,vocabulary_path):\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def get_vocab(self):\n if self.dtm is None:\n raise ValueError(\"Preprocessor has not been fit. \\\n Provide series of articles.\")\n return list(self.dtm.columns)", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def vocabulary(self, config=Config()):\n raise NotImplementedError(\"Class %s doesn't implement vocabulary()\" % self.__class__.__name__)", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 
'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': 
lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc" ]
[ "0.7562361", "0.7207228", "0.7081693", "0.70540327", "0.7029069", "0.70169085", "0.7005524", "0.7005293", "0.7000013", "0.69506437", "0.69444656", "0.6888032", "0.6835956", "0.68238574", "0.6815228", "0.6788677", "0.6784426", "0.6746104", "0.6692373", "0.6628482", "0.660853", "0.65966874", "0.6590938", "0.65770054", "0.6518075", "0.6517311", "0.6511452", "0.65074986", "0.6500501", "0.6489932" ]
0.7412368
1
Return elements from the iterable until it is exhausted. Then repeat the sequence indefinitely. cycle(seq) ==> seq[0], seq[1], ..., seq[n-1], seq[0], seq[1], ...
def cycle(seq, n=None): if n is not None: return Iter(_ncycle(n, seq)) return Iter(itertools.cycle(seq))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def simple_seq(seq):\n for i in seq:\n yield i", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def color_cycle():\n while True:\n for color in colors:\n yield color", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next", "def eat(seq, n=None):\n if n is None:\n collections.deque(seq, maxlen=0)\n else:\n next(itertools.islice(seq, n, n), None)", "def cycle(effect):\n\n def animate(thing):\n frames = (list(frame) for frame in effect(thing))\n yield from cycle(frames)\n return animate", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def cycles(n, support, randomize=False):\n support = np.array(support)\n\n def gen(p):\n g = combinations(support, n)\n if randomize:\n g = list(g)\n random.shuffle(g)\n\n for local_support in g:\n for output_p in all_permutations(local_support)(p):\n yield output_p\n\n return gen", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def repeat(obj, times=None):\n if times is None:\n return Iter(itertools.repeat(obj))\n return Iter(itertools.repeat(obj, times))", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def __iter__(self):\n for x in self.seq: yield x", "def __iter__(self):\n while True:\n for item in (self[i] for i in range(len(self))):\n yield item", "def random_iterator(seq:Sequence[Any], maxlen=None) -> Any:\n if not hasattr(seq, \"__len__\") or not hasattr(seq, \"__getitem__\"):\n raise TypeError(\"Sequence must be indexable\")\n N = len(seq)\n order = list(range(N))\n random.shuffle(order)\n for i,j in enumerate(cycle(order)):\n if maxlen is not None and i > maxlen:\n return\n yield seq[j]", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def _cycle_loop(self):\n cycle, idx = self.cycling, self.current_idx # Local copy to avoid race condition updates\n\n if cycle: # Iterate to next command\n idx = (idx+1) % len(self)\n self.current_idx = idx\n self.updated = True\n\n time.sleep(self.cycle_interval)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def intersperse(value, seq):\n seq = iter(seq)\n\n try:\n yield 
next(seq)\n except StopIteration:\n return\n\n for item in seq:\n yield value\n yield item", "def roll(self):\n try:\n for s in self.seq:\n yield s\n except StopIteration as si:\n # You called roll after using up the sequence. return None\n return None", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2" ]
[ "0.77041095", "0.73121995", "0.68373466", "0.68154997", "0.64987105", "0.64865166", "0.64855176", "0.61515063", "0.61507326", "0.6116417", "0.585476", "0.5825893", "0.5797128", "0.5789132", "0.57815135", "0.5774213", "0.577342", "0.5757885", "0.5697775", "0.56689405", "0.5658311", "0.5647523", "0.5640964", "0.56174076", "0.5600153", "0.55896604", "0.55807495", "0.55608416", "0.55259067", "0.54828894" ]
0.78215605
0
Returns the object the specified number of times. If not specified, returns the object endlessly.
def repeat(obj, times=None): if times is None: return Iter(itertools.repeat(obj)) return Iter(itertools.repeat(obj, times))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)\n r.go(0, -50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def range(self, n):\n for i in range(n):\n yield self.get()", "def repeat_count(instance, args):\r\n count = instance.repeat_count(args)\r\n return count", "def run(self,n=10):\n return self.transduce([None] * n)", "def take(n, iterable, islice=islice):\n return islice(iterable, n)", "def taking(n):\n if n <= 0:\n raise ValueError('taking() requires a positive value.')\n\n @coroutine\n def gen(target):\n for _ in range(n):\n x = (yield)\n target.send(x)\n\n raise StopConsumption()\n\n return gen", "def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element", "def next_n(self, n: int, fast_forward=False):\n data = []\n while len(data) < n:\n try:\n record = self.queue.get(True, self.wait)\n data.append(record)\n except Empty:\n raise StopIteration\n return data", "def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N", "def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)", "def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def next(self, x):\n self.next_called_n_times += 1\n return SequentialTaskCollection.next(self, x)", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def next ( num = 1 ) :\n return run ( num )", "def test_orm_full_objects_chunks(n):\n\n sess = Session(engine)\n for obj in sess.query(Customer).yield_per(100).limit(n):\n print(obj.name)", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def create_n_items(n):\n total_objects = models.Item.objects.all().count()\n for i in range(n):\n models.Item.objects.create(\n name=\"Randomly generated object {}\".format(i+total_objects),\n 
value=random.random() * 1000000\n )", "def repeat_every(repeats=5, every=2):\n\n def repeat_wrapper(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(repeats):\n value = func(*args, **kwargs)\n if value:\n return value\n time.sleep(every)\n\n return func_wrapper\n\n return repeat_wrapper", "def peek(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if n is None:\n result = self._cache[0]\n else:\n result = [self._cache[i] for i in range(n)]\n return result", "def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def loop(func, n):\n for i in range(n):\n func()", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)" ]
[ "0.6203525", "0.59771645", "0.5969791", "0.5901691", "0.5872414", "0.5850168", "0.58327895", "0.58200485", "0.57241195", "0.5649693", "0.56443447", "0.5643324", "0.56162107", "0.5555079", "0.5550507", "0.5531917", "0.54840827", "0.54764825", "0.5443261", "0.54094815", "0.5397046", "0.5382421", "0.53783256", "0.5375706", "0.5357077", "0.5341014", "0.53331083", "0.5319535", "0.5316214", "0.5314601" ]
0.64713174
0
Make infinite calls to a function with the given arguments. End sequence if func() raises StopIteration.
def repeatedly(func, /, *args, **kwargs): func = to_callable(func) try: while True: yield func(*args, **kwargs) except StopIteration as e: yield from stop_seq(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None and not args:\n out = _iterate(func, x)\n elif index is None:\n out = _iterate_n(func, (x, *args))\n else:\n if not args:\n out = _iterate_indexed(func, index, x)\n else:\n out = _iterate_indexed_n(func, index, (x, *args))\n\n return Iter(out)", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def infinite_loop(func):\n @wraps(func) # Preserve target's metadata\n def wrapper(*args, **kwargs):\n while True:\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n break\n return wrapper", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def coroutine(f, *a, **kw):\n i = f(*a, **kw)\n i.next()\n return i", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def iter_except(func, exception, first=None):\n try:\n if first is not None:\n yield first()\n while True:\n yield func()\n except exception:\n pass", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def RunCoroutineOrFunction(function, args=[]):\r\n if inspect.isgeneratorfunction(function):\r\n coroutine = function(*args)\r\n response = yield coroutine.next()\r\n while True:\r\n response = yield coroutine.send(response)\r\n else:\r\n function(*args)", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def unfold(func, seed):\n try:\n elem = func(seed)\n while elem is not None:\n seed, x = elem\n yield x\n elem = func(seed)\n except StopIteration as e:\n yield from stop_seq(e)", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def repeat_func(func, *args, **kwargs):\n if kwargs:\n return starmap(lambda args, kwargs: func(*args, **kwargs),\n repeat((args, kwargs))\n )\n else:\n return starmap(func, repeat(args))", "def wrapper_fn(*args, **kwargs):\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)", "def loop(func, n):\n for i in range(n):\n func()", "def run_with_args(self):\n while True:\n if self.cancelled:\n return\n self.func(self.args)\n time.sleep(self.sleep_time / 1000.00)", "def coroutine(func):\n\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n\n return start", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def coroutine(func):\n def 
start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def call(self, func: Callable[..., T], *args: Any, **kwargs: Any) -> T:\n ret = None\n\n self.before_call(func, *args, **kwargs)\n for listener in self._breaker.listeners:\n listener.before_call(self._breaker, func, *args, **kwargs)\n\n try:\n ret = func(*args, **kwargs)\n if isinstance(ret, types.GeneratorType):\n return self.generator_call(ret)\n\n except BaseException as e:\n self._handle_error(e)\n else:\n self._handle_success()\n return ret", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)" ]
[ "0.7004105", "0.6646999", "0.63588154", "0.633149", "0.6150608", "0.6094851", "0.6073668", "0.59957045", "0.5961045", "0.5894083", "0.5876988", "0.58702266", "0.5862073", "0.58489794", "0.5841767", "0.5814898", "0.57949513", "0.5713033", "0.5708868", "0.57078665", "0.5697791", "0.56956506", "0.5677816", "0.56695217", "0.56593865", "0.56593865", "0.56593865", "0.5604495", "0.55875486", "0.5585913" ]
0.7911098
0
Invert a fold. Similar to iterate, but expects a function of seed -> (seed', x). The second value of the tuple is included in the resulting sequence while the first is used to seed func in the next iteration. Stops iteration if func returns None or raises StopIteration.
def unfold(func, seed):\n    try:\n        elem = func(seed)\n        while elem is not None:\n            seed, x = elem\n            yield x\n            elem = func(seed)\n    except StopIteration as e:\n        yield from stop_seq(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(f):\n return lambda y: search(lambda x: f(x) == y)", "def fold(iterable, func, base):\n acc = base\n for element in iterable:\n acc = func(acc, element)\n return acc", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def flip(func):\n if not callable(func):\n raise TypeError(\"First argument to flip must be callable\")\n \n def flipped_func(*args, **kwargs):\n return func(*reversed(args), **kwargs)\n return flipped_func", "def flip(f):\n return lambda *args, **kwargs: f(*args[::-1], **kwargs)", "def inverse(func: Callable):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return 1.0 / func(*args, **kwargs)\n return _wrapper", "def invert_function(self, qubits):\n\n for qubit in qubits:\n X | qubit", "def foldr(func, start, itr):\n return _foldr(func, start, iter(itr))", "def flip(f: Callable[[A, B], Any]) -> Callable[[B, A], Any]:\n return lambda x, y: f(y, x)", "def flip(f: Callable) -> Callable:\n return curry(lambda *args, **kwargs: f(*reversed(args), **kwargs))", "def foldl2(link, fn, z):\n def step(x, g):\n \"*** YOUR CODE HERE ***\"\n return foldr(link, step, identity)(z)", "def negate_all(f):\r\n return lambda *args, **kwargs: [-y for y in f(*args,**kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def cofold(function, initial, iterator):\n acc = [initial]\n\n def handleAcc(newAcc):\n acc[0] = newAcc\n\n def dofold(item):\n return function(acc[0], item)\n\n d = _CoFunCaller(dofold, resultCollector=handleAcc).coiterate(iterator)\n d.addCallback(lambda _: acc[0])\n return d", "def ifilter_c(func):\n return functools.partial(ifilter, func)", "def _walk_inverse(self, step_fn, y, **kwargs):\n for bij in self._bijectors:\n y = step_fn(bij, y, **kwargs.get(bij.name, {}))\n return y # Now `x`", "def inverse(self, x, *args, **kwargs):\n if self.list_of_inverses is None:\n utils.print_warning(\"inverses were not given\")\n return\n for i in range(len(self.list_of_inverses)):\n x = self.list_of_inverses[i](x, *args, **kwargs)\n return x", "def imap_c(func):\n return functools.partial(imap, func)", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def inverted( self ):\n return self._modifier(\n self,\n lambda x: invert_bits( x, self.nr_of_pins )\n )", "def __iter__(self):\n makeit = getattr(self._data, '__inverted__', self.__next__)\n return makeit()", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def flipflop(it, state=True):\n for i in it:\n yield (state, i)\n state = not state", "def selfie_depreceated(f):\n return partial(f, f)", "def negate(f):\n return lambda *args, **kwargs: -f(*args, **kwargs)" ]
[ "0.551734", "0.54347765", "0.5407587", "0.53981245", "0.5355724", "0.5321342", "0.5257516", "0.52478385", "0.50181824", "0.5012077", "0.49952018", "0.49402606", "0.493333", "0.493333", "0.4932859", "0.492038", "0.49162006", "0.4866282", "0.4826197", "0.480424", "0.48014733", "0.4791138", "0.47671264", "0.47487557", "0.47135735", "0.46805188", "0.46710217", "0.46284175", "0.46256432", "0.4621728" ]
0.76988935
0
Create iterator from sequence of numbers.
def from_sequence(self, seq):\n    return Iter(self._from_sequence(seq))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def simple_seq(seq):\n for i in seq:\n yield i", "def numbers():\n for number in range(1, 76):\n yield number", "def __iter__(self):\r\n \r\n return iter(self._by_number)", "def __iter__(self):\n for x in self.seq: yield x", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def get_numbers(sequence):\r\n\r\n new_list = []\r\n for element in sequence:\r\n if isinstance(element, numbers.Number) == True:\r\n new_list.append(element)\r\n\r\n return new_list", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "async def a_enumerate(seq, start=0):\n i = start\n async for val in seq:\n yield i, val\n i += 1", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def uniform_iterator(sequence):\n\n if isinstance(sequence, abc.Mapping):\n return six.iteritems(sequence)\n else:\n return enumerate(sequence)", "def fromSequence(self, sequence):\n for aVal, bVal in sequence:\n self.add(aVal, bVal)\n\n return self", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def __iter__(cls):\n return iter(cls.__by_number.values())", "def xrange1(value):\n try:\n i = int(value)\n return [x+1 for x in xrange(i)]\n except:\n return []", "def xrange(*args):\n len_args = len(args)\n if len_args == 1:\n stop = int(args[0])\n start = 0\n step = 1\n elif len_args == 2:\n start = int(args[0])\n stop = int(args[1])\n step = 1\n elif len_args == 3:\n start = int(args[0])\n stop = int(args[1])\n step = int(args[2])\n else:\n raise TypeError(\"xrange() requires 1-3 int arguments\")\n if step < 0:\n bcmp = operator.gt\n elif step > 0:\n bcmp = operator.lt\n else:\n raise StopIteration\n act = int(start)\n while bcmp(act, stop):\n yield act\n act += step", "async def aenumerate(asequence, start=0):\n n = start\n async for elem in asequence:\n yield n, elem\n n += 1", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def digit_generator(N=1_000_000):\n i = 0\n number = 1\n while N > i:\n for _i in str(number):\n yield _i\n i += 1\n number += 1", "def __init__(self, generator, to_hashable=lambda x: x):\n self.index_to_result = []\n self.hashable_to_index = dict()\n for i, result in enumerate(generator):\n self.index_to_result.append(result)\n hashable = to_hashable(result)\n if hashable in self.hashable_to_index:\n break\n else:\n self.hashable_to_index[hashable] = i\n else:\n raise Exception(\"generator terminated without repeat\")\n self.cycle_begin = self.hashable_to_index[hashable]\n self.cycle_end = i\n self.cycle_length = self.cycle_end - self.cycle_begin\n\n self.first_repeated_result = self.index_to_result[self.cycle_begin]\n self.second_repeated_result = self.index_to_result[self.cycle_end]", "def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):\n\n self.iter = iter(seq)\n self.start = start if (start or start == 0) else 1\n self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in\n header_lines.split(',') if e]\n self.comment_lines = comments\n self.end = end\n\n self.load_headers = load_headers\n\n self.headers = []\n self.comments = []\n\n int(self.start) # Throw error if it is not an int", "def __iter__(self) -> Iterator:\n if self.ndim > 1:\n for i in range(len(self)):\n yield self[i]\n else:\n # 
convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = (length // chunksize) + 1\n\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = ints_to_pydatetime(\n data[start_i:end_i],\n tz=self.tz,\n box=\"timestamp\",\n reso=self._creso,\n )\n yield from converted", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def enumerate_list(seq):\n return zip(xrange(len(seq)), seq)", "def __init__(self, iterator):\n self.iterator = iterator\n self.peek_num = None", "def __iter__(self):\n pass\n\n # TODO: range, based on iterator", "def iter_nums():\n saved = dict()\n\n def get_or_zero(x, y):\n \"\"\" Get the value at (x, y) in the cache, or return 0 \"\"\"\n coord = (x, y)\n if coord in saved:\n return saved[coord]\n else:\n return 0\n\n for coord in iter_coords():\n x, y = coord\n if coord == (0, 0):\n val = 1\n else:\n val = 0\n val += get_or_zero(x-1, y-1)\n val += get_or_zero(x, y-1)\n val += get_or_zero(x+1, y-1)\n val += get_or_zero(x-1, y)\n val += get_or_zero(x+1, y)\n val += get_or_zero(x-1, y+1)\n val += get_or_zero(x, y+1)\n val += get_or_zero(x+1, y+1)\n\n saved[coord] = val\n\n yield val" ]
[ "0.6523413", "0.6352263", "0.6295878", "0.6285677", "0.6251581", "0.6161719", "0.6161719", "0.6123521", "0.59559906", "0.59429246", "0.5929433", "0.59055716", "0.5842463", "0.58171993", "0.57896346", "0.57666224", "0.57651085", "0.5762295", "0.5750994", "0.5719213", "0.5710208", "0.5660005", "0.5647652", "0.56463015", "0.5594679", "0.5562186", "0.5554404", "0.55411124", "0.55244726", "0.5523953" ]
0.70666045
0
Create iterator from slice object.
def from_slice(self, slice):\n    start = 0 if slice.start is None else slice.start\n    step = 1 if slice.step is None else slice.step\n    return self.count(start, step, stop=slice.stop)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice(iterable, *args):\n return iter(it.islice(iterable, *args))", "def __getitem__(self, arg):\n if isinstance(arg, slice):\n # get value from slice\n start, stop, step = arg.start, arg.stop, arg.step\n # sanitize step\n if step is None:\n step = 1\n if step < 1:\n raise ValueError(\"step must be positive\")\n # make sure the stream is in the right place to start\n if start is None:\n start = 0\n if start < self.current:\n self.rewind(start)\n if start > self.current:\n self.skip_forward(start - self.current)\n\n # sanity check\n if stop is not None and stop < start:\n raise ValueError(\"start must be less than stop\")\n # special case, we can't just return self, because __iter__ rewinds\n if step == 1 and stop is None:\n # keep going until exhausted\n return (self.next() for _ in itertools.repeat(True))\n\n return self._step_gen(step, stop)\n\n elif isinstance(arg, int):\n self.rewind(arg)\n return self.next()\n else:\n raise ValueError(\"Invalid arguement, use either a `slice` or \" +\n \"or an `int`. not {t}\".format(t=str(type(arg))))", "def from_sequence(self, seq):\n return Iter(self._from_sequence(seq))", "def __getitem__(self, i):\n self._load(False)\n\n if type(i) == slice:\n # Normalize the slice a bit such that it doesn't\n # have any negative or None values\n start, stop, step = i.start, i.stop, i.step\n if start is None:\n start = 0\n elif start < 0:\n start += self.count\n if stop is None:\n stop = self.count\n elif stop < 0:\n stop += self.count\n if not step:\n step = 1\n\n # If we're iterating backwards, start at the end\n if step < 0:\n key = self.head - self.count + stop - 1\n else:\n key = self.head - self.count + start\n\n return self._iter(key, stop - start, step)\n else:\n if i < 0:\n i += self.count\n return self.db[(self.head - self.count + i) % self.size]", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def frombuffer(self, slice_data):\n return NotImplemented", "def SliceView(sequence, start=None, stop=None, step=1):\n start, stop, step = slice(start, stop, step).indices(len(sequence))\n for i in range(start, stop, step):\n yield sequence[i]", "def __iter__(self):\n\n starts = range(0, self.data.shape[self.axis], self.chunksize)\n\n for t in zip_longest(starts, starts[1:], fillvalue=None):\n yield self.data[self._slice(*t)]", "def __getitem__(self, ndx):\n if type(ndx) is slice:\n return list(islice(self._all(), ndx.start, ndx.stop, ndx.step or 1))\n else:\n return islice(self._all(), ndx, ndx+1).next()", "def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)", "def slice(self, begin, end):\n sliced = self._collection.slice(begin, end)\n return self.set_collection(sliced)", "def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))", "def create_slice(*, stop : Optional[int] = None, start : Optional[int] = None, step : Optional[int] = None) -> slice:\n return slice(start, stop, step)", "def normalize_slice(s):\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return 
slice(start, stop, step)", "def _make_dataset_iterator(self, dataset):\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib_v1.DatasetIterator(dataset, self._input_workers,\n self._container_strategy())", "def from_iterable(self, iterable):\n raise NotImplementedError()", "def test_getslice1(self):\n class C(list):\n def __getitem__(self, index):\n return (index.start, index.stop)\n\n a = C()\n self.assertEqual(a[32:197], (32,197))", "def __getitem__(self, item):\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n if stop - start == 0:\n return type(self)(xnd.xnd([], type=self.data.type))\n\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean \\\n arrays are valid indices.\"\n )\n\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n else:\n\n return self.data[item]\n\n value = self.data[item]\n return type(self)(value)", "def __setslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2___setslice__(self, *args)", "def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]", "def make_data_iterator(input):\n assert isinstance(input, DataLoader)\n data_iterator = iter(input)\n return data_iterator", "def fromiter(iterable, dtype, count=-1):\n\n return call_origin(numpy.fromiter, iterable, dtype, count)", "def _get_slice(segments, shape):\n\n if not (1 <= len(shape) <= 2):\n raise ValueError('Cannot segment array of shape: %s' % str(shape))\n else:\n size = shape[0]\n slice_length = np.ceil(float(size) / segments)\n start_idx = 0\n end_idx = slice_length\n while start_idx < size:\n if len(shape) == 1:\n yield slice(start_idx, end_idx)\n else:\n yield (slice(start_idx, end_idx), slice(None))\n start_idx = end_idx\n end_idx = min(start_idx + slice_length, size)", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def __iter__(self):\n pass\n\n # TODO: range, based on iterator", "def __getitem__(self, t: Union[slice, Sequence[int], Sequence[bool]]\n ) -> \"ImageSequence\":\n t = self._resolve_index(t)\n if isinstance(t, np.ndarray):\n ret = copy.copy(self)\n ret._indices = t\n ret._is_slice = True\n ret._len = len(t)\n return ret\n # Assume t is a number\n return self._get_single_frame(int(t))", "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def __getitem__(self, item: slice | tuple):\n if isinstance(item, slice):\n start, stop = item.start, item.stop\n if start is None:\n start = 0\n if stop is None:\n stop = maxsize\n if start > stop:\n raise IndexError(\"make sure start <= stop\")\n return self.query(Orthotope([Interval(start, stop)]))\n elif isinstance(item, tuple):\n pass\n else:\n raise TypeError(f\"unrecognized index {item}\")", "def process_slice(slc, shape, n):\n if not isinstance(slc, tuple):\n slc = (slc,)\n slc = list(slc)\n ndim = len(shape) - n\n assert ndim >= 0\n shape_idx = 0\n for 
slice_idx, s in enumerate(slc):\n if s == nax:\n continue\n if shape[shape_idx] == 1:\n if type(s) == int:\n slc[slice_idx] = 0\n else:\n slc[slice_idx] = slice(None)\n shape_idx += 1\n if shape_idx != ndim:\n raise IndexError('Must have %d terms in the slice object' % ndim)\n return extend_slice(tuple(slc), n)" ]
[ "0.6592863", "0.6384958", "0.6363769", "0.62649775", "0.61393666", "0.60202515", "0.59986544", "0.5966865", "0.5952172", "0.5857153", "0.5757738", "0.574337", "0.5653791", "0.5642721", "0.56035215", "0.5587913", "0.5578734", "0.5572186", "0.5529783", "0.5510107", "0.5508437", "0.5500182", "0.54971117", "0.5488021", "0.54690593", "0.54680836", "0.5460781", "0.54580855", "0.545602", "0.5444084" ]
0.6573793
1
Return a sequence of n evenly spaced numbers from a to b.
def evenly_spaced(self, a: Real, b: Real, n: int) -> Iter:\n    return Iter(_evenly_spaced(a, b, n))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))", "def genslices(n):\n return product(range(-n, n + 1), range(-n, n + 1), range(-n, n + 1))", "def evenly_spaced_BDs_OLD(BDs, n):\n BDs = BDs.iloc[:,0].tolist()\n BD_min = min(BDs)\n BD_max = max(BDs)\n return np.linspace(BD_min, BD_max, n)", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def partitions(n):\n for a in range(2,n//2+1):\n yield a, n-a", "def binaire(x,n):\n a,q = [],0\n \n for i in range(n):\n q = x%2\n x //=2\n a = [q] + a\n \n return(a)", "def calcSpacings(self, n):\n\n first = next = (n) / float(n + 1)\n for i in range(n):\n yield (next, 1 - next)\n next = first - (1 - next)", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def farey(n):\n a, b, c, d = 0, 1, 1, n\n #yield a, b\n while (c <= n):\n k = (n + b) // d\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n yield a, b", "def even_split(a, n):\n n = min(n, len(a)) # if less elements in array than chunks to output, change chunks to array length\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))", "def shingle(iterable, n):\n num_shingles = max(1, len(iterable) - n + 1)\n return [iterable[i:i + n] for i in range(num_shingles)]", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]", "def sum_series(n, a=0, b=1):\n\tseq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tseq.append(a)\n\t\tif i == 1:\n\t\t\tseq.append(b)\n\t\tif i > 1:\n\t\t\tnth_term = seq[-1] + seq[-2]\n\t\t\tseq.append(nth_term)\n\t\n\tprint(seq)\n\tprint(seq[n])\n\treturn(seq[n])", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments", "def split_into_n(s, n):\n return [s[k:k + n] for k in range(0, len(s), n)]", "def pairs_upto(n):\n return ((a, b)\n for a in range(1, n)\n for b in range(1, n)\n if a <= b)", "def batches(l, n):\n for i in range(0, l, n):\n yield range(i,min(l,i+n))", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def randomSeq(n, a, b):\n \n return [\n Complex(a + np.random.random()*(b-a), a + np.random.random()*(b-a))\n for _ in range(n)\n ]", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in 
range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def seq(n,x=0, y=1):\r\n if n==1:\r\n return x\r\n elif n==2:\r\n return y\r\n else:\r\n return seq(n-1,x,y)+seq(n-2,x,y)", "def plus_table(n):\n return [[(i + j) % n for i in range(n)] for j in range(n)]" ]
[ "0.67545396", "0.67385876", "0.6609418", "0.6586301", "0.6527549", "0.6490447", "0.64674157", "0.6464234", "0.64498013", "0.6438853", "0.6431555", "0.64294153", "0.6413279", "0.63846004", "0.63330746", "0.63312566", "0.6319206", "0.6290663", "0.6289663", "0.62633723", "0.62446594", "0.62296426", "0.6226975", "0.62158066", "0.62067735", "0.6196273", "0.61873305", "0.61803776", "0.6178925", "0.6147991" ]
0.7720257
0
Convert int to string without using builtin str()
def int_to_string(num):\n    if num < 0:\n        num, is_neg = -num, True\n    else:\n        is_neg = False\n    s = []\n    while num > 0:\n        s.append(chr(ord('0') + num%10))\n        num //= 10\n    return ('-' if is_neg else '') + ''.join(reversed(s))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _int2str(num):\n if num<10:\n return '00%s'%str(num)\n elif 10<=num<100:\n return '0%s'%str(num)\n else:\n return '%s'%str(num)", "def _int_to_string(v):\n \n if not isinstance(v,int):\n raise InstrumentParameterException('Value %s is not an int.' % str(v))\n else:\n return '%i' % v", "def intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable", "def ints_to_string(iterable):\n return ''.join([chr(i) for i in iterable])", "def num2str(num):\n require_type(is_number(num), 'parameter of number->string must be a number')\n return tostr(num)", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def SBO_intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT", "def validate_int_to_str(x):\n\n if isinstance(x, int):\n return str(x)\n if isinstance(x, str):\n return str(int(x))\n\n raise TypeError(f\"Value {x} of type {type(x)} must be either int or str\")", "def __rank_from_int_to_str(rank: int) -> str:\n return str(rank + 1)", "def __int__(self):\n return int(str(self),2)", "def int2dec(n: int) -> str:", "def serialize_number(n):\n return str(n)", "def _num2str(self, num):\n q, mod = divmod(num, 10)\n suffix = \"th\" if q == 1 else self.SUFFIX_DICT[mod]\n return f\"{num}{suffix}\"", "def convert_int_to_str(df):", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def issnint2str(issn_int):\n if type(issn_int) is not int:\n raise TypeError(\"issn_int is not int\")\n issn_ = \"{num:07d}\".format(num=issn_int)\n check = map(lambda x: int(x), issn_)\n res = 0\n for pp in zip(check, range(8, 1, -1)):\n res += pp[0] * pp[1]\n\n rem = (11 - res) % 11\n rem = \"X\" if rem == 10 else rem\n issn_str = \"{0}-{1}{2}\".format(issn_[:4], issn_[4:], rem)\n return issn_str", "def int2str(value_int, currency):\r\n if currency in \"BTC LTC NMC\":\r\n return (\"%16.8f\" % (value_int / 100000000.0))\r\n elif currency in \"JPY SEK\":\r\n return (\"%12.3f\" % (value_int / 1000.0))\r\n else:\r\n return (\"%12.5f\" % (value_int / 100000.0))", "def quote2str(self, int_number):\r\n return self.format_quote % (float(int_number) / self.mult_quote)", "def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s", "def to_str(n: float) -> str:\n return str(n)", "def _format(self, id_num: int) -> typing.Union[int, str]:\n return id_num", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def format_int(self, data):\n return u'%d' % data", "def get_simple_digit_str(a_float_int):\r\n int_value = int(a_float_int)\r\n if int_value == a_float_int:\r\n return str(int_value)\r\n return \"%.3f\" % a_float_int", "def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num", "def convert_to_str(value):\n\tif value is None:\n\t\treturn '-'\n\treturn str(value)", "def internal_id_to_display_id(i_id: int) -> str:\n i_id = str(i_id).zfill(9)\n return ''.join(i_id[x - 1] for x in [1, 5, 9, 6, 3, 8, 2, 4, 7])", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" 
\".join(reversed(bites))" ]
[ "0.8262509", "0.7803891", "0.7300844", "0.7271281", "0.72517717", "0.718009", "0.7099532", "0.70860934", "0.7008732", "0.69923896", "0.68970364", "0.68703055", "0.6847767", "0.67874974", "0.6779158", "0.676237", "0.67338043", "0.6702256", "0.6671091", "0.6608676", "0.65521425", "0.6549257", "0.64477986", "0.6446148", "0.6391005", "0.63486177", "0.6310224", "0.629431", "0.62617946", "0.62579745" ]
0.78195685
1
Plot a template over a detected stream, with picks corrected by lagcalc.
def plot_repicked(template, picks, det_stream, size=(10.5, 7.5), save=False, savefile=None, title=False): # _check_save_args(save, savefile) fig, axes = plt.subplots(len(det_stream), 1, sharex=True, figsize=size) if len(template) > 1: axes = axes.ravel() mintime = det_stream.sort(['starttime'])[0].stats.starttime template.sort(['network', 'station', 'starttime']) lengths = [] lines = [] labels = [] n_templates_plotted = 0 for i, tr in enumerate(det_stream.sort(['starttime'])): # Cope with a single channel template case. if len(det_stream) > 1: axis = axes[i] else: axis = axes tr_picks = [pick for pick in picks if pick.waveform_id.station_code == tr.stats.station and pick.waveform_id.channel_code[0] + pick.waveform_id.channel_code[-1] == tr.stats.channel[0] + tr.stats.channel[-1]] if len(tr_picks) > 1: msg = 'Multiple picks on channel %s' % tr.stats.station + ', ' + \ tr.stats.channel raise NotImplementedError(msg) if len(tr_picks) == 0: msg = 'No pick for chanel %s' % tr.stats.station + ', ' + \ tr.stats.channel print(msg) else: pick = tr_picks[0] pick_delay = pick.time - mintime delay = tr.stats.starttime - mintime y = tr.data # Normalise if len(tr_picks) > 0 and template: y /= max(abs(y[int(pick_delay/tr.stats.delta):int(pick_delay/tr.stats.delta) + len(template[0])])) else: y /= max(abs(y)) x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y)) x += delay axis.plot(x, y, 'k', linewidth=1.5) axis.set_ylim(-max(abs(y)), max(abs(y))) if template.select(station=tr.stats.station, channel=tr.stats.channel): btr = template.select(station=tr.stats.station, channel=tr.stats.channel)[0] bdelay = pick.time - mintime by = btr.data by /= max(abs(by)) bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by)) bx += bdelay if len(tr_picks) > 0: # Heads up for the x - 0.1 fudge factor here accounting for template pre-pick time template_line, = axis.plot(bx - 0.1, by, 'r', linewidth=1.6, label='Template') if not pick.phase_hint: pcolor = 'k' label = 'Unknown pick' elif 'P' in pick.phase_hint.upper(): pcolor = 'red' label = 'P-pick' elif 'S' in pick.phase_hint.upper(): pcolor = 'blue' label = 'S-pick' else: pcolor = 'k' label = 'Unknown pick' pdelay = pick.time - mintime ccval = pick.comments[0].text.split('=')[-1] line = axis.axvline(x=pdelay, color=pcolor, linewidth=2, linestyle='--', label=label) axis.text(pdelay, max(by), ccval, fontsize=12) if label not in labels: lines.append(line) labels.append(label) if n_templates_plotted == 0: lines.append(template_line) labels.append('Template') n_templates_plotted += 1 lengths.append(max(bx[-1], x[-1])) else: lengths.append(bx[1]) axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]), rotation=0, horizontalalignment='right') axis.yaxis.set_ticks([]) if len(det_stream) > 1: axis = axes[len(det_stream) - 1] else: axis = axes axis.set_xlabel('Time (s) from %s' % mintime.datetime.strftime('%Y/%m/%d %H:%M:%S.%f')) plt.figlegend(lines, labels, 'upper right') if title: if len(template) > 1: axes[0].set_title(title) else: axes.set_title(title) else: plt.subplots_adjust(top=0.98) plt.tight_layout() plt.subplots_adjust(hspace=0) if not save: plt.show() plt.close() else: plt.savefig(savefile) plt.close() return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag $i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, :2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # 
bouding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, 
dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def show_template_bundles(final_streamlines, template_path, fname):\n import nibabel as nib\n from fury import actor, window\n\n renderer = window.Renderer()\n template_img_data = nib.load(template_path).get_data().astype(\"bool\")\n template_actor = actor.contour_from_roi(\n template_img_data, color=(50, 50, 50), opacity=0.05\n )\n renderer.add(template_actor)\n lines_actor = actor.streamtube(\n final_streamlines, window.colors.orange, linewidth=0.3\n )\n renderer.add(lines_actor)\n window.record(renderer, n_frames=1, out_path=fname, size=(900, 900))\n return", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 
- st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - 
int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if 
dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in 
data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"", "def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def plot(frame, clipped, auto, lag, threshold, freq, save):\n fig, axes = plt.subplots(4, constrained_layout=True)\n fig.set_size_inches(8.0, 8.0)\n fig.canvas.set_window_title('Excercise 4')\n\n ax_frame, ax_clipped, ax_auto, ax_freq = axes\n\n time = np.linspace(0, frame.size / SAMPLE_RATE, num=frame.size)\n for ax in axes:\n ax.set_xlabel('time [s]')\n ax.set_ylabel('y')\n\n\n ax_frame.plot(time, frame)\n ax_clipped.plot(time, clipped)\n\n ax_auto.plot(auto)\n ax_auto.axvline(threshold, color='black', label='Threshold')\n ax_auto.stem([lag[0]], [lag[1]], linefmt='r-', basefmt=None, label='Lag')\n\n ax_freq.plot(freq[0], 'g-', label='mask-on')\n ax_freq.plot(freq[1], 'r-', label='mask-off')\n\n ax_auto.legend(loc=1)\n ax_freq.legend(loc=0)\n\n ax_frame.set_title('Maskon frame')\n ax_clipped.set_title('Central clipping with 70%')\n ax_auto.set_title('Autocorrelation')\n ax_freq.set_title('Primary frequencies of frames')\n\n ax_auto.set_xlabel('frames')\n ax_freq.set_xlabel('frames')\n\n ax_freq.set_ylabel('f0')\n\n if save:\n save_figure(fig, 'ex4')\n else:\n plt.show()", "def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = 
fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if 
self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], 
bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves 
the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p -0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = 
ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p -0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = 
grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. 
of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = 
bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = 
np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = {i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot_tiltres(setup, mtilt, ytilt, yfit, slit=None, outfile=None, show_QA=False, out_dir=None):\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'times new roman'\n\n # Outfil\n method = inspect.stack()[0][3]\n if (outfile is None) and (not show_QA):\n outfile = qa.set_qa_filename(setup, method, slit=slit, out_dir=out_dir)\n\n # Setup\n plt.figure(figsize=(8, 4.0))\n plt.clf()\n ax = plt.gca()\n\n # 
Scatter plot\n res = (mtilt-ytilt) - yfit\n ax.scatter(mtilt, res)\n\n rms = np.std(res)\n ax.text(0.90, 0.90, 'Slit {:d}: RMS (pix) = {:0.5f}'.format(slit, rms),\n transform=ax.transAxes, size='large', ha='right', color='black')\n # Label\n ax.set_xlabel('Row')\n ax.set_ylabel('Residual (pix)')\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n if show_QA:\n plt.show()\n else:\n plt.savefig(outfile, dpi=400)\n plt.close()\n\n plt.rcdefaults()\n\n return", "def _display_tsne(self):\n self._tsne_window.clear()\n self._tsne_window.plot(self._Y_tsne[:,0], self._Y_tsne[:,1], 'b.')", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def tplot(self, analytes=None, figsize=[10, 4], scale=None, filt=None,\n ranges=False, stats=False, stat='nanmean', err='nanstd',\n interactive=False, focus_stage=None, err_envelope=False):\n\n if interactive:\n enable_notebook() # 
make the plot interactive\n\n if type(analytes) is str:\n analytes = [analytes]\n if analytes is None:\n analytes = self.analytes\n\n if focus_stage is None:\n focus_stage = self.focus_stage\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([.1,.12,.77,.8])\n\n for a in analytes:\n x = self.Time\n y, yerr = unpack_uncertainties(self.data[focus_stage][a])\n\n if scale is 'log':\n ax.set_yscale('log')\n y[y == 0] = np.nan\n\n if filt:\n ind = self.filt.grab_filt(filt, a)\n xf = x.copy()\n yf = y.copy()\n yerrf = yerr.copy()\n if any(~ind):\n xf[~ind] = np.nan\n yf[~ind] = np.nan\n yerrf[~ind] = np.nan\n if any(~ind):\n ax.plot(x, y, color=self.cmap[a], alpha=.4, lw=0.6)\n ax.plot(xf, yf, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(xf, yf - yerrf, yf + yerrf, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n else:\n ax.plot(x, y, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(x, y - yerr, y + yerr, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n\n # Plot averages and error envelopes\n if stats and hasattr(self, 'stats'):\n sts = self.stats[sig][0].size\n if sts > 1:\n for n in np.arange(self.n):\n n_ind = ind & (self.ns == n + 1)\n if sum(n_ind) > 2:\n x = [self.Time[n_ind][0], self.Time[n_ind][-1]]\n y = [self.stats[sig][self.stats['analytes'] == a][0][n]] * 2\n\n yp = ([self.stats[sig][self.stats['analytes'] == a][0][n] +\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n yn = ([self.stats[sig][self.stats['analytes'] == a][0][n] -\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn,\n color=self.cmap[a], alpha=0.4,\n linewidth=0)\n else:\n x = [self.Time[0], self.Time[-1]]\n y = [self.stats[sig][self.stats['analytes'] == a][0]] * 2\n yp = ([self.stats[sig][self.stats['analytes'] == a][0] +\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n yn = ([self.stats[sig][self.stats['analytes'] == a][0] -\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],\n alpha=0.4, linewidth=0)\n\n if ranges:\n for lims in self.bkgrng:\n ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)\n for lims in self.sigrng:\n ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)\n\n if filt is not None:\n ind = self.filt.grab_filt(filt)\n lims = bool_2_indices(~ind)\n for l, u in lims:\n if u >= len(self.Time):\n u = -1\n ax.axvspan(self.Time[l], self.Time[u], color='k',\n alpha=0.05, lw=0)\n\n # drawn = []\n # for k, v in self.filt.switches.items():\n # for f, s in v.items():\n # if s & (f not in drawn):\n # lims = bool_2_indices(~self.filt.components[f])\n # for u, l in lims:\n # ax.axvspan(self.Time[u-1], self.Time[l], color='k',\n # alpha=0.05, lw=0)\n # drawn.append(f)\n\n ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage,\n transform=ax.transAxes,\n ha='left', va='top')\n\n ax.set_xlabel('Time (s)')\n ax.set_xlim(np.nanmin(x), np.nanmax(x))\n \n # y label\n ud = {'rawdata': 'counts',\n 'despiked': 'counts',\n 'bkgsub': 'background corrected counts',\n 'ratios': 'counts/{:s} count',\n 'calibrated': 'mol/mol {:s}'}\n if focus_stage in ['ratios', 'calibrated']:\n ud[focus_stage] = ud[focus_stage].format(self.internal_standard)\n ax.set_ylabel(ud[focus_stage])\n\n if interactive:\n ax.legend()\n plugins.connect(fig, plugins.MousePosition(fontsize=14))\n display.clear_output(wait=True)\n display.display(fig)\n input('Press [Return] when finished.')\n 
disable_notebook() # stop the interactivity\n else:\n ax.legend(bbox_to_anchor=(1.15, 1))\n\n return fig, ax", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def make_source_location_histogram_plots_uvis(data, file_name, ff, im, coordfile, \\\n filt, path_to_cleans=''):\n\tpylab.ion()\n\tif ff == 0:\n\t\tfig = pylab.figure()\n\t\tfig.subplots_adjust(wspace=0.4)\n\telse:\n\t\tpylab.clf()\n\t\t\n\txc,yc = np.loadtxt(coordfile, unpack=True, usecols = (0,1)) \n\t# plot #1 - object position\n\tsz=50.0\n\tx0=np.round(xc)-sz/2.\n\tx1=np.round(xc)+sz/2.\n\ty0=np.round(yc)-sz/2.\n\ty1=np.round(yc)+sz/2.\n\tax1 = pylab.subplot(1,2,1)\n\tax1.imshow(np.log10(im[y0:y1,x0:x1]),interpolation='nearest')\n\tax1.autoscale(axis='both',enable=False)\n\tax1.scatter([xc-x0-1.0], [yc-y0-1.0], marker='x', s=200., color='w')\n\tpylab.title('X = '+str(xc)+' Y = '+str(yc))\n\n\t# plot #2 - background histogram\n\ttmp_image=glob.glob(path_to_cleans + '*back.fits')[0]\n\tbackim = pyfits.getdata(tmp_image)\n\t#--measure back statistics (mean and mode via IRAF)\n\tinitback = iraf.imstatistics(tmp_image+'[0]', fields='mode,stddev', \\\n\t lower = -100, upper = 10000, nclip=7, \\\n\t lsigma=3.0, usigma=3.0, cache='yes', \\\n\t format='no',Stdout=1)\n\t#print 'initback:'\n\t#print initback\n\tif 'INDEF' not in initback[0]:\n\t\tllim = float(initback[0].split(' ')[0]) - 10.0*\\\n\t\t\t\tfloat(initback[0].split(' ')[1])\n\t\tulim = float(initback[0].split(' ')[0]) + 10.0*\\\n\t float(initback[0].split(' ')[1])\n\t\tbackstats=iraf.imstatistics(tmp_image+'[0]', fields='mean,mode', \\\n\t lower=llim, upper=ulim, nclip=7,lsigma=3.0, \\\n\t usigma=3.0, cache='yes', format='no',Stdout=1)\n\t\tbackmean=float(backstats[0].split(' ')[0])\n\t\tbackmode=float(backstats[0].split(' ')[1])\n\t\tfbackim= np.ndarray.flatten(backim)\n\t\tgd=np.where((fbackim > llim) & (fbackim < ulim))[0]\n\t\tbackmedian=meanclip(fbackim[gd],maxiter=7,return_median=1)[0]\n\n\t\tax2 = pylab.subplot(1,2,2)\n\t\tpylab.hist(fbackim[gd],log=True)\n\t\tpylab.ylim(0.5,600000)\n\t\tpylab.xlim(-20,20)\n\t\tpylab.plot([backmode,backmode],[0.5,600000],ls='-',color='red',\\\n\t label='mode')\n\t\tpylab.plot([backmedian,backmedian],[0.5,600000],ls='--',color='aqua',\\\n \t label='median')\n\t\tpylab.plot([backmean,backmean],[0.5,600000],ls=':',color='black',\\\n \t label='mean')\n\t\tpylab.legend(loc=2, handletextpad=0.0, borderpad=0.0, frameon=False, \\\n \t handlelength=1.)\n\t\tpylab.title('Histogram of Background Pixels')\n\t\tpylab.xlabel('Background [e-]')\n\t\tpylab.ylabel('Number of Objects')\n\t\tpylab.annotate('chip '+str(data[ff]['chip']), [0.77,0.95], \\\n \t xycoords='axes fraction')\n\t\tpylab.annotate(filt,[0.77,0.80],xycoords='axes fraction')\n\n\t\t\n\tpylab.savefig(file_name.split('.fits')[0]+'_srcloc.png')\n\tpylab.ioff()", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + 
\".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells", "def plot(self):\n\t\tself.plotOfSpect()", "def misclass_plot(epoch, model, features, filters, figname, fgal=0.5, idx=-1):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, 
filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n ind = (Xcoaddcov[:, idx][:, idx] < 1.) & (Xcov[:, idx][:, idx] < 1.)\n X = X[ind]\n Xcov = Xcov[ind]\n Xcoadd = Xcoadd[ind]\n Xcoaddcov = Xcoaddcov[ind]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = np.median(model.sample(a[i], m[i], v[i], size=1000), axis=0)\n\n stol = 0.145\n ptol = 0.03\n Nbins = 12\n magbins = np.linspace(18., 22., Nbins)\n dlt = magbins[1] - magbins[0]\n s = np.zeros(Nbins)\n p = np.zeros(Nbins)\n for i in range(Nbins):\n ind = (Xcoadd[:, 0] > magbins[i] - dlt) & \\\n (Xcoadd[:, 0] <= magbins[i] + dlt)\n sind = ind & (np.abs(Xcoadd[:, idx]) < 0.03)\n gind = ind & (np.abs(Xcoadd[:, idx]) > 0.03)\n ssind = sind & (np.abs(X[:, idx] > stol))\n sgind = gind & (np.abs(X[:, idx] < stol))\n psind = sind & (np.abs(posts[:, idx] > ptol))\n pgind = gind & (np.abs(posts[:, idx] < ptol))\n s[i] = 1. * len(X[ssind, 0]) + len(X[sgind, 0])\n p[i] = 1. * len(X[psind, 0]) + len(X[pgind, 0])\n s[i] /= len(X[ind, 0])\n p[i] /= len(X[ind, 0])\n\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(fs, fs))\n pl.plot(magbins, s, 'k--', drawstyle='steps-mid', label='Single Epoch',\n lw=2)\n pl.plot(magbins, p, 'k', drawstyle='steps-mid', label='XD Posterior', lw=2)\n pl.xlabel('psfmag $r$', fontsize=lsize)\n pl.ylabel('Misclassification Rate', fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def plot(self):\n # Find only unmasked data :\n # xyz, sData, sColor, _ = self._select_unmasked()\n xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n self.mesh = visu.Markers(name='Sources')\n self.mesh.set_data(xyz, edge_color=self.edgecolor, face_color=sColor,\n size=sData, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n self.mesh.set_gl_state('translucent')", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def plot12(self, dataset, ts_string_indices, source_jpg_folder='jpg_images', extension='jpg', rows=3, cols=4,\n outfname='Sample Frames.png', cmap=None, gui_color='green'):\n # Settings ############################################################\n font_label_box = {\n 'color': 'green',\n 'size': 16,\n }\n font_steering = {'family': 'monospace',\n # 'color': 'darkred',\n 'weight': 'normal',\n 'size': 20,\n }\n ROWS = rows\n COLS = cols\n NUM_IMAGES = ROWS * COLS\n\n # Figure ##############################################################\n # figsize = [width, height]\n fig = plt.figure(figsize=PAPER_A3_LAND, facecolor='white')\n fig.suptitle(\"Sample frames, Dataset: {}\".format(dataset.data_folder), fontsize=20)\n\n for i, ts_string_index in enumerate(ts_string_indices):\n rec = dataset.df.loc[ts_string_index]\n\n timestamp_string = rec['datetime'].strftime(\"%D %H:%M:%S.\") + \"{:.2}\".format(\n str(rec['datetime'].microsecond))\n\n if 'steering_pred_signal' in dataset.df.columns:\n this_label = \"{}\\n{:0.2f}/{:0.2f} steering \\n{:0.2f} throttle\".format(timestamp_string,\n rec['steering_signal'],\n rec['steering_pred_signal'],\n rec['throttle_signal'])\n 
else:\n this_label = \"{}\\n{:0.2f}/ steering \\n{:0.2f} throttle\".format(timestamp_string, rec['steering_signal'],\n rec['throttle_signal'])\n\n ax = fig.add_subplot(ROWS, COLS, i + 1)\n\n # Main Image ##########################################################\n jpg_path = os.path.join(dataset.path_dataset, source_jpg_folder, ts_string_index + '.' + extension)\n assert os.path.exists(jpg_path), \"{} does not exist\".format(jpg_path)\n img = mpl.image.imread(jpg_path)\n ax.imshow(img, cmap=cmap)\n # plt.title(str_label)\n\n # Data box ########################################################\n\n # ax.axes.get_xaxis().set_visible(False)\n # ax.axes.get_yaxis().set_visible(False)\n t = ax.text(5, 25, this_label, color=gui_color, alpha=1)\n # t = plt.text(0.5, 0.5, 'text', transform=ax.transAxes, fontsize=30)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='none'))\n\n # Steering widget HUD #################################################\n # Steering HUD: Actual steering signal\n steer_actual = ''.join(['|' if v else '-' for v in dataset.linear_bin(rec['steering_signal'])])\n text_steer = ax.text(80, 105, steer_actual, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color=gui_color)\n # Steering HUD: Predicted steering angle\n if 'steering_pred_signal' in dataset.df.columns:\n steer_pred = ''.join(['◈' if v else ' ' for v in dataset.linear_bin(rec['steering_pred_signal'])])\n text_steer_pred = ax.text(80, 95, steer_pred, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color='red')\n\n outpath = os.path.join(dataset.path_dataset, outfname)\n fig.savefig(outpath)\n logging.debug(\"Wrote Sample Frames figure to {}\".format(outpath))", "def create_lag_plot(series_name, lag = 1):\n plt.figure(figsize = (8,5))\n plt.title('Lag Plot of the Trade Value of Imports')\n plt.xlim(min(series_name), max(series_name))\n plt.ylim(min(series_name), max(series_name))\n lag_plot(series_name, lag = lag)\n plt.show()" ]
[ "0.59035116", "0.58859456", "0.578238", "0.5767331", "0.5742631", "0.57282263", "0.55894816", "0.5575051", "0.5565919", "0.55595213", "0.554277", "0.5542592", "0.55424595", "0.5524052", "0.5506664", "0.55034083", "0.5472254", "0.54397625", "0.54324085", "0.5425673", "0.54157317", "0.54133844", "0.5412371", "0.5399858", "0.53945", "0.538516", "0.5383022", "0.53827167", "0.53777903", "0.53697264" ]
0.6715108
0
Carga tola la pila con strings
def cargaAutoStr(pila): while not pila_llena(pila): largo = random.randint(1, 15) apilar(pila, randString(largo))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comenzar_nuevo_juego():\n escena_uno.cargarEscena1(screen, display_width, display_height)#Se pone a correr la escena\n #escena_uno.cargarEscena2(screen, display_width, display_height)", "def stringToPila(palabra):\n pila = Pila()\n for elemento in palabra:\n apilar(pila, elemento)\n return pila", "def saluda2(sujeto):\n print 'Hola %s !!' % sujeto", "def concatenare_str(obiect, string):\n\tobiect[\"descriere\"] = get_descriere(obiect) + string\n\treturn obiect", "def arroba_letras(cadena, long_palabra, costo_palabra_corta, costo_palabra_larga):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n costo_total = 0\n for i in range(len(palabras)):\n if len(palabras[i]) > long_palabra:#verificio si la longitud de esa palabra cortada es menor a lo previamente establecido\n frase_final += palabras[i][0:long_palabra] + \"@ \" # corto la palabra en la posicion max y agrego un @\n costo_total += costo_palabra_corta\n if palabras[i][-1] == \".\": # veo si en la palabra corta cortada hay un punto y si lo lo borro y reemplazo por un STOP\n frase_final = frase_final.strip() + palabras[i].replace(palabras[i], \" STOP \")\n elif palabras[i][-1] == \".\": # veo si en la palabra larga cortada hay un punto y si lo hay lo borro y lo reemplazo por un STOP\n frase_final = frase_final.strip(\".\") + palabras[i].replace(palabras[i][-1], \" STOP \") \n else:\n frase_final += palabras[i] + \" \"\n costo_total += costo_palabra_larga\n frase_final += \"STOPSTOP\" \n \n return f\"\"\"El telegrama final es: \n{frase_final} \nutilizando {long_palabra} letras maximas por palabra a un costo de ${costo_total} \"\"\"", "def priprema_za_extrakciju_stringova_po_prethodno_generiranoj_naredbi(loka, nare):\r\n lscrypt=loka+'\\\\scrypt.ps1'\r\n narediti=nare\r\n file=open(lscrypt, \"w\")\r\n file.write(narediti)\r\n file.close()", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n 
palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def empezando_la_partida():\n #estas 2 funciones las importo al menu y al juego\n texto = ' Una vez empezada la partida se encuentran a disposición del jugador el tablero \\n'+\\\n 'y el atril con las fichas para poder jugar, simplemente dando click en la ficha\\n'+\\\n 'deseada y el casillero del tablero deseado podemos ir armando letra a letra la \\n'+\\\n 'palabra de nuestro turno, de esta forma, formando palabras válidas, aprovechando\\n'+\\\n 'los casilleros de bonus y evitando los casilleros de penalización, el jugador va\\n'+\\\n 'sumando puntos.\\n'+\\\n ' El objetivo del juego es obtener más puntos que la maquina antes de que se acabe\\n'+\\\n 'el tiempo, se acaben las fichas del juego o que ya no se puedan formar palabras.'\n return texto", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = 
self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def __get_data(self):\n ips = self.server.JUGADORES.keys()\n convida = list(ips)\n retorno = \"\"\n for ip in ips:\n nick = self.server.JUGADORES[ip]['nick']\n tanque = self.server.JUGADORES[ip]['path']\n energia = self.server.JUGADORES[ip]['energia']\n vidas = self.server.JUGADORES[ip]['vidas']\n puntos = self.server.JUGADORES[ip]['puntos']\n posicion = self.server.JUGADORES[ip]['pos']\n bala = self.server.JUGADORES[ip]['bala']\n\n datos = \"%s,%s,%s,%s,%s,%s,%s,%s\" % (ip, nick, tanque,\n posicion, vidas, energia, puntos, bala)\n\n explosion = self.server.JUGADORES[ip]['explosiones'].get(\n self.client_address[0], False)\n if explosion:\n datos = \"%s,%s\" % (datos, explosion)\n del(self.server.JUGADORES[ip][\n 'explosiones'][self.client_address[0]])\n\n retorno = \"%s%s||\" % (retorno, datos)\n if vidas == 0:\n convida.remove(ip)\n\n if len(ips) > 1 and len(convida) == 1:\n return \"END\"\n else:\n return retorno.strip()", "def llegir_placa(p):\n\t# Obrim el fitxer\n\ts = \"\"\n\tf=open('places.dat','r+')\n\t# Calculem la posicio que volem mirar\n\tposicio = p*7\n\tf.seek(posicio)\n\ts+=f.read(7)\n\tf.close()\n\treturn s", "def agregar_bolsa(self, letra, cantidad):", "def encode_strings(self):\n self.version = u2b_if_py2(self.version)\n self.short = u2b_if_py2(self.short)\n self.description = u2b_if_py2(self.description)\n self.destination = [u2b_if_py2(m) for m in self.destination]", "def get_cmd_string(res, DVDFab_path, src_iso_path, client_dest_path): \n dest_path = change_fuhao(res[6])\n Dest = get_value(res[6], \"/DEST\")\n Mode = get_value(res[4], \"/MODE\")\n Src = get_value(src_iso_path, \"/SRC\")\n Audio = get_value(res[9], \"/AUDIO\")\n Audio_type = get_value(res[10], \"/AUDIOTYPE\")\n Change_play_order = get_value(res[11], \"/CHANGEPLAYORDER\")\n Copy_IFO = get_value(res[12], \"/COPYIFO\")\n Display_forced_sub = get_value(res[13], \"/DISPLAYFORCEDSUB\")\n Jump_menu = get_value(res[14], \"/JUMPMENU\")\n Jump_main = get_value(res[15], \"/JUMPMAIN\")\n Out_disc = get_value(res[16], \"/OUTDISC\")\n Path_player = get_value(res[17], \"/PATHPLAYER\")\n Preserve_menu_disc2 = get_value(res[18], \"/PRESERVEMENUDISC2\")\n Profile = get_value(res[19], \"/PROFILE\")\n Remove_DTS = get_value(res[20], \"/REMOVEDTS\")\n Remove_HD_audio = get_value(res[21], \"/REMOVEHDAUDIO\")\n Remove_menu = get_value(res[22], \"/REMOVEMENU\")\n Remove_PGC = get_value(res[23], \"/REMOVEPGC\")\n Rewind = get_value(res[24], \"/REWIND\")\n Subtitle = get_value(res[25], \"/SUBTITLE\")\n Title = get_value(res[26], \"/TITLE\")\n 
Volume = get_value(res[27], \"/VOLUME\")\n BD3DT = get_value(res[44], \"/BD3DCONVERTTYPE\")\n COMPRESSTOAC3 = get_value(res[45], \"/COMPRESSTOAC3\")\n Close = ' /CLOSE' \n Createminiso = ' /CREATEMINISO' if os.name == 'nt' else ''\n cmd_string = Mode + Src + Dest + Audio + Audio_type + Change_play_order + Copy_IFO + Display_forced_sub + Jump_menu + Jump_main\\\n + Out_disc + Path_player + Preserve_menu_disc2 + Profile + Remove_DTS + Remove_HD_audio + Remove_menu + Remove_PGC\\\n + Rewind + Subtitle + Title + Volume + BD3DT + COMPRESSTOAC3 + Close + Createminiso\n DVDFab_path_cmd_string = '\"' + DVDFab_path + '\"' + cmd_string \n initlog('the cmd_string: %s' % DVDFab_path_cmd_string)\n return DVDFab_path_cmd_string, dest_path", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def get_data(archivo):\n\n datos = commands.getoutput(\n 'file -ik %s%s%s' % (\"\\\"\", archivo, \"\\\"\"))\n\n retorno = \"\"\n\n for dat in datos.split(\":\")[1:]:\n retorno += \" %s\" % (dat)\n\n return retorno", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def func_piezo(self,piezo):\n self.write('SOURce:VOLTage:PIEZo '+str(piezo))\n self.read()", "def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)", "def mostrarSiglas(cadena):\n cadena_final = \"\"\n palabras = cadena.split(\" \")\n lista_iniciales = []\n for palabra in palabras: # recorro la palabra separada y saco la primera letra\n lista_iniciales.append(palabra[0])\n return cadena_final.join(lista_iniciales) # y aca devuelo la lisa convertida en str", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def psea(pname): # -> str:\n ...", "def cargar_mapa (self):\n\n stream_cargar = open ('yo_mapa.txt', 'rt',encoding=\"utf-8\")\n mapa=stream_cargar.readlines()\n \n a = mapa[0].split(\"X\")\n mapa__I=[]\n mapa__D=[]\n toca = \"izda\"\n for lista in a:\n pasar=\"X\"\n linea1=[]\n trozo=\"\"\n for i in lista:\n if pasar==\"X\":\n \n borrar = [\"[\",\"'\"]\n if i in borrar:\n pass\n elif i == \",\" or i == \"]\":\n linea1.append(trozo)\n trozo=\"\"\n pasar=\"V\"\n elif i == \"S\":\n toca=\"dxa\"\n else:\n trozo+=i\n\n else:\n pasar=\"X\"\n pass\n if toca == \"izda\":\n mapa__I.append(linea1)\n else:\n mapa__D.append(linea1)\n\n mapa_cargado=[]\n for i in range (len(mapa__I)):\n\n mapa_cargado.append(mapa__I[i]+mapa__D[i]) \n\n stream_cargar=(close)\n return mapa_cargado", "def getAll(nombre, apellidos):\n texto = getNombre(nombre) + '\\n' + getApellidos(apellidos) \n return texto\n pass" ]
[ "0.58442247", "0.5648824", "0.56434804", "0.56288433", "0.5588605", "0.5584497", "0.5554773", "0.5449711", "0.54150164", "0.53789884", "0.5370985", "0.532739", "0.53188556", "0.5307319", "0.52647316", "0.51832354", "0.51682794", "0.51462793", "0.51443756", "0.51443756", "0.51443756", "0.51443756", "0.51443756", "0.5140873", "0.51317626", "0.51271486", "0.5119429", "0.5099722", "0.5092418", "0.5089064" ]
0.59360135
0
Desapila el elemento en cima
def desapilar(pila): dato = pila.datos[pila.tope] pila.tope -= 1 return dato
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, *args, **kwargs):\n campo = Campostagimg.objects.filter(tag=self.tag, imagen=self.imagen)\n for c in campo:\n c.medidas = \"\"\n c.save()\n c.precision = 0\n c.save()\n c.v_esperado = \"\"\n c.save() \n\n super(Campos_defecto, self).delete(*args, **kwargs)", "def usar(self,letra):\n self.atril.remove(letra)", "def removeItem(self, valor):\n if not self.esta_vazia():\n ## Os dois ponteiros apontam pro primeiro elemento da lista\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n while True:\n ## Se o elemento for encontrado\n if elementoAtual._inteiro == valor:\n while elementoAtual._inteiro == valor:\n if elementoAtual == elementoAnterior:\n ## Se o elemento a ser removido é o primeiro\n self.removeInicio()\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n else:\n elementoAnterior._proximo = elementoAtual._proximo\n elementoAnterior._proximo._anterior = elementoAnterior\n elementoAtual = elementoAnterior._proximo\n if elementoAtual == self._cabeca:\n break\n break\n else:\n ## se o elemento não foi encontrado ainda\n if elementoAnterior != elementoAtual:\n ## Avança o ponteiro que marca o nó anterior apenas quando não é a primeira passagem\n ## do Loop (os dois ponteiros já estão diferentes)\n elementoAnterior = elementoAnterior._proximo\n ## de qualquer forma avança o ponteiro para o atual\n elementoAtual = elementoAtual._proximo\n ## Testar se o elemento buscado não existe\n if elementoAtual == self._cabeca:\n break\n return None", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def eliminarDetalle(self):\n\n itemActual = self.tableFactura.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un item para dar de baja\")\n else:\n detalle = self.detallesTabla[itemActual.row()]\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.bajaFisica(self.sesion)\n del self.lotesVentas[detalle]\n del self.data[itemActual.row()]\n self.tableFactura.hideRow(itemActual.row())\n self.actualizar()\n self.productosAgregados -=1\n self.objectModified.emit()", "def remove(self,producto):\n id_producto = str(producto.id)\n if id_producto in self.carro:\n del self.carro[id_producto]\n self.save()", "def __delitem__(self, i):\n if not (0 <= i < len(self)):\n raise IndexError(\"index en dehors de la plage admissible.\")\n\n if i == 0:\n self.supprimer_tete()\n return\n\n courante = self.tete\n\n for j in range(i - 1):\n courante = courante.suivante\n \n self._supprimer_apres(courante)", "def desaparecer(self,identificador_de_lista):\n self.mapa.delet_bomberman(identificador_de_lista)", "def elimnar_fila(self):\n button = self.sender()\n if button:\n row = self.tablaSincronizaciones.indexAt(button.pos()).row()\n contenido = self.tablaSincronizaciones.item(row, 0).text()\n Archivo_crontab.eliminar_sincronizacion(contenido)\n self.tablaSincronizaciones.removeRow(row)", "def _primerElem(l):\n return l[0]", "def cargarObra(self):\n rowActual=self.tableObra.currentItem().row()\n self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))\n self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))\n self.tableObra.hide()\n self.lineObra.setEnabled(False)\n self.lineCuit.setEnabled(False)\n self.obraSocialSeleccionada = 
str(self.lineObra.text())\n self.cargar_productos(self.obraSocialSeleccionada)\n self.gbProducto.setVisible(True)", "def cliquer_sur_unité(self):", "def afficher(dico):\n return dico", "def eliminar(self):\n\n itemActual = self.tablePagos.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un para poder eliminar\")\n else:\n monto = self.detalles_cobro[itemActual.row()][1]\n del self.detalles_cobro[itemActual.row()]\n self.total_a_pagar += monto\n self.tablePagos.setRowHidden(itemActual.row(),True)\n self.actualizar_total()", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def usunPrzedmiot(self, przedmiot: str):\n self.przedmioty.pop(przedmiot,0) # jesli ze slownika to dodaje popa aby pokazac mu co usunac", "def marcarPunto(self):\n # Es primera vez que marco\n if self.tempSelected == None:\n # Capturo el ultimo elemento se se selecciono\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")\n else:\n # Desmarco el anterior\n self.telaMAPA.itemconfigure(self.tempSelected, fill=\"white\")\n # Marco el nuevo\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")", "def remove_diluciju(self, naziv):\n self.dilucijskeJedinice.pop(naziv, 0)", "def Imagenes_del_ahorcado(intento):\n HANGMANPICS = ['''\n +---+\n | |\n |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\n os.system(\"clear\")\n print(HANGMANPICS[intento])", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def nuevo(self, ventana):\n self.objeto = ventana.objeto\n self.set_text(self.objeto.__str__())\n self.id = self.objeto.id\n self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#FFFFFF\"))\n ventana.destroy()\n self.busqueda.destroy()", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def get_ostale_kanale(self, x):\n out = self.sviKanali\n out.remove(x)\n return out", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def remove(self):", "def mezclar_bolsa(self):", "def retireSommet(self, sommet):\r\n nouveauGraphe = copy.deepcopy(self) # on effectue une copie du graphe\r\n nouveauGraphe.n = self.n-1 # On a n-1 points\r\n # NB: il faut aussi changer m et listeArretes mais on va pas le faire tout de suite car pas urgent\r\n # 1. On suprrime la ligne d'indice sommet\r\n #* AUTRE MÉTHODE del nouveauGraphe.adjMatrix[sommet]\r\n # print(nouveauGraphe.adjMatrix)\r\n nouveauGraphe.adjMatrix.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n #2. 
On supprime la colonne d'indice sommet = on supprime l'index sommet de chaque sous liste\r\n # la liste comprehension ne marche pas bien :(\r\n for line in nouveauGraphe.adjMatrix:\r\n line.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n # nouveauGraphe.m = 0\r\n # 2ème méthode:\r\n # for ligne in nouveauGraphe.adjMatrix:\r\n # ligne.pop(sommet)\r\n return nouveauGraphe", "def remove():", "def __init__(self, id, padre, hijos):\n\n self.__id = id\n self.__padre = padre\n self.__hijos = np.array([])\n for i in range(len(hijos)):\n newHijos = np.delete(hijos,i,0)#Se remueve el nodo de la lista una vez recorrido\n self.__hijos = np.append(self.__hijos, NodoCiudad(hijos[i],self,newHijos))", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True" ]
[ "0.61714435", "0.61389273", "0.6033699", "0.5995851", "0.5916466", "0.5895718", "0.58551925", "0.5754909", "0.5726794", "0.5719078", "0.5699067", "0.56908727", "0.5681546", "0.56783044", "0.5675249", "0.5675179", "0.56316787", "0.55975395", "0.55830395", "0.55497795", "0.5538994", "0.55239606", "0.55217326", "0.5504797", "0.5500727", "0.5465768", "0.5437616", "0.5422737", "0.54194456", "0.5393014" ]
0.6247548
0
Devuelve elemento de la cima
def cima(pila): return pila.datos[pila.tope]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def Cima(self):\n if(self.Pila_Vacia()=='true'):\n return \"Pila Vacia\"\n else:\n return self.pila[self.puntero]", "def valor(self):\n try:\n objeto = self.objeto\n except AttributeError:\n objeto = None\n return objeto", "def marcarPunto(self):\n # Es primera vez que marco\n if self.tempSelected == None:\n # Capturo el ultimo elemento se se selecciono\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")\n else:\n # Desmarco el anterior\n self.telaMAPA.itemconfigure(self.tempSelected, fill=\"white\")\n # Marco el nuevo\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")", "def scrape_carteleraVIEJA(data, comp_nom):\t\n\t\n\tfunciones = []\n\tsoup = BeautifulSoup(data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\t\n\t#Busca complejo platino... en caso de existir:\n\tcomplejo_platino = complejo_org\n\t\n\t\n\tpeliculas = soup.find('table', cellspacing='0', cellpadding='0', border='0').contents[3:-1:2]\n\t\n\tfor peli in peliculas:\n\t\ttres_D = False\n\t\tidioma = None\n\t\t\n\t\t#Checar tiene logo de 3d\n\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\n\t\t#Encabezado contiene titulo e idioma\n\t\tencabezado = peli.find('li', 'texto_3', style='margin: 2px 0px 0px; float: left; width: 155px;')\n\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\n\t\t\n\t\t#Determina Idioma\n\t\tif encabezado.find('img', alt='idioma').get('src', '').find('ing') > 0:\n\t\t\tidioma = 'ingles'\n\t\telse:\n\t\t\tidioma = 'espanol'\n\t\t\n\t\ttit = '|'+ titulo + '|'\n\t\t#Buscar pelicula por titulo segun idioma y 3d.. 
subtitulada o no.\n\t\tpeli_query = Pelicula.objects.filter(alt_tit__icontains=tit, tres_D=tres_D)\n\t\tif len(peli_query) > 1:\n\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\tpelicula = peli_query.filter(subtitulada= (idioma == 'ingles') )\n\t\telif len(peli_query) == 1:\n\t\t\tpelicula = peli_query[0]\n\t\telse:\n\t\t\tlogger.debug( \"No se encontro pelicula %s\" % titulo\t\t)\n\t\t\t\n\t\thoras_html = peli.find('div', id='horax')\n\t\tplatino_b= False\t\t\n\t\tfor tag in horas_html.contents:\n\t\t\t#Me salto todo lo que no es html\n\t\t\tif type(tag) != NavigableString:\t\t\n\t\t\t\t#En caso de que sea funciones de platino\n\t\t\t\tif tag.name == 'center':\n\t\t\t\t\tplatino_b = True\n\t\t\t\t\tfuncion_name = ''.join(tag.findAll(text=True)).strip()\n\t\t\t\t\tif funcion_name.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\t\n\t\t\t\telif tag.get('style','').find('border-bottom: 1px solid rgb(238, 207, 0);') > -1:\n\t\t\t\t\t#Ajustar de regreso el complejo normal\n\t\t\t\t\tcomplejo = complejo_org\n\t\t\t\t\tplatino_b = False\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t#Si es renglon de hora y no algo mas como <br/>\t\t\t\n\t\t\t\tif tag.name== 'div' and tag.get('id','') == 'general':\n\t\t\t\t\tfecha = parseDate(tag.find('div', id=fecha).string)\n\t\t\t\t\tfunciones.extend(\n\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t'pelicula': pelicula,\n\t\t\t\t\t\t\t'complejo': complejo,\n\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n\t\t\t\t\t\t\t} for hora_html in tag.find('div', id='funciones').find('a', 'texto_1')]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t)\n\t\t\t\t\t#logger.debug( funciones)\n\treturn funciones", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def obter_caminho(self):\n return self.caminho", "def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = 
str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"", "def removeItem(self, valor):\n if not self.esta_vazia():\n ## Os dois ponteiros apontam pro primeiro elemento da lista\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n while True:\n ## Se o elemento for encontrado\n if elementoAtual._inteiro == valor:\n while elementoAtual._inteiro == valor:\n if elementoAtual == elementoAnterior:\n ## Se o elemento a ser removido é o primeiro\n self.removeInicio()\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n else:\n elementoAnterior._proximo = elementoAtual._proximo\n elementoAnterior._proximo._anterior = elementoAnterior\n elementoAtual = elementoAnterior._proximo\n if elementoAtual == self._cabeca:\n break\n break\n else:\n ## se o elemento não foi encontrado ainda\n if elementoAnterior != elementoAtual:\n ## Avança o ponteiro que marca o nó anterior apenas quando não é a primeira passagem\n ## do Loop (os dois ponteiros já estão diferentes)\n elementoAnterior = elementoAnterior._proximo\n ## de qualquer forma avança o ponteiro para o atual\n elementoAtual = elementoAtual._proximo\n ## Testar se o elemento buscado não existe\n if elementoAtual == self._cabeca:\n break\n return None", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def mezclar_bolsa(self):", "def get_posicion(self):\n return self.posicion", "def first(self):\n if self.is_empty():\n raise Empty(\"Deque está vacío\")\n return self._header._next._element # un artículo real justo después de la cabecera", "def afficher(dico):\n return dico", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def atributo_complejidad():\n tipo_defecto = ItemTipos.objects.filter(es_supertipo=True)\n if tipo_defecto.count() > 0:\n attr1 = ItemAtributos.objects.filter(nombre='complejidad').\\\n filter(idtipoitem=tipo_defecto)\n return attr1\n return None", "def elemento_actual(self):\n\t\tif not self.actual:\n\t\t\treturn None\n\t\treturn self.actual.dato", "def pretraga_po_cijeni(self, lst, broj):\n pretrazeno = []\n for i in lst:\n if i.cijena == broj:\n pretrazeno.append(i)\n return pretrazeno", "def elems(self):", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def get_contenu(self):\n return self.objets", "def get_first_item(self):\n params = 
urllib.parse.urlencode({'o':'1', 'q':self.query})\n url = 'https://www.leboncoin.fr/annonces/offres/ile_de_france/?{:s}'.format(params) # Cree l'url de recherche en get\n html = urllib.request.urlopen(url)\n if url != html.geturl():\n return None\n soup = BeautifulSoup.BeautifulSoup(html, 'html5lib')\n try:\n products = soup.section.find_all('a', 'list_item clearfix trackable')\n except Exception as e:\n print('Nothing found on leboncoin')\n return None\n for product in products: # recupere les differentes informations de chaque produit\n if str(product.section.h2).strip() == 'None':\n continue\n name = product.section.h2.contents[0].strip()\n price = self.__get_price(product)\n link = 'http:' + product['href']\n return (name, price, link)\n return None", "def _pega_no(self, index):\n ponteiro = self.inicio\n for i in range(index):\n if ponteiro:\n ponteiro = ponteiro.prox\n else:\n raise IndexError(\"list index out of range\")\n return ponteiro", "def __init__(self):\n self.tours = []\n self.grille = Grille()" ]
[ "0.62369305", "0.5968242", "0.5968242", "0.5929463", "0.5875082", "0.5795964", "0.574635", "0.57377017", "0.56929976", "0.566961", "0.5612241", "0.5606185", "0.5604151", "0.55805415", "0.55805415", "0.5572482", "0.555679", "0.5539605", "0.5539363", "0.5522629", "0.55216", "0.5502805", "0.5500729", "0.5494472", "0.5476412", "0.547567", "0.54575396", "0.54571426", "0.5414721", "0.5413411" ]
0.6678378
0
Devuelve la pila invertida
def invertir(pila1): pila2 = Pila() while not pila_vacia(pila1): apilar(pila2, desapilar(pila1)) return pila2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __invert__(self):\n return self.inverse()", "def inverse(self, x, y):", "def __invert(self, args):", "def invert(self,el):\n return el^(self.q-2)", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def invert(self):\n raise NotImplementedError()", "def scale_invert(self):", "def invert(self, img):\n return self.inverse()(img)", "def inverse(self):\n return self.invert()", "def __invert__(self):\n a = self.angle\n x, y = Vector.cartesian([1, a])\n return Vector(x, y)", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def __invert__(self):\r\n return 1 - self", "def __invert__(self):\n return self.reverse()", "def flip(self, p):\n return -p", "def flip(self):", "def invert(x):\n return linalg.inv(x)", "def invert(self):\n self._c = ~self._c", "def invert (y):\n\n if eq(y,pos) : return y\n elif lt(y,nil) : return neg(invert(neg(y)))\n elif eq(y,nil) : raise ZeroDivisionError()\n yl,yr = split(y)\n il = nil\n ir = None\n r = None,None\n iyr,iyl = None,None\n width = 0\n while (il or ir) and width < 3:\n width += 1\n nl = nr = None\n if il is not None:\n r = (il,r[1])\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n left = mul(mul(add(pos,sub(yr,y)),il),iyr)\n if r[0] is None or gt(left,r[0]):\n nl = left\n if yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n right = mul(mul(add(pos,sub(yl,y)),il),iyl)\n if r[1] is None or lt(right,r[1]):\n nr = right\n if ir:\n r = (r[0],ir)\n if yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n left = mul(mul(add(pos,sub(yl,y)),ir),iyl)\n if r[0] is None or (gt(left,r[0]) and (not nl or gt(left,nl))):\n nl = left\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n right = mul(mul(add(pos,sub(yr,y)),ir),iyr)\n if r[1] is None or (lt(right,r[1]) and (not nr or lt(right,nr))):\n nr = right\n il,ir = nl,nr\n #print(r)\n if r[0] is None: r = (0,r[1])\n if r[1] is None: r = (r[0],0)\n return join(*r)", "def __invert__(self):\n return BitBoard(~self.num)", "def __invert__(self):\r\n if self.field.characteristic == 2:\r\n return runtime.invert(self)\r\n\r\n return super().__invert__()", "def __invert__(self):\n return self.fam.c_unop('invert', self)", "def opposite(direction):\n return (direction+2)%4", "def __invert__(self) -> Operators:\n return self.operate(inv)", "def invert(self):\n self.vertices.reverse()", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def __invert__(self):\n return self.negated()", "def __invert__(self):\n return self.__neg__()", "def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))", "def inv(self):\n return self.conjugate()" ]
[ "0.7260351", "0.7015506", "0.69790083", "0.6911814", "0.6806856", "0.6755985", "0.6703932", "0.67010486", "0.66792345", "0.6677001", "0.6664742", "0.66548175", "0.6640481", "0.65897536", "0.65876037", "0.6559641", "0.6545105", "0.65431285", "0.6482718", "0.64756066", "0.6431034", "0.6425339", "0.6421494", "0.64213645", "0.6406834", "0.6405458", "0.63910156", "0.63891184", "0.63673955", "0.63574374" ]
0.7615787
0
Devuelve pila del string ingresado
def stringToPila(palabra): pila = Pila() for elemento in palabra: apilar(pila, elemento) return pila
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_string2(self):\n pass", "def getApellidos(apellido):\n texto = f'El apellido es: {apellido}'\n return texto\n pass", "def psea(pname): # -> str:\n ...", "def print_as_text(pi):\n\n pi_string = str(\"%1.18f\" % pi)\n\n print(\"Definitive: \" + PI_STRING)\n\n print(\"Estimated: \", end=\"\")\n\n for i in range(0, len(pi_string)):\n\n if pi_string[i] == PI_STRING[i]:\n\n print(GREEN + pi_string[i] + RESET, end=\"\")\n\n else:\n\n print(RED + pi_string[i] + RESET, end=\"\")\n\n print(\"\\n\")", "def EnglishToPig(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")", "def __str__(self):\n return \"p(\" + \",\".join([str(round(c, digits)) for c in self.components]) + \")\"", "def input_cislo_policka():\n str_policka = input('\\nNa ktore policko chces umiestnit svoje \\'X\\'? Zadaj hodnotu 0 - 19: ')\n return str_policka", "def __repr__(self):\n s = ''\n no = self.getRaiz()\n s += str(no.getPai()) + '\\n'\n s += '^' + '\\n'\n s += str(no.getEsquerdo()) + ' <- '\n s += str(no.getDado()) + ' -> '\n s += str(no.getDireito())\n return s", "def text(self) -> str:", "def __unicode__(self):\n d = ((2, \".\"), (6, \".\"), (10, \"/\"), (15, \"-\"))\n s = list(map(str, self.cnpj))\n \n for i, v in d:\n s.insert(i, v)\n \n r = ''.join(s)\n \n return r", "def __str__(self):\n\t\tif self.__valide:\n\t\t\treturn str(self.__tete)\n\t\telse:\n\t\t\treturn \"(polynome invalide)\"", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def refang(self, text: str):", "def get_pi_as_string():\n\n request = requests.get(\"http://www.eveandersson.com/pi/digits/10000\")\n doc = BeautifulSoup(request.text, \"html.parser\").select_one(\"pre\").text.strip()\n pi_string = doc.replace(\" \", \"\").replace(\".\", \"\").replace(\"\\n\", \"\")\n return pi_string", "def __str__(self) -> str:\r\n return self.process(self.string)", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def us(self, string=''):\n return string.replace(' ', '_')", "def to_pinyin(s: str) -> str:\n if s == '山西':\n return 'Shan1xi'\n elif s == '陕西':\n return 'Shan3xi'\n pylist = lazy_pinyin(s)\n py = ''.join(pylist)\n return py", "def get_orion_space_string(self) -> str:", "def saluda2(sujeto):\n print 'Hola %s !!' 
% sujeto", "def gerarPalavraSecreta():\n global palavraOculta\n for _ in range(len(palavraDoJogo)):\n palavraOculta += '*'\n print(palavraOculta)", "def __str__(self):\n allowed = ['!', '@', '#', '$', '%', '^', '&', '*', '/', '.', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n returnstring = \"\"\n for row in self.positions:\n for char in row:\n if char.isupper() or char == 'r' or char in allowed:\n returnstring += \"| \" + char + \" \"\n else:\n returnstring += \"| \" + \"_\" + \" \"\n returnstring += \"\\n\"\n return returnstring", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def make_spondaic(self, scansion: str) -> str:\n mark_list = string_utils.mark_list(scansion)\n vals = list(scansion.replace(\" \", \"\"))\n new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]\n corrected = \"\".join(new_vals)\n new_line = list(\" \" * len(scansion))\n for idx, car in enumerate(corrected):\n new_line[mark_list[idx]] = car\n return \"\".join(new_line)", "def __str__(self):\n turnstile = \"⊢\" if settings.unicode else \"|-\"\n if self.hyps:\n str_hyps = \", \".join(str(hyp) for hyp in self.hyps)\n return str_hyps + ' ' + turnstile + ' ' + str(self.prop)\n else:\n return turnstile + ' ' + str(self.prop)", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def GenerateString(self, i, string):\r\n \r\n if i <= 0:\r\n return string\r\n\t\t\r\n string = string.replace(\"F\", (self.f).lower()) \r\n string = string.replace(\"X\", (self.x).lower())\r\n string = string.replace(\"Y\", (self.y).lower())\r\n \r\n \r\n string = string.upper()\r\n string = self.GenerateString(i - 1, string)\r\n\r\n return string", "def strand_string(self):\n if self.is_forward():\n return '+'\n if self.is_reverse():\n return '-'\n return '.'", "def __str__(self):\n return '{} {}'.format(self.nombre, self.apellido)", "def cliquer_sur_unité(self):" ]
[ "0.5971977", "0.59691834", "0.5927014", "0.59089327", "0.58850706", "0.5883489", "0.5877891", "0.5867647", "0.58412963", "0.5805934", "0.57704586", "0.5736824", "0.57056093", "0.5686816", "0.5681042", "0.5671489", "0.5662893", "0.56376225", "0.5629446", "0.56220335", "0.5607931", "0.5601104", "0.5598237", "0.5593191", "0.55482614", "0.5536287", "0.5530084", "0.5498233", "0.5497834", "0.54927486" ]
0.599477
0
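The stack rows above (desapilar, cima, invertir, stringToPila) all lean on a `Pila` class with `datos`/`tope` attributes plus `apilar` and `pila_vacia` helpers that never appear in the dump itself. Below is a minimal sketch of that assumed supporting code, written only to be consistent with how the stored functions index `datos[tope]`; it is illustrative and is not part of the dataset.

```python
# Assumed supporting code (not in the dataset): a list-backed stack where
# `tope` indexes the current top element and -1 means "empty".
class Pila:
    def __init__(self):
        self.datos = []   # underlying storage
        self.tope = -1    # index of the top element


def pila_vacia(pila):
    # True when the stack holds no elements.
    return pila.tope == -1


def apilar(pila, dato):
    # Push `dato`, reusing slots left behind by earlier pops when possible.
    pila.tope += 1
    if pila.tope < len(pila.datos):
        pila.datos[pila.tope] = dato
    else:
        pila.datos.append(dato)

# With these helpers, the functions stored in the rows above compose as expected,
# e.g. cima(invertir(stringToPila("abc"))) would return "a".
```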
Finalize the grades and print. Only for assessors.
def finalize(request, pk, version=0): ts = get_timeslot() if not hasattr(ts, 'resultoptions'): raise PermissionDenied("Results menu is not yet visible.") else: if not get_timeslot().resultoptions.Visible: raise PermissionDenied("Results menu is not yet visible.") dstr = get_object_or_404(Distribution, pk=pk) if not hasattr(dstr, 'presentationtimeslot'): raise PermissionDenied('This student does not have a presentation planned. Please plan it first.') if not request.user.is_superuser and \ request.user not in dstr.presentationtimeslot.Presentations.Assessors.all() and \ request.user != dstr.Proposal.Track.Head: raise PermissionDenied("You are not the correct owner of this distribution. " " Grades can only be finalized by assessors or track heads. " " To get a preview of the print view, use the 'Print Preview' button.") version = int(version) # check if grade is valid error_list = '' for cat in GradeCategory.objects.filter(TimeSlot=get_timeslot()): try: cat_res = cat.results.get(Distribution=dstr) if not cat_res.is_valid(): error_list += ('<li>Category {} is not completed.</li>'.format(cat)) except CategoryResult.DoesNotExist: error_list += ('<li>Category {} is missing</li>'.format(cat)) if error_list: return render(request, "base.html", context={ 'Message': '<h1>The results of this student are not yet finished</h1><p>The following error(s) occurred:</p><ul>{}</ul>'.format(error_list), "return": "results:gradeformstaff", "returnget": str(pk), }) if version == 0: # The normal page summarizing the grades of the student return render(request, "results/finalize_grades.html", { "dstr": dstr, "catresults": dstr.results.all(), "final": all(f.Final is True for f in dstr.results.all()), "finalgrade": dstr.TotalGradeRounded(), "preview": False, }) else: # type 1 and 2, finalize grades. if get_timephase_number() != 7: raise PermissionDenied("Finalize grades is only possible in the time phase 'Presentation of results'") for cat in dstr.results.all(): # set final to True, disable editing from here onward. cat.Final = True cat.save() if version == 1: # printable page with grades return render(request, "results/print_grades_pdf.html", { "dstr": dstr, "catresults": dstr.results.all(), "finalgrade": dstr.TotalGradeRounded(), }) elif version == 2: # pdf with grades html = get_template('results/print_grades_pdf.html').render({ "dstr": dstr, "catresults": dstr.results.all(), "finalgrade": dstr.TotalGradeRounded(), }) buffer = BytesIO() pisa_status = pisa.CreatePDF(html.encode('utf-8'), dest=buffer, encoding='utf-8') if pisa_status.err: raise Exception("Pisa Failed PDF creation in print final grade for distribution {}.".format(dstr)) buffer.seek(0) response = HttpResponse(buffer, 'application/pdf') response['Content-Disposition'] = 'attachment; filename="bepresult_{}.pdf"'.format(dstr.Student.usermeta.get_nice_name()) return response raise PermissionDenied('Invalid type.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def finalize():\n\n print(\"\"\"\n The script analysis/sedov_compare.py can be used to analyze these\n results. That will perform an average at constant radius and\n compare the radial profiles to the exact solution. Sample exact\n data is provided as analysis/cylindrical-sedov.out\n \"\"\")", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def finalize(self):\n\t\tif self._sum_factor != 0.0:\n\t\t\tself._last_score = self._current_score / self._sum_factor\n\t\telse:\n\t\t\tself._last_score = 0.0\n\n\t\tself._scores.append(self._last_score)\n\t\tself._scores = self._scores[-self._range[1]:]\n\t\n\t\tself._sum_factor = 0.0\n\t\tself._current_score = 0.0", "def finalize(self):\n logger.debug(\"Generation Complete\")\n self.events.generation_complete()", "def finalize(self):\n print('Cleaning up...')", "def print_scores(self):\n ### FILL IN ###", "def finalize():\n global interpreter\n del interpreter\n blotish._cleanup()\n\n # Set the progress printing state to whatever it was before\n import paraview.servermanager\n global wasProgressPrinting\n paraview.servermanager.SetProgressPrintingEnabled(wasProgressPrinting)", "def print_grades(grades, grader_name):\n grades = sorted(grades,\n key=lambda grade: grade.student_name())\n # Length of longest name\n max_name_len = max(len(grade.student_name()) for grade in grades)\n\n grade_report = '\\n'.join(\n '{:<{max_name_len}}\\t{}\\t{}'.format(\n grade.student_name(),\n grade.score() if grade.graded() else '(ungraded)',\n grade.breakdown(grader_name) if grade.graded() else '',\n max_name_len=max_name_len)\n for grade in grades)\n click.echo_via_pager('grade report:\\n\\n' + grade_report)", "def print_allocations(self, ):\n pass", "def finalize(self):\r\n\r\n self.find_parents()\r\n self.order_vertices()\r\n self.set_rotation_matrices()", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def finalize_scores(self):\n if self.candidates_finalized:\n return\n self.candidates_finalized = True\n for cand in self.candidates:\n new_logp_blank = cand.logp_total()\n last_word = cand.text_state.last_word\n if self.lm is not None and last_word != '':\n # Merging cands with texts differing only in the final sep was not done in the reference.\n new_lm_state = kenlm.State()\n logp_lm_last_word = 
self.lm.BaseScore(cand.lm_state, last_word, new_lm_state) * self.log10_to_ln\n cand.lm_state = new_lm_state\n if self.oov_score is not None and last_word not in self.lm:\n logp_lm_last_word = self.oov_score\n new_logp_blank += self.alpha * logp_lm_last_word + self.beta\n cand.logp_blank = new_logp_blank\n cand.logp_non_blank = -np.inf\n cand.new_logp_blank = None\n cand.new_logp_non_blank = None", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def finalize(self):\r\n pass", "def finalize(self):\n print(\"%d default sprite names found:\" % self.total_default)\n for name in self.list_default:\n print name", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def finalize(self):\n print(\"%d default backgdrop names found\" % self.total_default)\n for name in self.list_default:\n print name", "def finalize(self):\n return", "def finalize():\n pass", "def finalize():\n pass" ]
[ "0.59876657", "0.57981944", "0.564417", "0.55673975", "0.55103207", "0.55028576", "0.5473704", "0.5460755", "0.5440873", "0.5409173", "0.53980476", "0.53825235", "0.5363283", "0.5363283", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.53493667", "0.5339003", "0.5333089", "0.5324575", "0.53207976", "0.53089803", "0.52872986", "0.5278692", "0.52744025", "0.52744025" ]
0.5862954
1
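Every row carries the same `metadata` value, whose `objective` block lists a single `triplet` combination of `["query", "document", "negatives"]`. A minimal sketch of how one row might be expanded into (anchor, positive, negative) triplets under that objective is shown below; the `row_to_triplets` name and the dict-style field access are assumptions for illustration, not an official loader for this dataset.

```python
# Illustrative only: expand one dump row into training triplets following the
# metadata objective {"triplet": [["query", "document", "negatives"]]}.
def row_to_triplets(row):
    anchor = row["query"]        # e.g. "Finalize the grades and print. Only for assessors."
    positive = row["document"]   # the paired function body
    for negative in row["negatives"]:
        yield (anchor, positive, negative)

# Usage sketch: triplets = list(row_to_triplets(some_row))
```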
List all aspects of a given grade category in the current timeslot
def list_aspects(request, pk): category = get_object_or_404(GradeCategory, pk=pk) aspects = GradeCategoryAspect.objects.filter(Category=category) ts = get_timeslot() return render(request, "results/list_aspects.html", { "aspects": aspects, 'ts': ts, 'cat': category, })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_budget_analysis(cursor, plot_parameters, by_category=False):\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories", "def about(request, pk=None):\n if pk and get_grouptype('3') in request.user.groups.all():\n ts = get_object_or_404(TimeSlot, pk=pk)\n else:\n ts = get_timeslot()\n return render(request, \"results/about_grades.html\", {\n 'scores': CategoryAspectResult.ResultOptions,\n \"categories\": GradeCategory.objects.filter(TimeSlot=ts),\n 'ts': ts,\n })", "def getCategory():", "def time_budget(self, mode):\n\n def 
time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")", "def get_award_list(actioncluster):\n award_queryset = (ActionClusterAward.objects\n .select_related('award').filter(actioncluster=actioncluster))\n return [a.award for a in award_queryset]", "def extract_abilities(self):\n titleLabel = QLabel(\"Ability Scores\")\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n abilities = [\"Strength\", \"Dexterity\", \"Constitution\", \"Intelligence\", \"Wisdom\", \"Charisma\"]\n for [minVal, maxVal] in 
self.filters[\"Abilities\"].values():\n nextLabel = QLabel(f\"{abilities[counter]} - Between {str(minVal)} & {str(maxVal)}\")\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter / 2), counter % 2, alignment=Qt.AlignCenter)\n counter += 1", "def print_tod_accomplishments(accomplishments):\n print(Colors.BLUE + \"Accomplishments from Tod:\" + Colors.NORMAL)\n for accomplishment in accomplishments:\n print(Colors.CYAN + '* ' + Colors.NORMAL + accomplishment)", "def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def get_queryset(self):\n return Objective.objects.filter(perspective__description='Learning and Capacity').order_by('code')", "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def toughCategory():\n return prepJSON(cs411_dash.toughCategory())", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_attributes(cls, entity):\n return entity.category.facts.all()", "def get_categs_section(sect):\n return Category.objects.filter(section__section=sect)", "def 
get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Capacity').order_by('objective')", "def items(self, course):\r\n pass", "def get_section_grades(self, section, section_goal=False):\n if not section_goal:\n SECTION_GRADES = \"\"\"SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f,count_i, count_w FROM SectionGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s\"\"\"\n else:\n SECTION_GRADES = \"\"\"SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f FROM SectionGoalGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s AND goal_id = %s\"\"\"\n\n ret = None\n #try:\n if not section_goal:\n self.db_cursor.execute(\n SECTION_GRADES,\n (section.course_name, section.semester, section.year, section.section_id))\n else:\n self.db_cursor.execute(\n SECTION_GRADES,\n (section.course, section.semester, section.year, section.section_id, section.goal_id))\n section_grades = self.db_cursor.fetchall()\n if len(section_grades) > 0 and not section_goal:\n ret = SectionGrades()\n ret.section_id = section.section_id\n ret.semester = section.semester\n ret.year = section.year\n ret.course = section.course_name\n ret.count_ap = section_grades[0][0]\n ret.count_a = section_grades[0][1]\n ret.count_am = section_grades[0][2]\n ret.count_bp = section_grades[0][3]\n ret.count_b = section_grades[0][4]\n ret.count_bm = section_grades[0][5]\n ret.count_cp = section_grades[0][6]\n ret.count_c = section_grades[0][7]\n ret.count_cm = section_grades[0][8]\n ret.count_dp = section_grades[0][9]\n ret.count_d = section_grades[0][10]\n ret.count_dm = section_grades[0][11]\n ret.count_f = section_grades[0][12]\n ret.count_i = section_grades[0][13]\n ret.count_w = section_grades[0][14]\n ret.course = section.course_name\n ret.semester = section.semester\n ret.year = section.year\n ret.section_id = section.section_id\n elif len(section_grades) > 0 and section_goal:\n ret = SectionGoalGrades()\n ret.section_id = section.section_id\n ret.semester = section.semester\n ret.year = section.year\n ret.course = section.course\n ret.goal_id = section.goal_id\n ret.count_ap = section_grades[0][0]\n ret.count_a = section_grades[0][1]\n ret.count_am = section_grades[0][2]\n ret.count_bp = section_grades[0][3]\n ret.count_b = section_grades[0][4]\n ret.count_bm = section_grades[0][5]\n ret.count_cp = section_grades[0][6]\n ret.count_c = section_grades[0][7]\n ret.count_cm = section_grades[0][8]\n ret.count_dp = section_grades[0][9]\n ret.count_d = section_grades[0][10]\n ret.count_dm = section_grades[0][11]\n ret.count_f = section_grades[0][12]\n else:\n ret = None\n\n #except:\n # logging.warning(\"DBAdapter: Error- cannot retrieve section grades: \" + str(id))\n\n return ret", "def test_list_grading_periods_courses(self):\r\n course_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_courses(course_id)", "def get_categories():\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = [{'title': 'Top 10 Courses'}]\n\n for c in soup.find(id='main_aside').findAll('h4'):\n output.append({'title': c.text})\n\n return output", "def amenities(self):\n ats = storage.all(Amenity)\n ltats = []\n for objects in ats.values():\n if self.amenity_ids == objects.id:\n ltats.append(objects)\n return ltats", "def json_export(json_file,gradebook):\n \n try:\n 
import json\n except ImportError:\n warnings.warn('Failed to import json module. Cannot execute json_export')\n return\n if not hasattr(json_file,'write'):\n if not isinstance(json_file,basestring) or not \\\n os.path.exists(os.path.dirname(os.path.abspath(json_file))):\n raise ValueError, 'Argument \\'json_file\\' is not readable, ' \\\n 'and could not be validated as a file path.'\n else:\n json_file = open(json_file,'w+')\n \n if not isinstance(gradebook,Gradebook):\n raise TypeError, 'gradebook argument must be a Gradebook objcet.'\n \n cat_dict = dict(gradebook._Gradebook__categories)\n all_items = []\n for cat_name in cat_dict:\n gr_leest = cat_dict[cat_name].grades.select(docopy=True,aslist=True)\n for i in gr_leest:\n x = {}\n if not isinstance(i,Grade):\n continue\n x['type']='Grade'\n x['name']=i.name\n if i.parent is not None:\n if i.parent.name == cat_name or i.parent.name in cat_dict:\n x['parent'] = i.parent.name\n else:\n x['parent'] = None\n attribs = {}\n attribs['score'] = i.score\n attribs['maximum'] = i.maximum\n attribs['weight'] = i.weight\n attribs['extra_credit'] = i.extra_credit\n if i.overrides:\n attribs['overrides'] = list(i.overrides)\n for ovrrd in i.overrides:\n if not isinstance(overrd,basestring):\n attribs['overrides'].remove(overrd)\n if i.timestamp and isinstance(i.timestamp,datetime.datetime):\n tmstmp = calendar.timegm(i.timestamp.utctimetuple())\n attribs['timestamp'] = tmstmp\n if i.identifiers:\n attribs['identifiers'] = dict(i.identifiers)\n \n x['attribs'] = attribs\n all_items.append(x)\n catt = {'type':'Category','name':cat_name}\n if cat_dict[cat_name].parent is not None:\n catt['parent'] = cat_dict[cat_name].parent.name\n else:\n catt['parent'] = None\n catt['attribs'] = dict(cat_dict[cat_name]._Category__attribs)\n all_items.append(catt)\n \n grbk = {'type':'Gradebook','name':gradebook.name,'user':gradebook.user}\n all_items.append(grbk)\n enc_me = {\"grading\": all_items}\n \n encoder = json.JSONEncoder(indent=4,separators=(', ',': '))\n enc = encoder.encode(enc_me)\n \n json_file.write(enc)\n json_file.close()", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def upcoming_courses(aud):\n \n courses = [c for c in aud.all_courses() if c.grade == u\"*\"]\n return [c.number.replace(\"-\", \"\") for c in courses]", "def get_class_grades(class_id):\n\n grades = []\n quiz_grades = query_db(\n \"SELECT people.name, quizzes.name, grade FROM quiz_grades JOIN people \"\n \"ON quiz_grades.student_id=people.id JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in quiz_grades:\n grade_class = {}\n grade_class[\"student_name\"] = grade[0]\n grade_class[\"thing_name\"] = str(grade[1]) + \" (Quiz)\"\n grade_class[\"grade\"] = grade[2]\n grades.append(grade_class)\n assignment_grades = query_db(\n \"SELECT people.name, assignments.name, grade FROM assignment_grades \"\n \"JOIN people ON assignment_grades.student_id=people.id \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics ON assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n 
[class_id],\n )\n for grade in assignment_grades:\n grade_assign = {}\n grade_assign[\"student_name\"] = grade[0]\n grade_assign[\"thing_name\"] = str(grade[1]) + \" (Assignment)\"\n grade_assign[\"grade\"] = grade[2]\n grades.append(grade_assign)\n return grades", "def print_loc_acrnym():\n\n #Method2\n val = College.objects.values('acronym','contact')\n for i in val:\n print(i['acronym'],i['contact'])" ]
[ "0.5233529", "0.5082282", "0.5039836", "0.49076796", "0.49012667", "0.4887644", "0.4880797", "0.48579165", "0.48460177", "0.4806707", "0.48065493", "0.47938785", "0.478363", "0.47725368", "0.47473636", "0.47454002", "0.47437078", "0.47360337", "0.47296265", "0.47234464", "0.47124693", "0.4704325", "0.4670285", "0.4658767", "0.46473724", "0.46345207", "0.46278307", "0.4606785", "0.4601236", "0.45969978" ]
0.71396613
0
Add a child config
def add(self, key, child_config):
    self.__dict__[key] = child_config
    child_config.root = self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_config(self, parent_node, child_value):\n edge_cost = self.cost(parent_node.value, child_value)\n child_node = Node(\n child_value,\n parent=parent_node,\n cost=parent_node.cost + edge_cost,\n depth=parent_node.depth + 1\n )\n parent_node.children.append(child_node)\n self.nodes.append(child_node)\n # self.tree = KDTree(np.vstack((self.tree.data, np.array([child_value[0], child_value[1]]))))\n coords = child_value[:2] # child_value only has 2 coords (x, y) right now, but it may have theta in the future.\n self.idx.insert(self.size, self.make_bounding_box(coords))\n self.size += 1\n \n return child_node", "def add_subconfig(self, name, arg_kws=None, func=None):\n if name in self.subconfig:\n raise ValueError(\"Subconfig '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict()\n argparser = self.subparsers.add_parser(name, **arg_kws)\n subconfig = SubConfig(argparser, self.config, name, func)\n self.subconfig[name] = subconfig\n return subconfig", "def inherit_config(child, parent, keys):\n for key in keys:\n if key not in child.keys():\n child[key] = parent[key]\n print(\n \"{} not found in io.yaml file, falling back to main config\".format(key)\n )\n\n return child", "def addChild(self, title, path, orig_cp):\n cp = L10nConfigParser(urljoin(self.baseurl, path), **self.defaults)\n cp.loadConfigs()\n self.children.append(cp)", "def new_child(self, prefix: str, root: Any = None, values: Dict[str, Any] = None) -> 'Config':\n config = Config(prefix, self)\n if root and prefix:\n config[prefix] = root\n if values:\n config.add_all(values)\n return config", "def add_child(self, node):\n\n\t\tif Defaults == node.__class__:\n\t\t\tself.__defaults = node\n\t\telif Variables == node.__class__:\n\t\t\tself.__variables = node\n\t\telif Servers == node.__class__:\n\t\t\tself.__servers = node\n\t\telif FileSets == node.__class__:\n\t\t\tself.__filesets = node\n\t\telif Targets == node.__class__:\n\t\t\tself.__targets = node\n\t\telse:\n\t\t\traise DepFileParsingError()\n\n\t\treturn True", "def add_child(self, child):\r\n self.children.append(child)", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def add_child(self, child):\r\n self.children.append(child)", "def override(self, parent):\n return self.__class__(Cfg._mergedicts(self, parent, True))", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.childs.append(child)", "def add_child(self, text, alert_on_duplicate=False, idx=None, force_duplicate=False):\n\n if idx is None:\n idx = len(self.children)\n # if child does not exist\n if text not in self:\n new_item = HConfigChild(self, text)\n self.children.insert(idx, new_item)\n self.children_dict[text] = new_item\n return new_item\n # if child does exist and is allowed to be installed as a duplicate\n elif self._duplicate_child_allowed_check() or force_duplicate:\n new_item = HConfigChild(self, text)\n self.children.insert(idx, new_item)\n self.rebuild_children_dict()\n return new_item\n else:\n # If the child is already present and the parent does not allow\n # duplicate children, return the existing child\n # Ignore duplicate remarks in ACLs\n if alert_on_duplicate and not text.startswith('remark '):\n if self is self.root:\n path = [text]\n else:\n path = list(self.path()) + [text]\n self.logs.append(\"Found a duplicate section: {}\".format(path))\n return self.get_child('equals', text)", "def 
addChild( self, child ):\n\n self.childs.append( child )", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def _init_child(self,child,path):\n pass", "def add_child(self, child, label):\n self.children[label] = child\n child.parents.append(self)", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def add_config_field(self, content_type, name, *args, **kwargs):\n if name == 'representation_args':\n raise ValueError('{} is a reserved Config field name'.format(name))\n self._add_config_arg(ConfigField, content_type, name, *args, **kwargs)", "def append_child(self, child):\n\t\tself._children.append(child)", "def add_child(self, child):\n name = child.name\n self._children[name] = child\n self._name_dict[name.split('-')[0]] += 1", "def create_subparser(self, parent, storage):\n p = parent.add_parser(\n 'inject-config',\n help=\"Inject a configuration file into an OVF package\",\n usage=self.UI.fill_usage(\"inject-config\", [\n \"PACKAGE -c CONFIG_FILE [-o OUTPUT]\",\n \"PACKAGE -s SECONDARY_CONFIG_FILE [-o OUTPUT]\",\n \"PACKAGE -c CONFIG_FILE -s SECONDARY_CONFIG_FILE [-o OUTPUT]\",\n ]),\n description=\"\"\"Add one or more \"bootstrap\" configuration \"\"\"\n \"\"\"file(s) to the given OVF or OVA.\"\"\")\n\n p.add_argument('-o', '--output',\n help=\"\"\"Name/path of new VM package to create \"\"\"\n \"\"\"instead of updating the existing package\"\"\")\n\n p.add_argument('-c', '--config-file',\n help=\"\"\"Primary configuration text file to embed\"\"\")\n p.add_argument('-s', '--secondary-config-file',\n help=\"\"\"Secondary configuration text file to embed \"\"\"\n \"\"\"(currently only supported in IOS XRv for \"\"\"\n \"\"\"admin config)\"\"\")\n p.add_argument('PACKAGE',\n help=\"\"\"Package, OVF descriptor or OVA file to edit\"\"\")\n p.set_defaults(instance=self)\n\n storage['inject-config'] = p", "def spawnWithConfig(self, config, here, there):\n raise NotImplementedError(\"subclasses must implement the specifics\")", "def add(ctx, option, value):\n properties = option.split(\".\")\n section = properties[0]\n option = properties[1]\n cfg = ctx.obj['cfg']\n if not cfg.has_section(section):\n cfg.add_section(section)\n cfg.set(section, option, value)\n with open(config_path(), 'w') as fp:\n cfg.write(fp)", "def add_child(self, child, probe_id=None):\n node = None\n matching_nodes = [x for x in self.children if x.name == child.name] # see if the added node has already in its children list\n # print(\"[*] add children with the name {}.. 
matching_nodes: {}\".format(child.name, matching_nodes))\n if len(matching_nodes) > 0:\n node = matching_nodes[0]\n if probe_id is not None:\n node.probes = probe_id\n # print(\"\\t[*] current node: {}\".format(node.name))\n if node is None:\n if probe_id is not None:\n child.probes = probe_id\n self.children.append(child)\n node = child\n # print(\"\\t[*] node {} is appended to {} child list\".format(node.name, self.name))\n return node", "def addConfiguration(self, d):\n self.__populateDict(self._configuration, d)" ]
[ "0.7265803", "0.6487871", "0.64664084", "0.62878114", "0.6264923", "0.6200054", "0.6032241", "0.59954774", "0.5990614", "0.59605205", "0.59455884", "0.59455884", "0.591581", "0.5893812", "0.5794489", "0.5790334", "0.57764447", "0.5702759", "0.56942874", "0.5688074", "0.56839925", "0.5677099", "0.56742185", "0.56387055", "0.56370103", "0.5616215", "0.5584076", "0.5555786", "0.554189", "0.5536735" ]
0.7865205
0
Format strings using the CONFIG object. This method uses Python's built-in `str.format()` method. All root properties in CONFIG are passed in as kwargs. The properties are lazily evaluated and recursively expanded.
def format(self, value, key=None, **kwargs):
    if not isinstance(value, str):
        return value

    # always format strings using the root so the full path is available
    if self.root:
        return self.root.format(value, key, **kwargs)

    variables = CONFIG_VARIABLE_PATTERN.findall(value)
    expanded = {}
    for variable in variables:
        if variable not in kwargs:
            try:
                root_key = variable.split(".")[0]
                root = self.root if self.root else self
                expanded[root_key] = self.format(getattr(root, root_key), variable, **kwargs)
            except AttributeError:
                raise MissingConfiguration(variable, key)

    expanded.update(**kwargs)

    return value.format(**expanded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursively_update_config(config, string_formatting_dict):\n\n for k in _iterate_list_or_dict(config):\n v = config[k]\n if isinstance(v, dict) or isinstance(v, list):\n recursively_update_config(v, string_formatting_dict)\n else:\n if _key_in_string(v, string_formatting_dict):\n config[k] = v.format(**string_formatting_dict)", "def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str", "def format_string(self, template):\n out_str = \"\"\n search_property_name = \"\"\n in_property = False\n for char in template:\n if (in_property):\n if (char == '%'):\n if (len(search_property_name) > 0):\n prop_value = \"\"\n try:\n prop_value = str(self.get_property(search_property_name))\n except KeyError:\n pass\n out_str += prop_value\n search_property_name = \"\"\n in_property = False\n else:\n search_property_name += char\n else:\n if (char == '%'):\n in_property = True\n else:\n out_str += char\n\n # Handle unterminated property names\n if (in_property):\n out_str += '%'\n out_str += search_property_name\n\n return out_str", "def cfg_to_prop_string(cfg, key_transform=lambda k: k, value_transform=lambda v: v, separator=\";\"):\n return separator.join([\"%s:%s\" % (key_transform(key), value_transform(value)) for key, value in iteritems(cfg)])", "def format(self, **kw):\n params = self.defaults.copy()\n params.update(kw)\n if self.filter:\n self.filter(self, params)\n msg = self.msg\n if self.key is not None:\n key = self.key.format(**params)\n msg = msg[key]\n return msg.format(**params)", "def format(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append('{}={}'.format(k, v))\n print ', '.join(sub_items)", "def dump_config(self, obj, level=-1):\n indent = u\" \"\n if level >= 0:\n self._nginx_config += f\"{level * indent}{{\\n\"\n if isinstance(obj, dict):\n for key, val in obj.items():\n if hasattr(val, u\"__iter__\") and not isinstance(val, str):\n self._nginx_config += f\"{(level + 1) * indent}{key}\\n\"\n self.dump_config(val, level + 1)\n else:\n self._nginx_config += f\"{(level + 1) * indent}\" \\\n f\"{key} {val};\\n\"\n else:\n for val in obj:\n self._nginx_config += f\"{(level + 1) * indent}{val};\\n\"\n if level >= 0:\n self._nginx_config += f\"{level * indent}}}\\n\"", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def format_string(self, pat=None, pat_args={}):\n if pat is None:\n pat = self.parent.pat\n if pat_args == {}:\n pat_args = self.parent.pat_args\n return entry_format.output(self, pat, pat_args)", "def GetConfigAsString(self, config, enum_values, index):\n string = textwrap.dedent(\"\"\"\\\n [{index}] = {{\n .monitor = {monitor_name},\n .stack_level = 0x{address:02X},\n .input_mask = 0x{input_mask:03X},\n .v_balance_min = {balance_min_cutoff}f,\n .v_balance_thres = {balance_thres}f,\n .v_balance_hyst = {balance_hysteresis}f,\n .num_max_simult_bal = {max_simult_balance}L,\n .num_series_cells = {num_series_cells}L,\n .control = {{\n .under_volt_thres = {under_volt_thres}f,\n .over_volt_thres = {over_volt_thres}f,\n .reference_on = {reference_on},\n .discharge_permitted = {discharge_permitted},\n .rate = {rate_str},\n .cell_channels = 
{cell_ch_str},\n .aux_channels = {aux_ch_str},\n .stat_channels = {stat_ch_str},\n .discharge_timeout = {dcto_str},\n .self_test_mode = {self_test_str}}}}},\n \"\"\").format(\n index=index,\n monitor_name=enum_values['monitor'],\n rate_str=rate_helper.Name(config['rate']),\n cell_ch_str=cell_ch_helper.Name(config['cell_ch']),\n aux_ch_str=aux_ch_helper.Name(config['aux_ch']),\n stat_ch_str=stat_ch_helper.Name(config['stat_ch']),\n dcto_str=dcto_helper.Name(config['discharge_timeout']),\n self_test_str=self_test_helper.Name(config['self_test_mode']),\n **config)\n return string", "def format_yaml(template, config):\n formatted = template\n for k, v in config.items():\n formatted = formatted.replace('${%s}' % k, v)\n return formatted", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def print_formatted_values(**kwargs):\n string = ', '.join([f'{k}: {format_number(kwargs[k])}' for k in kwargs])\n print(string)", "def digest_config(obj, kwargs, local_args = {}):\n ### Assemble list of CONFIGs from all super classes\n classes_in_heirarchy = [obj.__class__]\n configs = []\n while len(classes_in_heirarchy) > 0:\n Class = classes_in_heirarchy.pop()\n classes_in_heirarchy += Class.__bases__\n if hasattr(Class, \"CONFIG\"):\n configs.append(Class.CONFIG) \n\n #Order matters a lot here, first dicts have higher priority\n all_dicts = [kwargs, filtered_locals(local_args), obj.__dict__]\n all_dicts += configs\n item_lists = reversed([d.items() for d in all_dicts])\n obj.__dict__ = dict(reduce(op.add, item_lists))", "def __str__(self):\n config_str = 'Configurations\\n'\n config_str += pprint.pformat(self.__dict__)\n return config_str", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def format(self, *args, **kwargs) -> String:\n pass", "def format(self, *args, **kwargs):\n return self._format(args, kwargs)", "def format_string(self, pat=None, pat_args=None):\n if pat is None:\n pat = self.pat\n if pat_args is None:\n pat_args = self.pat_args\n return '\\n'.join(e.format_string(pat, pat_args) for e in self)", "def _format_bases_config(bases_config: BasesConfiguration) -> str:\n return \"_\".join([_format_run_on_base(r) for r in bases_config.run_on])", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def print_config(config, logger):\n for k, v in config.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))", "def format(self, obj, indent=0):\r\n return pformat(obj, indent=indent, depth=self.depth)", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def log_cfg(cfg: dict, logger: Logger, prefix: str = \"cfg\"):\n for k, v in cfg.items():\n if isinstance(v, dict):\n p = \".\".join([prefix, k])\n log_cfg(v, logger, prefix=p)\n else:\n p = \".\".join([prefix, k])\n logger.info(\"{:34s} : {}\".format(p, 
v))", "def get_config_string(params, units=None):\n compact_str_items = []\n # first make a list of compact strings for each parameter\n for k, v in params.items():\n unit = \"\"\n if isinstance(units, dict): #check if not None not enough, units could be mocked which causes errors\n unit = units.get(k, \"\")\n compact_str_items.append(k + \"=\" + str(v) + unit)\n # and finally join them\n compact_str = \", \".join(compact_str_items)\n return compact_str", "def context_formatter(\n full_context: dict,\n *,\n flask_context: dict,\n schema_context: dict,\n model_context: dict,\n):\n sections = [(\"Flask\", flask_context)]\n if schema_context: # pragma: no cover\n sections.append((\"Schemas\", schema_context))\n if model_context: # pragma: no cover\n sections.append((\"Models\", model_context))\n\n additional_context_keys = (\n full_context.keys()\n - flask_context.keys()\n - schema_context.keys()\n - model_context.keys()\n )\n additional_context = {\n key: full_context[key] for key in additional_context_keys\n }\n if additional_context:\n sections.append((\"Additional\", additional_context))\n return \"\\n\".join([format_section(*section) for section in sections])", "def __format__(self, format_spec):\n # Reject anything that isn't an s\n if format_spec[-1] != 's':\n raise ValueError('{} format specifier not understood for this object',\n format_spec[:-1])\n # Output in this example will be (<a>,<b>,<c>)\n raw = \"(\" + \",\".join([str(self.a), str(self.b), str(self.c)]) + \")\"\n # Honor the format language by using the inbuilt string format\n # Since we know the original format_spec ends in an 's'\n # we can take advantage of the str.format method with a\n # string argument we constructed above\n return \"{r:{f}}\".format( r=raw, f=format_spec )", "def formatargvalues(args, varargs, varkw, locals,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n def convert(name, locals=locals,\r\n formatarg=formatarg, formatvalue=formatvalue):\r\n return formatarg(name) + formatvalue(locals[name])\r\n specs = []\r\n for i in range(len(args)):\r\n specs.append(strseq(args[i], convert, join))\r\n if varargs:\r\n specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\r\n if varkw:\r\n specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\r\n return '(' + string.join(specs, ', ') + ')'", "def str(self):\n\n list_of_entries = ['{0} name={1}'.format(self.layer_type, self.name)]\n for key, value in sorted(self.config.items()):\n if isinstance(value, str) and re.search('=', value):\n # the value is a string that contains an '=' sign, so we need to\n # enclose it in double-quotes, otherwise we woudldn't be able to\n # parse from that output.\n if re.search('\"', value):\n print(\"Warning: config '{0}={1}' contains both double-quotes \"\n \"and equals sign; it will not be possible to parse it \"\n \"from the file.\".format(key, value), file=sys.stderr)\n list_of_entries.append('{0}=\"{1}\"'.format(key, value))\n else:\n list_of_entries.append('{0}={1}'.format(key, value))\n\n return ' '.join(list_of_entries)" ]
[ "0.61522377", "0.61236", "0.60001415", "0.57900184", "0.57206047", "0.57040524", "0.5618517", "0.5617663", "0.56145966", "0.56098795", "0.5547251", "0.55374193", "0.5514143", "0.54919505", "0.5468669", "0.54580975", "0.54474247", "0.54033774", "0.5401085", "0.53975976", "0.539324", "0.53666776", "0.5347559", "0.53226763", "0.5318113", "0.52701443", "0.52681345", "0.5237397", "0.5222003", "0.5207491" ]
0.75447696
0
Directory where ixian is installed
def IXIAN(cls): import ixian return os.path.dirname(os.path.realpath(ixian.__file__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def ifaces_dir(self):\n return self.system_path(self._ifaces_dir)", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"", "def eplus_home(self):\n if self.idf.file_version <= Version(\"7.2\"):\n install_dir = self.idf.file_version.current_install_dir / \"bin\"\n else:\n install_dir = (\n self.idf.file_version.current_install_dir\n / \"PreProcess\"\n / \"GrndTempCalc\"\n )\n return install_dir.expand()", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def package_dir(self):\r\n return \".\"", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def personaldir():\n if platform == 'windows':\n return os.path.join(os.environ['APPDATA'], 'automaton')\n else:\n return os.path.expanduser('~/.automaton/')", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def syspath():\n import sys\n pprint(sys.path)", "def home():\n if sys.prefix == sys.exec_prefix:\n return sys.prefix\n else:\n return ':'.join((sys.prefix, sys.exec_prefix))", "def get_axebindir():\n import sys\n\n if 'axesrc' in sys.modules:\n modfile = sys.modules['axesrc'].__file__\n axebindir = os.path.abspath(os.path.join(os.path.dirname(modfile),'../bin/'))\n\n else:\n from pyraf import iraf\n\n # just read all variables\n all_variables = iraf.getVarDict()\n\n arch = all_variables['arch']\n stsdas = all_variables['stsdas']\n # compose the bin directory\n axebindir = os.path.join(stsdas, 'bin'+arch)\n #axe = all_variables['axe']\n #axebindir = all_variables['axebin']\n\n # compose the bin directory\n #axebindir = os.path.join(axe, 'bin')\n\n # return the path\n return axebindir", "def radishdir():\n return __RADISH_FILES_DIR__", "def install_location(self):\r\n return self._content_at_path('/template/os/install/%s' % self.install_type)", "def datadir():\n return '../data/'", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/egs5\"", "def sirv_truth_dir(self):\n return op.join(self.root_dir, \"SIRV\")", "def lib_dir(self):\n raise NotImplementedError('Implement this property.')", "def get_qiime_project_dir():\r\n # Get the full path of util.py\r\n current_file_path = abspath(__file__)\r\n # Get the directory containing util.py\r\n current_dir_path = dirname(current_file_path)\r\n # Return the 
directory containing the directory containing util.py\r\n return dirname(current_dir_path)", "def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path", "def get_enry_dir() -> str:\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"build\"))", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def dcm2niix() -> str:\n fsldir = fslplatform.platform.fsldir\n candidates = [\n shutil.which('dcm2niix')\n ]\n\n if fsldir is not None:\n candidates.insert(0, op.join(fsldir, 'bin', 'dcm2niix'))\n\n for c in candidates:\n if c is not None and op.exists(c):\n return c\n\n return 'dcm2niix'", "def root_dir():\n return dirname(dirname(__file__))", "def get_int_dir():\n try:\n int_dir = os.environ[\"INT_DIR\"]\n except KeyError:\n int_dir = input(\"Enter the (full path) directory \"\n \"where your interactions are stored: \")\n os.system(f\"echo 'export INT_DIR=\\\"{int_dir}\\\"\\n' >> ~/.bash_profile\")\n os.system(\". ~/.bash_profile\")\n return int_dir", "def user_conf_dir(self):\n return os.path.join(BaseDirectory.xdg_config_home, \"speech-dispatcher\")" ]
[ "0.59720445", "0.5938821", "0.5927095", "0.59131306", "0.58979726", "0.586261", "0.5823516", "0.58234173", "0.5788261", "0.57610697", "0.5754324", "0.57422614", "0.5687996", "0.565429", "0.5623156", "0.56043833", "0.5588141", "0.5566707", "0.55413353", "0.55124944", "0.55004644", "0.5496944", "0.5494677", "0.5489928", "0.54826725", "0.54781085", "0.5447915", "0.54107183", "0.53995085", "0.5384671" ]
0.7647922
0
Finds a zero of the function f between the points a and b, where f takes opposite signs. The optional parameter toll indicates the precision with which the value of the zero is to be computed
def bisezione(f,a,b,toll=10**-5): m = (a+b)/2 f_m = f(m) while abs(f_m) > toll: if f(a)*f_m < 0: b = m elif f(b)*f_m < 0: a = m elif f_m == 0: print("Trovata solzione esatta") return m else: print("Metodo fallito") return None m = (a+b)/2 f_m = f(m) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Calcular(a: float) ->float:\n \n return (a*2)", "def p() -> float:\n return 0.9", "def f(x0: float, x1: float) -> float:\n return 8 - (x0 - 2) ** 2 - (x1 - 2) ** 2", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def p2f(self):\n\n stale = self.m_f\n self.m_f = self.v.b / self.m_v", "def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)", "def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)", "def erfc(x):\n return 0.0", "def f2b(self, fres, f):\n return f / fres", "def biseccion(func, a, b, tol=1e-4):\n p = (a + b) / 2 \n while np.fabs(func(p)) > tol:\n p = (a + b) / 2 \n if func(a) * func(p) < 0:\n b = p\n elif func(a) * func(p) > 0:\n a = p\n else:\n return p\n return p", "def sf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.one\n if x > 1:\n return mp.zero\n return mp.betainc(a, b, x1=x, x2=1, regularized=True)", "def RegulaFalsiMethod(f, a=0.0, b=0.75, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\terror = tol + 1\n\t\n\terrs = []\n\ti = 0\n\n\twhile error > tol:\n\t\tx = (a*f_b - b*f_a) / (f_b - f_a)\n\t\tf_x = f(x)\n\n\t\terrs.append(error)\n\n\t\tif f_a*f_x > 0:\n\t\t\ta = x\n\t\t\tf_a = f_x\n\t\telif f_b*f_x > 0:\n\t\t\tb = x\n\t\t\tf_b = f_x\n\t\telse:\n\t\t\tbreak\n\n\t\terror = np.abs(f_x)\n\t\ti = i+1\n\tend = time()\n\treturn x, (end-start), i", "def f2p (f):\n #return 2*math.log(f, 11000) - 1\n #return f/11000 - 1\n return f/5500 - 1", "def _compute_f1(self, tp: torch.Tensor, fp: torch.Tensor,\n fn: torch.Tensor) -> float:\n precision = tp / (tp + fp).clamp(min=1e-8)\n recall = tp / (tp + fn).clamp(min=1e-8)\n f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)\n return float(f1.mean())", "def F(self, t, x, **params):\n return 0.*x", "def b(q):\n if q == 0 or q == 1:\n return float(0.0)\n return -(q * log2(q) + (1 - q) * log2(1 - q))", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def erf(x):\n return 0.0", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)", "def calc_f1(precision: float, recall: float) -> float:\r\n return 2 * (precision * recall) / (precision + recall)", "def ft(t):\r\n ft = t ** (1.0 / 3.0) if t > 0.008856 else 7.787 * t + 4 / 29\r\n return ft", "def calculate_automation(f, t):\n return round(5 * f * t)", "def f0(E, fermi, T):\n return 1. / (1. 
+ np.exp((E - fermi) / (k_B * T)))", "def ifrft(f, a):\n return frft(f, -a)", "def fla (mva, vnom):\r\n x=mva*1000000\r\n y=(vnom*1000)\r\n z=round(x/y,3)\r\n return z", "def fppp(x):\n return (-2000.00*math.cos(10.0.x+1))", "def c2f(t):\r\n return round(9*t/5 + 32)", "def f_value(a, b):\r\n if not any(a) or not any(b) or len(a) <= 1 or len(b) <= 1:\r\n raise ValueError(\"Vectors should contain more than 1 element\")\r\n F = var(a) / var(b)\r\n dfn = len(a) - 1\r\n dfd = len(b) - 1\r\n return dfn, dfd, F", "def f(a):\n b = a * 2\n while b.norm().asscalar() < 1000:\n b = b * 2\n if b.sum().asscalar() > 0:\n c = b\n else:\n c = 100 * b\n return c", "def BisectionMethod(f, a=0, b=1, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\t\n\t# Initialization of errors and iters\n\terrs = []\n\ti = 0\n\n\tif f_a == 0:\n\t\treturn a\n\telif f_b == 0:\n\t\treturn b\n\telif f_a*f_b > 0:\n\t\tprint(\"The function values have the same sign!\")\n\telse:\n\t\terror = b-a\n\t\twhile error > tol:\n\t\t\tc = (b+a)/2\n\t\t\tf_c = f(c)\n\t\t\t\n\t\t\terrs.append(error)\n\t\t\t\n\t\t\tif f_a*f_c > 0:\n\t\t\t\ta = c\n\t\t\t\tf_a = f_c\n\t\t\telif f_a*f_c < 0:\n\t\t\t\tb = c\n\t\t\t\tf_b = f_c\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\terror = b-a\n\t\t\ti = i+1\n\tend = time()\n\treturn c, (end-start), i" ]
[ "0.65174574", "0.6448639", "0.63864565", "0.6370828", "0.6296283", "0.6275515", "0.6270769", "0.6238707", "0.6191305", "0.61809313", "0.618049", "0.6167555", "0.6149057", "0.61406493", "0.61188513", "0.6118763", "0.61178863", "0.60342443", "0.6006515", "0.6004016", "0.6002765", "0.59564394", "0.5937206", "0.5922853", "0.59220946", "0.5883357", "0.5857809", "0.5855197", "0.5830326", "0.582943" ]
0.6965852
0
Returns the radical of n
def radicale(n): r = 1 for p in primi(n+1): if p>n: break if n%p==0: r *= p n = n//p return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r", "def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret", "def _rnm(self, n, m, r):\n r_sum = 0\n m = int(abs(m))\n u = int((n-m)/2)\n v = int((n+m)/2)\n for s in range(0, u+1):\n numerator = pow(-1, s) * math.factorial(int(n-s)) * pow(r, n-2*s)\n try:\n denominator = math.factorial(s) * math.factorial(v-s) * math.factorial(u-s)\n except ValueError:\n raise ValueError('(s,n,m,u,v) = (%d,%d,%d,%d,%d)' % (s, n, m, u, v))\n r_sum += numerator / denominator\n return r_sum", "def radrad(rxn_class):\n return rxn_class[2]", "def rad(x) :#en mm!\r\n return topdia(x)/2.0", "def nCr():\n return math.factorial(self.nn) / (math.factorial(self.rr) * math.factorial(self.nn - self.rr))", "def calculateSNR(self):\n pass", "def _r_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def get_n1(r,N):\n n1 = N - np.sum(r)\n return n1", "def ncr(n, r):\n r = min(r, n-r)\n if r == 0:\n return 1\n if r < 0:\n return 0\n numer = reduce(op.mul, xrange(n, n-r, -1))\n denom = reduce(op.mul, xrange(1, r+1))\n return numer / denom", "def buscarRecorrido(n):\n #se llama la función para generar una matriz de tamaño N x N\n dist = generarMatriz(n)\n #empezamos con la ciudad 0\n primCiudad = 0\n\n arbol = ArbolNario(primCiudad,n)\n nodo = arbol.getRaiz()\n\n startTime = timeit.default_timer()#Comienzo del algoritmo Hill-Climbing\n while(nodo.getHijos().size > 0):\n hijos = nodo.getHijos()\n nodo = encontrarMejor(nodo.getId(), hijos, dist) \n exeTime = round((timeit.default_timer() - startTime)*SEC_A_MICRO,1)\n return obtenerRecorrido(nodo, dist), exeTime", "def n(self):\n pass", "def Crn(r, n):\n ret = 1\n if(r>n/2):\n return Crn(n-r, n)\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret/fact(r)", "def update_RL(self, n):\n newR = self._mps_RL(self.R[n], self.A[n], self.A[n])\n if n == self.L - 1:\n self.R[self.L + 1] = newR.flat[0]\n else:\n self.R[n + 1] = newR", "def nr():\n pass", "def factR(n):\n if n == 1:\n return n\n return n*factR(n-1)", "def rof(number):\n\n return round(number * 2) / 2", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def ComputeNrb(self):\r\n pass", "def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2", "def _n_ball_rad(n, vol):\n unitvol = _n_ball_vol(n, 1)\n radius = (vol / unitvol) ** (1.0 / n)\n return radius", "def item_tres(n):\n if n <= 0.167:\n return 0\n elif n > 0.167 and n <= 0.333:\n return 1\n elif n > 0.333 and n <= 0.500:\n return 2\n elif n > 0.500 and n <= 0.667:\n return 3\n elif n > 0.667 and n <= 0.834:\n return 4\n elif n > 0.834 and n <= 1.000:\n return 5", "def _irep_to_value(self,n,i):\n if i == 1:\n j,k = divmod(n,9)\n v = (k+1)*10**j\n return v\n else:\n j,k = divmod(n,int(10.0/i))\n if k == 0:\n v = 10**j\n else:\n v = i*k*10**j\n return v", "def fact_r(n):\n \n if n == 1:\n return n\n \n return n * fact_r(n-1)", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def ne(n):\n return 4*n*n - 2*n + 1", "def toRadString(self):\r\n pass", "def nw(n):\n return 4*n*n + 1", "def Get_direction(n):\n if abs(n) == 0:\n return 0\n else:\n return n / abs(n)" ]
[ "0.6703785", "0.65736157", "0.6541312", "0.6525386", "0.6515335", "0.63054556", "0.6147716", "0.61371934", "0.61207694", "0.6074879", "0.60558325", "0.605559", "0.6001547", "0.59967726", "0.5936649", "0.5906539", "0.58998317", "0.5871071", "0.58641", "0.5848409", "0.58474976", "0.5843289", "0.5839744", "0.58339447", "0.5823272", "0.5820578", "0.5820448", "0.58011466", "0.5767946", "0.5758748" ]
0.68493146
0
Kruskal's algorithm for finding the MST of a graph, supplied via its adjacency matrix; it uses the ring_finder function to search for cycles in the graph and min_nonzero_idx to find the indices of the minimum-cost edges
def kruskal(m): n = m.shape[0] m_ret = np.zeros([n,n], dtype=int) while np.count_nonzero(m_ret) != 2*(n-1): i_min, j_min = min_nonzero_idx(m) n_min = m[i_min, j_min] m[i_min, j_min], m[j_min, i_min] = 0, 0 m_ret[i_min, j_min], m_ret[j_min, i_min] = n_min, n_min if ring_finder(m_ret, [i_min], []): m_ret[i_min, j_min], m_ret[j_min, i_min] = 0, 0 return m_ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kruskal(self):\n AGM = []\n i = j = 0\n \n self.grafo = sorted(self.grafo,key=lambda item:item[2])\n\n pai = []\n nivel = []\n\n for vertice in range(self.nVer):\n pai.append(vertice)\n nivel.append(0)\n\n while j < self.nVer-1:\n u,v,w = self.grafo[i]\n i+=1\n a = self.findSet(pai,u)\n b = self.findSet(pai,v)\n\n if a!=b:\n j+=1\n AGM.append([u,v,w])\n self.union(a,b,nivel,pai)\n resp = 0\n for u,v,w in AGM:\n resp += w\n print('%.2f' % (resp/100))", "def Kruskal(G): # la fonction prend la liste de edges et de union find\n edges = G.edges\n unionfind_list = G.nodes\n G_k = Graph() # le graph contient le graph de kruskal\n dim = len(unionfind_list) # dimension du nombre de sommet du graph\n kruskal_cost = 0 # initilisation du cout du graphe\n\n sorted_edges = deepcopy(edges)\n sorted_edges.sort() # copy et triage des aretes par cout croissant\n # pour chaque arete on recupere les deux noeuds de leur extremite\n for edge in sorted_edges:\n unionfind_a = edge.get_startnode()\n unionfind_b = edge.get_endnode()\n # s'ils ont deux racines differentes\n if unionfind_a.find() != unionfind_b.find():\n G_k.add_node(unionfind_a)\n G_k.add_node(unionfind_b)\n # on ajoute les deux noeuds et l'arete dans l'arbre de kruskal\n G_k.add_edge(edge)\n # on met a jour le cout\n kruskal_cost += edge.get_vcost()\n unionfind_a.union(unionfind_b)\n # si le nombre d'arete de l'arbre de kruskal est\n # egal au nombre de sommet-1\n # on retourne l'arbre de kruskal et son cout\n if G_k.get_nb_edges() == dim - 1:\n return kruskal_cost, G_k\n return kruskal_cost, G_k", "def find_kx(input_params, search_domain=None, show_progress=False,\r\n grid_points=20, iterations=9, reduction_factor=9,\r\n plot_full_region=True):\r\n w = input_params['w']\r\n d_list = input_params['d_list']\r\n ex_list = input_params['ex_list']\r\n ez_list = input_params['ez_list']\r\n mu_list = input_params['mu_list']\r\n N = len(mu_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list)\r\n # error(z) approaches 0 as kx = z approaches a true plasmon mode.\r\n # It's proportional to the determinant of the boundary-condition matrix, \r\n # which equals zero at modes.\r\n def error(kx):\r\n if kx == 0:\r\n return inf\r\n temp_params = input_params.copy()\r\n temp_params['kx'] = kx\r\n should_be_zero = np.linalg.det(bc_matrix(find_kzs(temp_params)))\r\n return should_be_zero / kx**(N+1)\r\n # \"return should_be_zero\" is also OK but has an overall slope that\r\n # makes it harder to find zeros; also, there's a false-positive at k=0.\r\n \r\n # choose the region in which to search for minima. 
My heuristic is:\r\n # The upper limit of kx should be large enough that\r\n # 2 * pi * i * kzm * d ~ 20 for the thinnest layer we have, or 3 times\r\n # the light-line, whichever is bigger.\r\n if search_domain is None:\r\n kx_re_max = max(max(abs((20 / (2 * pi * d_list[i]))\r\n * cmath.sqrt(ez_list[i] / ex_list[i])) for i in range(1,N)),\r\n 3 * w / nu.c0)\r\n kx_re_min = -kx_re_max\r\n kx_im_min = 0\r\n kx_im_max = abs(kx_re_max)\r\n else:\r\n kx_re_min = search_domain[0]\r\n kx_re_max = search_domain[1]\r\n kx_im_min = search_domain[2]\r\n kx_im_max = search_domain[3]\r\n \r\n # Main part of function: Call find_all_zeros()\r\n kx_list = find_all_zeros(kx_re_min, kx_re_max, kx_im_min, kx_im_max, error,\r\n show_progress=show_progress, grid_points=grid_points,\r\n iterations=iterations,\r\n reduction_factor=reduction_factor,\r\n plot_full_region=plot_full_region)\r\n \r\n # sort and remove \"repeats\" with opposite signs\r\n kx_list = sorted(kx_list, key=(lambda kx : abs(kx)))\r\n i=0\r\n while i < len(kx_list) - 1:\r\n if abs(kx_list[i] + kx_list[i+1]) <= 1e-6 * (abs(kx_list[i]) + abs(kx_list[i+1])):\r\n kx_list.pop(i)\r\n else:\r\n i += 1\r\n \r\n # Fix amplifying waves\r\n kx_list = [(-kx if (kx.imag < 0 or (kx.imag==0 and kx.real < 0)) else kx)\r\n for kx in kx_list]\r\n \r\n return kx_list", "def kruskal(Grafo,diferencia):\n edges = list()\n #print(diferencia,\"la diferencia\" )\n for i in range(len(Grafo)): # collect the edges in G\n for v,w in Grafo[i]:\n if (w!=-1):\n edges.append((i,v,w))\n # sort the edges in ascending order w.r.t weights in the edges\n edges.sort(key=lambda x: x[2])## se organiza por peso \n ans,sans = [ list() for i in range(len(Grafo)) ],0\n df = dforest(len(Grafo))\n i = 0\n contador=0\n while i!=len(edges):\n u,v,w = edges[i]\n if df.find(u)!=df.find(v):\n df.union(u,v)\n contador+=1\n if(contador==diferencia):\n #print (w,\"pinche w\")\n return w\n\n i += 1", "def _qt_radius_clustering_minimal(self, min_to_cluster, reduced, unassigned_orphans, cache, max_cycles):\n # Separating components and removing dominated indices reduced runtime on tbpb82 0.4@100% from 10s to 10ms.\n # Before removing dominated, tree_275 0.04@100% found a solution with score 4.0485 after 228k cycles. After, found it in 49k. After adding the second Counter to CoverManager, found it under 1k cycles. 
Each cycle was substantially slower, but the solution still was found ~1000x faster (ms instead of 20 min).\n out_of_range = reduced.copy()\n out_of_range[out_of_range != 0] = 1\n neighbors_of = {}\n for ind in self._not_ignored_inds:\n clstr_inds = np.nonzero(reduced[:,ind] == 0)[0]\n neighbors_of[ind] = set(clstr_inds)\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n num_not_ignored = len(self._not_ignored_inds)\n considered_nbrs, dominated_inds = self._remove_dominated_inds(neighbors_of, chsn_indices, avail_indices, out_of_range)\n # # Process depending on the run parameters\n cache['cycles_used'] = 0\n final_centre_inds, final_scores = [], []\n if min_to_cluster == num_not_ignored: # Critical percent equivalent to 100%\n # Can dramatically speed up the search by separating components\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n subset_to_cluster = len(subset_indices)\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n if max_cycles != None:\n subset_cycles = ceil(subset_to_cluster/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n elif min_to_cluster == num_not_ignored - len(unassigned_orphans):\n # Can still use the component speedup in this case\n orphan_inds = set(unassigned_orphans)\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n if max_cycles != None:\n subset_cycles = ceil(len(subset_indices)/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_to_cluster = len(subset_indices - orphan_inds)\n if subset_to_cluster == 0: # The entire subset is orphaned, so no centers can be found\n if max_cycles != None:\n cycle_rollover += subset_cycles\n continue\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n else:\n # Can't split into components and guarantee optimal, as I can't predict which component should be allowed to miss some variants.\n # May be a way to remove some components from consideration, but likely requires running _qt_radius_cluster_subset() multiple times. May still be faster, so worth considering if more speed is actually useful here.\n # - All unassigned orphans are part of total_allowed_missed by definition. 
So all other clusters are only allowed to miss allowed_missed = total_allowed_missed - len(unassigned_orphans).\n # - The global optimal solution for some component is guaranteed to fall between the solution for that component finding 100% of variants, and the solution for that component finding len(component)-allowed_missed variants. If they are equal, that's the global optimal solution for that component, and it can be excluded from the combined run. If they're unequal, it was a waste of time and the component has to be included in the combined run.\n final_centre_inds, final_scores, _cycles_used = self._qt_radius_cluster_subset(set(neighbors_of.keys()), chsn_indices, avail_indices, considered_nbrs, dominated_inds, min_to_cluster, cache, max_cycles, out_of_range)\n alt_variants = []\n return final_centre_inds, final_scores, alt_variants", "def minimum_spanning_arborescence(sol):", "def solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=[]):\n\n #A = adjacency matrix, u = vertex u, v = vertex v\n def weight(A, u, v):\n return A[u][v]\n\n #A = adjacency matrix, u = vertex u\n def adjacent(A, u):\n L = []\n for x in range(len(A)):\n if A[u][x] > 0 and x != u and A[u][x] != 'x':\n L.insert(0,x)\n return L\n\n #Q = min queue\n def extractMin(Q):\n q = Q[0]\n Q.remove(Q[0])\n return q\n\n #Q = min queue, V = vertex list\n def decreaseKey(Q, K):\n for i in range(len(Q)):\n for j in range(len(Q)):\n if K[Q[i]] < K[Q[j]]:\n s = Q[i]\n Q[i] = Q[j]\n Q[j] = s\n\n #V = vertex list, A = adjacency list, r = root\n def prim(V, A, r):\n u = 0\n v = 0\n\n # initialize and set each value of the array P (pi) to none\n # pi holds the parent of u, so P(v)=u means u is the parent of v\n P=[None]*len(V)\n\n # initialize and set each value of the array K (key) to some large number (simulate infinity)\n K = [999999]*len(V)\n\n # initialize the min queue and fill it with all vertices in V\n Q=[0]*len(V)\n for u in range(len(Q)):\n Q[u] = V[u]\n\n # set the key of the root to 0\n K[r] = 0\n decreaseKey(Q, K) # maintain the min queue\n\n # loop while the min queue is not empty\n while len(Q) > 0:\n u = extractMin(Q) # pop the first vertex off the min queue\n\n # loop through the vertices adjacent to u\n Adj = adjacent(A, u)\n for v in Adj:\n w = weight(A, u, v) # get the weight of the edge uv\n\n # proceed if v is in Q and the weight of uv is less than v's key\n if Q.count(v)>0 and w < K[v]:\n # set v's parent to u\n P[v] = u\n # v's key to the weight of uv\n K[v] = w\n decreaseKey(Q, K) # maintain the min queue\n return P\n\n\n # graph is a list of kingdoms that previous i is the parent of j where j = i + 1 \n graph = prim(adjacency_matrix, list_of_kingdom_names, starting_kingdom)\n\n # key = parent, value = children\n g = {}\n\n for x in range(len(list_of_kingdom_names)):\n g[x] = []\n\n for x in range(len(graph)):\n for y in range(len(graph)):\n if x == graph[y]:\n g[x].append(y) \n\n\n def path(k):\n if not g[k]:\n return [k]\n\n lst = [k]\n\n for child in g[k]:\n lst += path(child) + [k]\n # print(lst)\n\n return lst\n\n\n full_path = path(starting_kingdom)\n\n # print(full_path)\n\n\n\n # return closed_walk, conquered_kingdoms", "def _G_to_km_on_basis_single_level(self, w, m):\n kB = self._sym.kBoundedSubspace(self.k,t=1)\n g = kB.K_kschur()\n mon = self.km()\n if m < w.length():\n return 0\n ans = self.zero()\n for la in Partitions(m, max_part = self.k):\n ans += g.homogeneous_basis_noncommutative_variables_zero_Hecke((la)).coefficient(w)*mon(la)\n return ans", "def rkm(X, init_W, s, 
plot_ax=None):\n\n #extract useful info from args\n N = X.shape[0]\n d = X.shape[1]\n NC = init_W.shape[0]-2\n\n #construct boundary matrix\n boundary = init_W[[0,NC+1],:]\n B=np.zeros([NC,d],float)\n B[[0,NC-1],:]=boundary\n\n #construct regularizer hessian\n AW = np.diag(np.ones(NC))+np.diag(-0.5*np.ones(NC-1),1)+np.diag(-0.5*np.ones(NC-1),-1)\n\n #compute initial labels\n XW_dst = distance.cdist(X,init_W,'sqeuclidean')\n u = XW_dst.argmin(1)\n\n #iterate the minimizer\n converged = False\n it = 0\n while(not converged):\n it = it+1\n #print('iteration '+repr(it))\n\n #compute cardinality\n W_card=np.zeros(NC+2,int)\n for i in range(NC+2):\n W_card[i] = np.sum(u==i)\n\n #compute centroid matrix\n C = np.ndarray([NC,d],float)\n for i in range(NC):\n C[i,:] = np.sum(X[u==i+1,:],0)\n\n #construct k-means hessian \n AX = np.diag(W_card[1:NC+1])\n\n #update waypoints\n W = np.matmul(np.linalg.pinv(AX+s*AW),C+0.5*s*B)\n W = np.vstack([boundary[0,:],W,boundary[1,:]])\n\n #compute new labels\n XW_dst = distance.cdist(X,W,'sqeuclidean')\n u_new = XW_dst.argmin(1)\n\n #check for convergence\n converged = not np.sum(u_new!=u)\n u=u_new\n\n #plot\n if(plot_ax is not None):\n pyplot.sca(plot_ax)\n pyplot.ion()\n pyplot.cla()\n pyplot.title('Annealing, s='+repr(s))\n pyplot.plot(X[:,0],X[:,1],'bo')\n pyplot.plot(W[:,0],W[:,1],'-ro')\n pyplot.axis('equal')\n\n pyplot.pause(1.0/60)\n \n return W, u", "def decide_k_min(self, H0_dist, Ha_dist, rnd_index):\r\n\r\n self.H0_dists.append(copy.deepcopy(H0_dist))\r\n self.Ha_dists.append(copy.deepcopy(Ha_dist))\r\n #print(\"Deciding kmin for round index\", rnd_index)\r\n\r\n # If you change the end bound to len(H0_dist) then that's an issue\r\n\r\n for k in range(self.round_sched[rnd_index] // 2 + 1, self.round_sched[rnd_index] + 1):\r\n #print(\"kmin?:\", k)\r\n LR_num = 0\r\n LR_denom = 0\r\n for i in range(k, len(H0_dist)):\r\n LR_num += Ha_dist[i]\r\n LR_denom += H0_dist[i]\r\n \r\n delta = 1\r\n\r\n # FOR METIS\r\n #if (LR_num + self.pr_Ha_sched[max(rnd_index-1, 0)])/ (LR_denom + self.pr_H0_sched[max(rnd_index-1, 0)])> 1 / self.alpha:\r\n\r\n # FOR ATHENA\r\n if LR_num / LR_denom > 1 / self.alpha and Ha_dist[k] > delta * H0_dist[k]:\r\n \r\n # The case of equality essentially only happens when both sides are 0. Then there's no harm\r\n # in calling it a kmin (since it necessarily won't contribute to the risk), in spite of the fact\r\n # that the ratio criterion cannot be satisfied because of division by zero.\r\n # GRANT COULD ALSO BE DENOM = 0 OR ALPHA NUM > DENOM short circuit\r\n\r\n\r\n\r\n # SENTINELS FOR WHEN THERE'S NO KMIN! 
if we get to the\r\n # end of the dist and there's no satisfaction just return SENTINEL\r\n\r\n # FOR MINERVA\r\n #if self.alpha * LR_num >= LR_denom:\r\n\r\n self.k_min_sched[rnd_index] = k\r\n\r\n cumulative_H0_sched = self.pr_H0_sched[max(rnd_index-1, 0)]\r\n cumulative_Ha_sched = self.pr_Ha_sched[max(rnd_index-1, 0)]\r\n\r\n self.pr_H0_sched[rnd_index] = LR_denom + cumulative_H0_sched\r\n self.pr_Ha_sched[rnd_index] = LR_num + cumulative_Ha_sched\r\n\r\n # FOR MINERVA\r\n self.risk_sched[rnd_index] = LR_denom / LR_num\r\n\r\n # FOR METIS\r\n #self.risk_sched[rnd_index] = self.pr_H0_sched[rnd_index] / self.pr_Ha_sched[rnd_index]\r\n return", "def kto_wygral():\n for x in range(0, ROZMIAR_PLANSZY):\n for y in range(0, ROZMIAR_PLANSZY):\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\n iksy, kolka = sprawdz_linie((x, y), kierunek)\n if iksy == ile_do_wygranej:\n return X\n if kolka == ile_do_wygranej:\n return O\n return False", "def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - 
tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def search_minimum_coloring(self,alpha,Beta):\n bestSol=[]\n bestK=0\n k= self.g.n\n iter = 0\n global encore\n encore = True\n timer = threading.Timer(200, findeboucle)\n timer.start()\n while(encore):\n tabus_search = self.compute_solution(k,alpha,Beta)\n if(tabus_search[1]==0):\n bestSol= copyMatrix(tabus_search[0])\n #tmax=tabus_search[2]\n bestK=k\n k=k-1\n return(bestK,bestSol)", "def Wygrana():\r\n for x in range (0, ROZMIAR_PLANSZY):\r\n for y in range (0, ROZMIAR_PLANSZY):\r\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\r\n iksy, kolka = SprawdzLinie ((x, y), kierunek)\r\n if iksy == 5:\r\n return X\r\n if kolka == 5:\r\n return O\r\n return False", "def findroot(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'root',method,**kwargs)", "def ACM_Kruskal(G):\n pass", "def kruskal_solve(self):\n\n\t\tmin_span_tree = Graph(self.graph.vertices, [])\n\t\tedges = sorted(self.graph.edges[:], key=lambda x: x[2])\n\t\tcount = 0\n\n\t\twhile count < len(self.graph.vertices) - 1:\n\t\t\tcur_edge = edges[0]\n\t\t\tedges = edges[1:]\n\t\t\t\n\t\t\tnode1, node2, weight = cur_edge\n\t\t\tif not min_span_tree.is_connected(node1, node2):\n\t\t\t\tmin_span_tree.edges.append(cur_edge)\n\t\t\t\tcount = count + 1\n\n\t\treturn min_span_tree", "def guyan_forsparse(M, K, master=None, fraction=None):\n\n\n\tif master is None:\n\t\tif fraction is None:\n\t\t\tfraction = 0.25\n\n\t\tratios = np.diag(M) / np.diag(K)\n\t\tranked = [i[0] for i in sorted(enumerate(ratios), key=lambda x: x[1])]\n\t\tthresh = int(fraction * ratios.size)\n\t\tif (thresh >= ratios.size) or thresh == 0:\n\t\t\tprint(\"Can't keep\", thresh, 'DOFs.')\n\t\t\tprint(\"Fraction of\", fraction, \"is too low or too high.\")\n\t\t\treturn 0, 0, 0, 0, 0\n\n\t\tmaster = ranked[-thresh:]\n\n\tmaster = np.array(master)\n\n\tncoord = M.shape[0]\n\n\ti = np.arange(0, ncoord)\n\n\ti = i.reshape(1,-1)\n\n\ti = i + np.ones((1,i.shape[1]),int)\n\n\tlmaster = master.shape[1]\n\n\ti[0,master-1] = np.transpose(np.zeros((lmaster,1)))\n\n\ti = np.sort((i), axis =1)\n\n\tslave = i[0,lmaster + 0:ncoord]\n\n\tK= lil_matrix(K)\n\n\tslave = slave.reshape(1,-1)\n\n\tmaster = 
master-np.ones((1,master.shape[0]),int)\n\n\tmaster = master.ravel()\n\n\tslave = slave - np.ones((1,slave.shape[0]),int)\n\n\tslave = slave.ravel()\n\n\tkss = slice_forSparse(K, slave, slave)\n\n\tksm = slice_forSparse(K, slave, master)\n\n\tT= np.zeros((len(master)+len(slave), len(master)))\n\n\tT= lil_matrix(T)\n\n\tT[master,:lmaster] = sps.eye(lmaster,lmaster)\n\n\tT[slave,0:lmaster]=spla.spsolve(-kss,ksm)\n\n\tMred = T.T * M * T\n\n\tKred = T.T * K * T\n\n\treturn Mred, Kred, master", "def MinSpanningTreeKruskal(self):\n nodes = [n for n in self.nodes]\n edges = [e for e in self.edges]\n self.ResetGraph()\n for n in nodes:\n self.AddNode(n)\n n.neighbours = []\n\n \n edges.sort(key=lambda e: e.weight)\n \n for edge in edges:\n if not self.CausesCycleIfAdded(edge):\n self.ConnectByEdge(edge)\n if len(self.edges) == self.NodesCount()-1:\n break", "def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 
105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], 
orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) 
or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def DBSCAN(M, eps, min_points):\n colors = ['r', 'g', 'b', 'y', 'c', 'm'] # tablica kolorow - inny kolor dla kazdego clustera\n checked = np.zeros(M.shape[\n 0]) # tablica sprawdzonych punktow wypelniona zerami jesli punkt zostal sprawdzony zmieniana jest wartosc na 1print(checked)\n classification = np.empty(M.shape[0])\n classification.fill(0)\n cluster_count = 0\n for i in range(0, len(colors)): # for odpowiedzialny do tworzenia clusterow (kazdy cluster inny kolor)\n for j in range(0, len(checked)): # szukanie pierwszego niesprawdzonego punktu\n if checked[j] != 1:\n seeds = cluster(M, j, eps)\n startpoint = j\n if min_points > len(seeds):\n checked[\n startpoint] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i nic z nim dalej nie robi bo jest do dupy\n\n if min_points <= len(seeds):\n plt.plot(M[startpoint, 0], M[startpoint, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[startpoint] = 1\n classification[startpoint] = i + 1\n break # jesli znaleziono niesprawdzony punkt wychodzi z petli\n while len(seeds) > 0:\n\n point = seeds[0] # wybranie za kolejny punkt pierwszego punktu z tablicy seeds\n results = cluster(M, point, eps) # zapisanie punktow ktore spelniaja warunek z neighborhood\n if checked[point] != 1:\n if min_points > len(results) and (classification[point] == 0 or classification[point] == -1):\n checked[\n point] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i ustala go jako border\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=8)\n classification[point] = -(i + 1)\n if min_points <= len(results):\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[point] = 1\n classification[point] = i + 1\n for k in range(0, len(results)):\n result_point = results[k]\n seeds.append(\n result_point) # dodanie do tablicy seeds punktow ktore znajdowaly sie w sasiedztwie punktu point\n seeds.remove(seeds[0]) # usuwa juz sprawdzony element z tablicy seeds\n if np.sum(checked) == M.shape[\n 0]: # jesli juz wszystkie punkty zostaly sprawdzone to wychodzi z petli - po tym wszystkie clustery powinny byc zrobione\n break\n return plt.show()", "def find(Map, PosI, PosF):\n \n # Pour les tests, cf. 
Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF = (PosF[0],PosF[1]+1) #Go right\n# else:\n# 
print(Chemin)\n Chemin.reverse()\n return(Chemin)", "def _cluster_k_medoids_minibatch(self, num_variants, tolerance, batch_size, cache, max_cycles):\n avail_medoid_indices = [self.index[name] for name in self.tree.get_ordered_names() if name in self.available]\n chsn_indices = [self.index[n] for n in self.chosen]\n num_chsn = len(chsn_indices)\n dists = self._transform_distances(tolerance)\n # This spaces the initial centroids randomly around the tree\n seq_chunk = len(avail_medoid_indices) // (num_variants - num_chsn)\n rand_inds = []\n for i in range(num_variants - num_chsn):\n rand_inds.append(avail_medoid_indices[random.randint(i*seq_chunk, (i+1)*seq_chunk-1)])\n best_med_inds = np.array(chsn_indices + rand_inds)\n # Initial random sets\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n best_score = sum(best_scores)\n # Using a simple greedy algorithm, typically converges after 2-5 iterations.\n num_cycles = 0\n improvement = True\n while improvement == True:\n improvement = False\n med_inds = best_med_inds.copy()\n if len(avail_medoid_indices) > batch_size:\n avail_minibatch_inds = random.sample(avail_medoid_indices, batch_size)\n else:\n avail_minibatch_inds = avail_medoid_indices\n for i in range(num_chsn, num_variants):\n for ind in avail_minibatch_inds:\n if ind in med_inds: continue\n med_inds[i] = ind\n score = self._score_pattern(med_inds, dists)\n if score < best_score:\n best_score = score\n best_med_inds[i] = ind\n improvement = True\n else:\n med_inds[i] = best_med_inds[i]\n num_cycles += 1\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n break\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n improvement = False\n break\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n return best_med_inds, best_scores", "def k_corona(G, k, core_number=None):\n\n def func(v, k, c):\n return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k)\n\n return _core_subgraph(G, func, k, core_number)", "def AGM_prim(mtr_adj, limited_nodes=[], raiz=1):\n\tnum_vertices = len(mtr_adj)\n\n\tfila = []\n\tvertices = [] # ordenados de acordo com a chave\n\t\n\t# adicionando os nodos na lista de vertices e ordenando pela key\n\tfor i in range( 1, num_vertices+1 ):\n\t\tvertices.append( vertice(i) )\n\n\tvertices.sort(key = lambda x: x.key)\n\tlog.debug('vertices: %s' % vertices)\n\t\n\t# fila a ser ordenada pela distancia\n\tfor nodo in vertices:\n\t\tfila.append(nodo)\n\n\t# se a raiz tiver grau máximo =1, seleciona o nodo mais próximo da raiz\n\t# para tornar ele a 'raiz'\n\tverificar_raiz(mtr_adj, limited_nodes, vertices, raiz)\n\t\n\tvertices[raiz-1].dist = 0\n\n\t# ordena a fila por ordem de distancia para o predecessor.\n\treordenar(fila)\n\tlog.debug('fila: %s' % fila)\n\n\t# criando arvore com os nodos que aceitam mais de 1 grau\n\twhile len(fila):\n\t\t# nodo a ser testado\n\t\tu = fila.pop(0)\n\t\t\n\t\t# evitar os nodos com grau máximo = 1 por enquanto\n\t\tif u.key in limited_nodes:\n\t\t\tcontinue\n\t\t\n\n\t\t# passando por todos os outros vértices, e adicionando\n\t\t# para selecionar os nodos que tem o nodo u como predecessor.\n\t\tfor v in range(1, num_vertices+1):\n\t\t\tif u.key != v and \\\n\t\t\tv not in limited_nodes and \\\n\t\t\tmtr_adj[ u.key-1 ][v-1] < vertices[v-1].dist and \\\n\t\t\tna_fila(v, 
fila):\n\t\t\t\tvertices[v-1].pred = u.key\n\t\t\t\tvertices[v-1].dist = mtr_adj[ u.key-1 ][v-1]\n\t\t\t\treordenar(fila)\n\t\tlog.debug('fila: %s' % fila)\t\n\t\n\t# conectando os nodos que aceitam grau maximo = 1 na arvore\n\tfor u in vertices:\n\t\tif u == raiz:\n\t\t\tcontinue\n\n\t\t# para cada nodo u de grau máximo = 1\n\t\tif u.key in limited_nodes:\n\t\t\t# verificar qual o nodo mais próximo, não limitado, diferente de u\n\t\t\tfor v in range(1, num_vertices+1):\n\t\t\t\tv_dist = mtr_adj[u.key-1][v-1] # distância de u até v\n\t\t\t\tif v != u.key and v_dist < u.dist and v not in limited_nodes:\n\t\t\t\t\tu.dist = v_dist\n\t\t\t\t\tu.pred = v\n\n\treturn vertices", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def _rootsFinder(self, fun, jac, bounds, npoints, method):\n if method == \"regular\":\n step = (bounds[1] - bounds[0]) / (npoints + 1)\n try:\n X0 = np.arange(bounds[0] + step, bounds[1], step)\n except:\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n elif method == \"random\":\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n\n def objFun(X, f, jac):\n g = 0\n j = np.zeros(X.shape)\n i = 0\n for x in X:\n fx = f(x)\n g = g + fx**2\n j[i] = 2 * fx * jac(x)\n i = i + 1\n return g, j\n\n opt = minimize(\n lambda X: objFun(X, fun, jac),\n X0,\n method=\"L-BFGS-B\",\n jac=True,\n bounds=[bounds] * len(X0),\n )\n\n X = opt.x\n np.round(X, decimals=5)\n return np.unique(X)", "def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if 
(np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()" ]
[ "0.615799", "0.5902298", "0.58482414", "0.57372916", "0.57352954", "0.57080084", "0.56059325", "0.55901784", "0.5569681", "0.55468035", "0.5539385", "0.5525096", "0.5479864", "0.546164", "0.54158294", "0.5414072", "0.53997874", "0.53977036", "0.5389939", "0.5384784", "0.5362815", "0.53549546", "0.5301085", "0.5295417", "0.5290579", "0.528696", "0.52823", "0.5246708", "0.5243289", "0.52287936" ]
0.68557984
0
Get current NFL season. After March, returns year of upcoming season.
def current_season() -> int:
    now = datetime.now()
    month, year = now.month, now.year
    if month < 4:
        year -= 1
    return year
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def get_upcoming_season(self):\n result = self._method_call(\"UpcomingSeason\")\n return int(result)", "def get_current_player_season(self):\n return self.get_player_season(\"current\")", "def media_season(self):\n media_status = self._media_status()[0]\n return media_status.season if media_status else None", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def calcSeasonModified( monthNum ):\r\n\r\n if monthNum == 12 or monthNum == 1 or monthNum == 2:\r\n return 0\r\n\r\n elif monthNum == 6 or monthNum == 7 or monthNum == 7:\r\n return 1\r\n\r\n else:\r\n return 3", "def seasonNumber(self):\n return self.index", "def seasonNumber(self):\n if self._seasonNumber is None:\n self._seasonNumber = self.parentIndex if isinstance(self.parentIndex, int) else self.season().seasonNumber\n return utils.cast(int, self._seasonNumber)", "def test_get_season_19_march(self, calendar, expected):\n date = datetime.date(2017, 3, 19)\n assert calendar.get_season(date) == expected", "def current_season_phase():\n _update_week_number()\n return _cur_season_phase", "def get_season_year(league_id):\n\n today = date.today()\n\n month = today.month\n year = today.year\n\n if league_id == \"10\":\n season_year = str(year)\n else:\n if month >= 10:\n # Defaulting to current season in October\n next_year = int(str(year)[-2:]) + 1\n season_year = str(year) + \"-\" + str(next_year)\n else:\n # Defaulting to the current or just completed season\n # from Jan. 
to Sept.\n next_year = int(str(year)[-2:])\n season_year = str(year - 1) + \"-\" + str(next_year)\n\n return season_year", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = ephem.next_equinox(spring_start).datetime()\n winter_start = ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)", "def convert_season(row): \n if row[\"month\"] >= 8:\n return int(row[\"season\"][:4])\n else:\n return int(row[\"season\"][-4:])", "def get_current_hockey_year_start():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_last_year()\n\n else: # if month >= 9 (Sept)\n return get_current_year()", "def season(self, seasonnum, order='aired'):\n if order=='aired':\n seasons = self.seasons\n elif order == 'dvd':\n seasons = self.dvd_seasons\n try:\n return seasons[seasonnum]\n except KeyError:\n raise SeasonNotFoundError(\n 'Season no %s does not exists' % seasonnum\n ), None, sys.exc_info()[2]", "def get_current_hockey_year():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8: \n return get_last_year() + get_current_year()\n\n\n else: # if month >= 9 (Sept)\n return get_current_year() + get_next_year()", "def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if 
we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None", "def get_season_dates(date, season):\n start_date_start = date\n start_date_end = date\n if season == \"Spring\":\n start_date_start = date.replace(month=4)\n start_date_end = date.replace(month=6, day=30)\n elif season == \"Summer\":\n start_date_start = date.replace(month=7)\n start_date_end = date.replace(month=9, day=30)\n elif season == \"Fall\":\n start_date_start = date.replace(month=10)\n start_date_end = date.replace(month=12, day=31)\n elif season == \"Winter\":\n start_date_start = date.replace(month=1)\n start_date_end = date.replace(month=3, day=31)\n return start_date_start, start_date_end", "def get_fiscal_year(self):\n next_calendar_year_months = [10, 11, 12]\n if self.start_date.month in next_calendar_year_months:\n fiscal_year = self.start_date.year + 1\n return fiscal_year\n else:\n return self.start_date.year", "def test_21st_century(self):\r\n season = \"2019-20\"\r\n res = get_end_year(season)\r\n assert res == 2020", "def get_mothers_day_date(year):\r\n start_date = parse(f\"Jan {year}\").date()\r\n for date in rrule(YEARLY, dtstart=start_date, bymonth=5, byweekday=SU, bysetpos=2):\r\n if date.year == year:\r\n return date.date()", "def get_season_no(token, url):\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('data')\n high_season = 1\n for episode in json_data:\n if episode.get('airedSeason') > high_season:\n high_season = episode.get('airedSeason')\n return high_season", "def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1", "def dia_revolucion(year):\n return nth_day_of_month(3, MON, NOV, year)", "def calcSeason(ra, time):\n # Reference RA and equinox to anchor ra/season reference - RA = 0 is overhead at autumnal equinox\n # autumn equinox 2014 happened on september 23 --> equinox MJD\n Equinox = 2456923.5 - 2400000.5\n # convert ra into 'days'\n dayRA = ra / 360 * 365.25\n firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25\n seasons = (time - firstSeasonBegan) / 365.25\n # Set first season to 0\n seasons = seasons - np.floor(np.min(seasons))\n return seasons", "def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))" ]
[ "0.81641465", "0.7565352", "0.74286443", "0.6773904", "0.66487664", "0.6576127", "0.65571433", "0.6517643", "0.6495419", "0.64488924", "0.64429444", "0.64425147", "0.64404756", "0.6364527", "0.62375706", "0.61532784", "0.61332804", "0.6104992", "0.5971628", "0.5910558", "0.5863225", "0.582131", "0.57585377", "0.57450205", "0.5730995", "0.5719832", "0.5665247", "0.56471336", "0.5624144", "0.56237185" ]
0.8059089
1
Get NFL week (ESPN scoring period) from date. The year of the given date determines the relevant NFL season. Assumes week 1 begins the week of Labor Day and ends the following Wednesday. Does not cap value, so may be below 1 or above 17.
def get_week_from_date(date) -> int:
    month, year = date.month, date.year
    if month < 4:
        year -= 1
    ld = _labor_day(year)
    wk1_wed = ld + timedelta(days=2)
    days_since = (date - wk1_wed).days
    weeks_since = days_since / 7.
    week = math.floor(weeks_since) + 1
    return int(week)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def weeks_per_year(year):\n return week_from_date(date(year, 12, 31))", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def WeekCount(year):\n weekday = DayOfWeek(year, 1, 1)\n if weekday == 4:\n return 53\n elif weekday == 3 and LeapYear(year):\n return 53\n else:\n return 52", "def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum 
= TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)", "def nflweek(self, irc, msg, args, optlist, optweek):\n \n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvYWxsU2NoZWR1bGUuanM=')\n \n usePre, useNext, outputWeek = False, False, False\n for (option, arg) in optlist:\n if option == 'pre':\n usePre = True\n \n if optweek:\n if optweek == \"next\":\n useNext = True\n elif optweek.isdigit():\n if usePre: \n if 1 <= int(optweek) <= 4:\n outputWeek = \"Preseason Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Preseason week number must be between 1 and 4.\")\n return\n else:\n if 1 <= int(optweek) <= 17:\n outputWeek = \"Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Week must be between 1-17\")\n return \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n jsondata = json.loads(html)\n\n week = jsondata.get('week', None) # work with the week data so we know where we are.\n\n if week is None:\n irc.reply(\"Failed to load schedule.\")\n return\n\n currentWeekName = week.get('current', {'current': None}).get('weekName', None) \n nextWeekName = week.get('next', {'next': None}).get('weekName', None) \n\n if currentWeekName is None:\n irc.reply(\"Cannot figure out the current week.\")\n return\n\n games = jsondata.get('content', None) # data in games.\n \n if games is None:\n irc.reply(\"Failed to load the games data.\")\n return\n \n if outputWeek:\n games = [item['games'] for item in games if item['weekName'] == outputWeek]\n weekOutput = outputWeek\n elif useNext:\n games = [item['games'] for item in games if item['weekName'] == nextWeekName]\n weekOutput = nextWeekName\n else:\n games = [item['games'] for item in games if item['weekName'] == currentWeekName]\n weekOutput = currentWeekName\n \n append_list = []\n\n for games in games:\n for t in games:\n awayTeam = self._translateTeam('team', 'nid', t['awayTeamId'])\n homeTeam = self._translateTeam('team', 'nid', t['homeTeamId'])\n append_list.append(\"[\" + t['date']['num'] + \"] \" + awayTeam + \"@\" + homeTeam + \" \" + t['date']['time'])\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1}\".format(ircutils.bold(weekOutput), descstring)\n \n irc.reply(output)", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. 
champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks", "def date_to_day_of_week(date):\n return date.weekday()", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def get_weekday_number(date):\n return date.strftime('%w')", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def week_range(date):\n # isocalendar calculates the year, week of the year, and day of the week.\n # dow is Mon = 1, Sat = 6, Sun = 7\n year, week, dow = date.isocalendar()\n\n # Find the first day of the week.\n if dow == 7:\n # Since we want to start with Sunday, let's test for that condition.\n start_date = date\n else:\n # Otherwise, subtract `dow` number days to get the first day\n start_date = date - timedelta(dow)\n\n return start_date, start_date + timedelta(6)", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def next_week_start(iso_date: Optional[str] = None) -> date:\n if iso_date:\n current_date = date.fromisoformat(iso_date)\n else:\n current_date = date.today()\n\n days_until_monday = 7 - current_date.weekday()\n\n candidate_start = current_date + timedelta(days=days_until_monday)\n while candidate_start in holidays.US():\n candidate_start += timedelta(days=1)\n\n return candidate_start", "def weekly():", "def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK", "def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def get_next_week(self, startdate):\n dow_today = 
int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week" ]
[ "0.7253059", "0.6891157", "0.68909645", "0.6705168", "0.66147095", "0.6535417", "0.63276374", "0.6250827", "0.62457407", "0.61644757", "0.6102457", "0.60741466", "0.59655815", "0.59634364", "0.59619147", "0.5926681", "0.5889611", "0.5864661", "0.5860158", "0.5819684", "0.58102256", "0.5803096", "0.5769433", "0.5759181", "0.57462853", "0.5728721", "0.5720337", "0.5683695", "0.5683139", "0.56718516" ]
0.7307077
0
Find list of edl directories in all dependencies for the passed module
def get_edl_dirs(mod, gen_cfg):
    log.info("Fetching dependencies for %s", coordinates.as_path(mod.coords))
    dependencies = mod.get_dependencies()
    edl_dirs = [mod.get_edl_path()]
    for dep, dep_coords in dependencies.items():
        dep_cfg = gen_cfg.get_mod_cfg(dep)
        log.info("Dependency: %s", coordinates.as_path(dep_coords))
        dep_edl_path = os.path.join(mod.mirror_root,
                                    coordinates.as_path(dep_coords, False)[1:],
                                    dep_coords.version,
                                    dep_cfg.edl_dir)
        edl_dirs.append(dep_edl_path)
    return edl_dirs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_module_search_path(self, pkg_name):\n pkg_location = self.get_package_location(pkg_name)\n module_search_path = [pkg_location, os.path.join(pkg_location,'lib')]\n st, cycle = graph.dfs(self.package_dependency, pkg_name)\n # computed packages on which this task depends\n required_pkgs = [self.get_package_location(x) for x in \\\n st.keys() if st[x] is not None]\n module_search_path += required_pkgs\n module_search_path += [os.path.join(x, 'lib') for x in required_pkgs]\n return module_search_path, cycle", "def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)", "def library_dirs(self):", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def __dir__():\n return __all__", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]", "def __dir__():\n keys = (*globals().keys(), *_lazy_imports_obj.keys(), *_lazy_imports_mod.keys())\n return sorted(keys)", "def dependency_dir(self) -> Path:", "def include_dirs(self):", "def getDepList(self, dict):\n \n if( dict.has_key( self.name) ):\n return\n else:\n dict[ self.name ] = self.installPath\n\n if( len( dict ) > 1 ):\n mods = self.reqmodules + self.optmodules\n else:\n mods = self.reqmodules + self.optmodules + self.reqmodules_buildonly\n \n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).getDepList( dict )", "def find_enstools_packages():\n\n return [f'enstools.{p}' for p in (find_packages(f'{os.path.dirname(__file__)}/enstools'))]", "def library_search_path(self, pedantic=False):\n return []", "def listConfigModules(etcdir):\n if not os.path.isdir(etcdir):\n return iter(())\n return (name for name in os.listdir(etcdir)\n if (name.endswith('.py')\n and os.path.isfile(os.path.join(etcdir, name)))\n )", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def find_modules(x):\n return Path(x).rglob('*.py')", "def find_with_deps(self, package_names):", "def get_dep_map(kerneldir):\n\n\tf = open(os.path.join(kerneldir, 'modules.dep'))\n\tdeps = {}\n\tfor l in f:\n\t\t#print repr(l)\n\t\tmod, dep_list_str = l.strip().split(':', 1)\n\t\tassert mod not in deps\n\n\t\tkmod = KModuleName(mod)\n\t\tdep_list = [KModuleName(x) for x in dep_list_str.strip().split()]\n\t\tdep_list.insert(0, kmod)\t# prepend ourself as a dependency\n\n\t\tdeps[kmod] = dep_list\n\n\tf.close()\n\treturn 
deps", "def my_find_packages(*args):\n import os\n packages = []\n for root_module_dir in args:\n for root, dirs, files in os.walk(root_module_dir):\n if '__init__.py' in files:\n packages.append(root)\n return packages", "def find_dependent_modules():\n tree = {}\n for module in sys.modules.values():\n if module is None:\n continue\n tree[module] = set()\n for attr_name in dir(module):\n attr = getattr(module, attr_name)\n if isinstance(attr, ModuleType):\n tree[module].add(attr)\n elif type(attr) in (FunctionType, type):\n tree[module].add(attr.__module__)\n return tree", "def _scan_fortran_file_deps(src: Path, srcdir: Path, dirname: Path, tdeps, compiler) -> T.List[str]:\n\n incre = re.compile(FORTRAN_INCLUDE_PAT, re.IGNORECASE)\n usere = re.compile(FORTRAN_USE_PAT, re.IGNORECASE)\n submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)\n\n mod_files = []\n src = Path(src)\n with src.open(encoding='ascii', errors='ignore') as f:\n for line in f:\n # included files\n incmatch = incre.match(line)\n if incmatch is not None:\n incfile = src.parent / incmatch.group(1)\n # NOTE: src.parent is most general, in particular for CMake subproject with Fortran file\n # having an `include 'foo.f'` statement.\n if incfile.suffix.lower()[1:] in compiler.file_suffixes:\n mod_files.extend(_scan_fortran_file_deps(incfile, srcdir, dirname, tdeps, compiler))\n # modules\n usematch = usere.match(line)\n if usematch is not None:\n usename = usematch.group(1).lower()\n if usename == 'intrinsic': # this keeps the regex simpler\n continue\n if usename not in tdeps:\n # The module is not provided by any source file. This\n # is due to:\n # a) missing file/typo/etc\n # b) using a module provided by the compiler, such as\n # OpenMP\n # There's no easy way to tell which is which (that I\n # know of) so just ignore this and go on. 
Ideally we\n # would print a warning message to the user but this is\n # a common occurrence, which would lead to lots of\n # distracting noise.\n continue\n srcfile = srcdir / tdeps[usename].fname\n if not srcfile.is_file():\n if srcfile.name != src.name: # generated source file\n pass\n else: # subproject\n continue\n elif srcfile.samefile(src): # self-reference\n continue\n\n mod_name = compiler.module_name_to_filename(usename)\n mod_files.append(str(dirname / mod_name))\n else: # submodules\n submodmatch = submodre.match(line)\n if submodmatch is not None:\n parents = submodmatch.group(1).lower().split(':')\n assert len(parents) in {1, 2}, (\n 'submodule ancestry must be specified as'\n f' ancestor:parent but Meson found {parents}')\n\n ancestor_child = '_'.join(parents)\n if ancestor_child not in tdeps:\n raise MesonException(\"submodule {} relies on ancestor module {} that was not found.\".format(submodmatch.group(2).lower(), ancestor_child.split('_', maxsplit=1)[0]))\n submodsrcfile = srcdir / tdeps[ancestor_child].fname\n if not submodsrcfile.is_file():\n if submodsrcfile.name != src.name: # generated source file\n pass\n else: # subproject\n continue\n elif submodsrcfile.samefile(src): # self-reference\n continue\n mod_name = compiler.module_name_to_filename(ancestor_child)\n mod_files.append(str(dirname / mod_name))\n return mod_files", "def getExtraDlls(self, module):\n\n full_name = module.getFullName()\n\n if full_name == \"kivy\":\n kivy_info = self._getKivyInformation()\n\n kivy_dlls = []\n for dll_folder in kivy_info.sdl2_dep_bins + kivy_info.glew_dep_bins:\n kivy_dlls.extend(self.locateDLLsInDirectory(dll_folder))\n\n for full_path, target_filename, _dll_extension in kivy_dlls:\n yield self.makeDllEntryPoint(\n source_path=full_path,\n dest_path=target_filename,\n package_name=full_name,\n reason=\"needed by 'kivy'\",\n )\n\n self.reportFileCount(full_name, len(kivy_dlls))", "def __dir__():\n import pkgutil\n\n names = [\n name\n for importer, name, ispkg in pkgutil.iter_modules(__path__)\n if not ispkg and name != \"base\"\n ]\n return names + [\"custom\", \"noData\"]", "def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list", "def get_modules(self):\n return self._module_loader.filelist", "def test_get_leaf_modules(request):\n filename = request.module.__file__\n qalgebra_dir = os.path.join(\n os.path.split(filename)[0], '..', 'src', 'qalgebra'\n )\n modules = get_leaf_modules(qalgebra_dir)\n assert \"qalgebra.core.abstract_algebra\" in modules", "def __dir__(self):\n result = list(new_module.__all__)\n result.extend(('__file__', '__path__', '__doc__', '__all__',\n '__docformat__', '__name__', '__path__',\n '__package__', '__version__'))\n return result", "def find_all_test_files():\n #test_file_pattern = re.compile('^t(est)?_.*\\.py$')\n test_file_pattern = re.compile('.*_test\\.py$')\n is_test_file = lambda filename: test_file_pattern.match(filename)\n drop_dot_py = lambda filename: filename[:-3]\n join_module = lambda *names: '/'.join(names)\n\n modules = []\n for root, dirs, files 
in os.walk(os.curdir):\n root_name = os.path.split(root)[-1]\n for test_file in filter(is_test_file, files):\n module = join_module(root_name, drop_dot_py(test_file))\n modules.append(module)\n #modules += ['.'.join([root_name, drop_dot_py(test_file)]) for test_file in filter(is_test, files)]\n return modules", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result" ]
[ "0.65620816", "0.63926107", "0.6388437", "0.63874215", "0.6298563", "0.6170511", "0.6139222", "0.6134263", "0.6119241", "0.61146545", "0.608592", "0.6056361", "0.603119", "0.6020695", "0.5978215", "0.5966075", "0.5961092", "0.5923319", "0.5875171", "0.5873686", "0.58671385", "0.5861534", "0.585865", "0.5849939", "0.58458", "0.58162546", "0.5793757", "0.5763414", "0.57381886", "0.57321966" ]
0.76784027
0
Update the symbol XML node
def edit_symbol_node(node, filename):
    size = int(re.findall('\d+', filename)[-1])
    log.info('New filename %s; size %s', filename, size)
    node.set('typeId', SYMBOL_ID)
    node.find('name').text = 'DLS symbol'
    # Use PV name from rule in control PV for tooltip etc.
    # Reference that PV in rule to avoid duplication.
    pv_name = node.find('.//pv').text
    pv_element = et.Element('pv_name')
    pv_element.text = pv_name
    node.append(pv_element)
    node.find('.//pv').text = '$(pv_name)'
    rule_element = node.find('.//rule')
    rule_element.set('prop_id', 'image_index')
    rule_element.set('out_exp', 'true')
    file_element = et.Element('image_file')
    file_element.text = filename
    num_element = et.Element('symbol_number')
    num_element.text = '0'
    img_size_element = et.Element('sub_image_width')
    img_size_element.text = str(size)
    node.append(file_element)
    node.append(num_element)
    node.append(img_size_element)
    node.remove(node.find('opi_file'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_symbol(self, symbol):\r\n self.symbol = symbol", "def symbol(self, symbol):\n self._symbol = symbol", "def symbol(self, symbol):\n\n self._symbol = symbol", "def set_symbol(self, row, col, symbol):\n self.field[row, col] = symbol", "def setSymbolProps(self, name, symbol):\r\n self.symbolProps = autosar.base.SymbolProps( str(name), str(symbol))", "def setElementsCoordinates(self, symbol, x, y):\n #If it is the start element\n if symbol == \"D\":\n self._set_start((x,y))\n\n #If it is the end of the level element\n elif symbol == \"F\":\n self._set_end((x,y))\n\n #If it is a spike\n elif symbol == \"S\":\n self._get_spikes().append((x,y))\n \n #If it is a scroll\n elif symbol == \"P\":\n self._get_scrolls().append((x,y))\n\n #If it is a key\n elif symbol == \"K\":\n self._get_keys().append((x,y))", "def create_simple_symbol(xml_document, symbols_element, properties, count, alpha, tags=None):\n symbol_element = xml_document.createElement(\"symbol\")\n symbol_element.setAttribute(\"alpha\", alpha)\n symbol_element.setAttribute(\"clip_to_extent\", \"1\")\n symbol_element.setAttribute(\"type\", properties['symbol_type'])\n symbol_element.setAttribute(\"name\", unicode(count))\n if tags and len(tags) > 0:\n symbol_element.setAttribute(\"tags\", tags)\n symbols_element.appendChild(symbol_element)\n\n for layer in reversed(properties['layer']):\n renderer_layer_element = xml_document.createElement(\"layer\")\n renderer_layer_element.setAttribute(\"pass\", \"0\")\n renderer_layer_element.setAttribute(\"enabled\", \"1\")\n renderer_layer_element.setAttribute(\"locked\", \"0\")\n renderer_layer_element.setAttribute(\"class\", layer['simpleSymbolClass'])\n symbol_element.appendChild(renderer_layer_element)\n\n for key, value in layer['dict_symbols'].items():\n\n symbol_properties_element = xml_document.createElement(\"prop\")\n symbol_properties_element.setAttribute(\"k\", unicode(key))\n symbol_properties_element.setAttribute(\"v\", unicode(value))\n renderer_layer_element.appendChild(symbol_properties_element)\n\n data_defined_properties_element = xml_document.createElement(\"data_defined_properties\")\n renderer_layer_element.appendChild(data_defined_properties_element)\n\n data_defined_option_element = xml_document.createElement(\"Option\")\n data_defined_option_element.setAttribute(\"type\", \"Map\")\n data_defined_properties_element.appendChild(data_defined_option_element)\n\n data_defined_option_value_element = xml_document.createElement(\"Option\")\n data_defined_option_value_element.setAttribute(\"value\", \"\")\n data_defined_option_value_element.setAttribute(\"type\", \"QString\")\n data_defined_option_value_element.setAttribute(\"name\", \"name\")\n data_defined_option_element.appendChild(data_defined_option_value_element)\n\n data_defined_option_name_element = xml_document.createElement(\"Option\")\n data_defined_option_name_element.setAttribute(\"name\", \"properties\")\n data_defined_option_element.appendChild(data_defined_option_name_element)\n\n data_defined_option_collection_element = xml_document.createElement(\"Option\")\n data_defined_option_collection_element.setAttribute(\"value\", \"collection\")\n data_defined_option_collection_element.setAttribute(\"type\", \"QString\")\n data_defined_option_collection_element.setAttribute(\"name\", \"type\")\n data_defined_option_element.appendChild(data_defined_option_collection_element)\n\n if 'subSymbol' in layer:\n SimpleSymbol.create_simple_symbol(xml_document, renderer_layer_element, layer['subSymbol'], \"@0@0\", '1')", "def 
add_symbol(self, symbol_name: str, attrs: dict = None):\n if attrs is None:\n attrs = {}\n self.current_level().add_symbol(symbol_name, attrs)\n # print(f'After add {symbol_name}, symbol_table is:\\n{self}')", "def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)", "def symbol_id(self, value: str):\n self._symbol = value", "def put(self, name_symbol=None, name=None, symbol=None):\n putted_symbl = super(ElemModule.ModuleSymTab, self).put(name_symbol=name_symbol, name=name, symbol=symbol)\n if isinstance(putted_symbl, ElemPort):\n self.module.add_io_decl(putted_symbl)", "def _set_symbol(self, symbol, blank=False):\n self._symbols.add(symbol)\n\n try:\n assert self._blank_symbol == None or not blank\n if blank:\n self._blank_symbol = symbol\n except:\n raise Exception(\n f\"Machine got blank symbol '{symbol}' which is already set to '{self._blank_symbol}'\"\n )", "def __setitem__(self, name, symbol):\n self.current_scope[name] = symbol", "def updateOffset(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)", "def put_symbol(self, symbol, row, column):\n\n self.board[row][column] = symbol", "def getMappedSymbolsXML(self, addrstring: unicode) -> unicode:\n ...", "def setSymbol(self, *args):\n return _libsbml.InitialAssignment_setSymbol(self, *args)", "def addPair(self, symbol, address):\r\n self.s_table[symbol] = address", "def update_qml(self, qml):\n if qml is None:\n return qml\n\n try:\n # parse XML\n root = ElementTree.fromstring(qml)\n\n # embed symbols\n self.embed_qml_symbols(root, 'SvgMarker', 'name')\n self.embed_qml_symbols(root, 'SVGFill', 'svgFile')\n self.embed_qml_symbols(root, 'RasterFill', 'imageFile')\n\n # return updated QML\n qml = ElementTree.tostring(\n root, encoding='utf-8', method='xml'\n )\n return qml.decode()\n except Exception as e:\n self.logger.warning(\n \"Could not embed QML symbols:\\n%s\" % e\n )\n return qml", "def add_symbol_attribute(self, symbol_attribute):\n self.symbol_attributes.append(symbol_attribute)", "def update_node(self, uri, xmlnode):\n oldnode = self.sm.get_node(uri)\n if len(oldnode) == 0: raise VOSpaceError(404, 'A Node does not exist with the requested URI.')\n oldnode = self.nf.get_node(oldnode[0]['node'])\n newnode = self.nf.get_node(xmlnode)\n # Check properties\n for property in newnode.properties:\n if property in READ_ONLY_PROPERTIES: raise VOSpaceError(401, 'User does not have permissions to set a readonly property.', summary = PERMISSION_DENIED)\n if property in oldnode.properties: \n oldnode.properties[property] = newnode.properties[property]\n else:\n oldnode.add_property(property, newnode.properties[property])\n # Delete properties if applicable\n props = xmlnode.xpath('//vos:property[@xsi:nil = \"true\"]', namespaces = {'vos': VOSPACE_NS, 'xsi': XSI_NS})\n for prop in props:\n del oldnode.properties[prop.get('uri')]\n # Store update\n self.sm.update_node(oldnode.uri, oldnode.uri, oldnode.tostring())\n return oldnode.tostring()", "def MakeSymbolName(self,content):\n 
return self.register(SymbolName(content,reg=self))", "def append_state_label(symbol):\n\t\tif symbol == \"c\":\n\t\t\tself.state_label = self.state_label.replace(\"o\", \"\")\n\t\tif symbol == \"d\":\n\t\t\tself.state_label = self.state_label.replace(\"k\", \"\")\n\t\telse:\n\t\t\tself.state_label += symbol", "def update_node(node, attribute, value):\n node.set(attribute, value)\n return", "def add_symbol(self):\n default_state = \"New State\"\n default_file = \"New File\"\n\n row = self.tbl_symbols.rowCount()\n self.tbl_symbols.insertRow(row)\n self.lst_state_item = QtWidgets.QTableWidgetItem(default_state)\n self.tbl_symbols.setItem(row, 0, self.lst_state_item)\n self.lst_file_item = QtWidgets.QTableWidgetItem(default_file)\n self.tbl_symbols.setItem(row, 1, self.lst_file_item)\n\n self.symbols[default_state] = default_file\n self.tbl_symbols.setCurrentItem(self.lst_file_item)\n self.load_from_list()\n self.txt_state.setFocus()", "def visit_text(self, sytext):\n self.current.update(sytext)", "def visit_text(self, sytext):\n self.current.update(sytext)", "def update(self):\n if not self._update:\n return\n\n self._update = False\n stru = self.stru\n sgn = stru.space_group().match_tabulated_settings().number()\n\n # Create the symmetry object\n symm = crystal.symmetry(\n unit_cell = self.unitcell._latpars,\n space_group_symbol = sgn\n )\n\n # Now the new structure\n newstru = stru.__class__(\n crystal_symmetry = symm,\n scatterers = stru.scatterers()\n )\n\n self.unitcell._latpars = list(newstru.unit_cell().parameters())\n\n self.stru = newstru\n return", "def write_symbol(self, new_tape_symbol: str) -> Self:\n tape_elements = list(self.tape)\n tape_elements[self.current_position] = new_tape_symbol\n return self.__class__(\n tape_elements,\n blank_symbol=self.blank_symbol,\n current_position=self.current_position,\n )", "def replace_symbols(node, symbols):\n warning = False\n\n if len(node) == 0:\n if node.text is not None and not node.text.isspace():\n if '$' in node.text and not (node.tag in EXCLUDED_TAGS):\n node.text = try_replace(node.text, symbols)\n\n if node.tag in NON_PV_TAGS:\n warning = True\n else:\n for child in node:\n if replace_symbols(child, symbols):\n warning = True\n\n return warning" ]
[ "0.617891", "0.61759514", "0.61127055", "0.5838499", "0.56468177", "0.56205124", "0.5558976", "0.5554636", "0.5539334", "0.5484995", "0.54771584", "0.5455788", "0.54166734", "0.53877443", "0.5364725", "0.53485316", "0.5334409", "0.5323229", "0.53168017", "0.5301254", "0.5126751", "0.51247275", "0.51015574", "0.5095295", "0.5082515", "0.50778973", "0.50778973", "0.5067637", "0.5064078", "0.50617194" ]
0.65849906
0
Grep on the basepath to find all files that contain an EDM symbol widget. control
def build_filelist(basepath):
    log.info("Building list of files containing EDM symbols in %s", basepath)
    symbol_files = []
    for dir_path, _, filenames in os.walk(basepath):
        for filename in filenames:
            filepath = os.path.join(dir_path, filename)
            if filename.endswith(".opi") and utils.grep(filepath, "EDM Symbol"):
                symbol_files.append(filepath)
    return symbol_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def searchfiles(pattern='C:\\\\RoboDK\\\\Library\\\\*.rdk'):\n import glob\n return glob.glob(pattern)", "def find_files(config, slot='*'):\n f_pattern = os.path.join(os.path.join(config['path'],config['led_name']), slot+'*' + config['led_name'] + '*'\n + config['current'] + '*' + config['exp_time'] + '*'\n + config['xpos'] + '*' + config['ypos'] + '*')\n print(f_pattern)\n return glob.glob(f_pattern)", "def find_dcds(src):\n\n dcd_paths = []\n\n for root, dirs, files in os.walk(src):\n for filename in files:\n if filename.endswith(\".dcd\"):\n dcd_paths.append(os.path.join(root, filename))\n\n return dcd_paths", "def __searchFiles(self):\n self.ui.showFindFilesDialog(self.textForFind())", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)", "def filesearch(word=\"\"):\n logger.info('Starting filesearch')\n file = []\n for f in glob.glob(\"*\"):\n if word[0] == \".\":\n if f.endswith(word):\n file.append(f)\n\n elif word in f:\n file.append(f)\n #return file\n logger.debug(file)\n return file", "def gen_find(filepat, top):\n for path, dir_list, file_list in os.walk(top):\n for name in fnmatch.filter(file_list, filepat):\n yield os.path.join(path, name)", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def findhtml(pathused,ticker,typ):\n\n allfiles = [] # initializing the return list\n pathused += \"/\"+ticker.upper()+\"/\"+typ # since SEC_edgar has a standard way to store files as its the Ticker and inside \n # sec-edgar-filings ==> AAPL ==> 10-K \n \n for r,d,f in os.walk(pathused): # os.walk will return all the files inside the directory (with absolute path)\n # r is the absolute path\n # f is list of files in the folders\n \n if 'filing-details.html' in f: # if filing.html (SEC-edgar convention to name html files) is in this folder \n pathfol = r.replace(\"\\\\\",\"/\") # we modify it \n allfiles.append(pathfol+'/filing-details.html') # we append the absolute path\n else:\n continue\n return allfiles #and return it", "def full_find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.checkout_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def find_files(basedir, regexp):\n regexp = re.compile(regexp)\n return sorted(fn for fn in glob.glob(os.path.join(basedir, '**'),\n recursive=True)\n if regexp.match(fn))", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the 
file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def find_modules(x):\n return Path(x).rglob('*.py')", "def _FindKeyFiles(self):\r\n \r\n if self.__fCachedFiles is not None:\r\n return self.__fCachedFiles\r\n \r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n files = proj.GetAllFiles()\r\n manage_files = []\r\n settings_files = []\r\n for fn in files:\r\n if os.path.basename(fn) == 'manage.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n manage_files.append(fn)\r\n elif os.path.basename(fn) == 'settings.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n settings_files.append(fn)\r\n\r\n pairs = []\r\n for manage_file in manage_files:\r\n for settings_file in settings_files:\r\n manage_dir = os.path.dirname(manage_file)\r\n settings_dir = os.path.dirname(settings_file)\r\n if manage_dir == settings_dir:\r\n pairs.append((manage_file, settings_file))\r\n if len(pairs) > 1:\r\n app.SetStatusMessage(\"Warning: Multiple manage.py/settings.py pairs found in project\")\r\n \r\n if len(pairs) > 0:\r\n self.__fCachedFiles = pairs[0]\r\n else:\r\n self.__fCachedFiles = (None, None)\r\n \r\n return self.__fCachedFiles", "def get_drawings(folder):\n # case insensitive in windows system, so \"dwg\" is ok\n return sorted(Path(folder).glob('**/*.dwg'))", "def _findfile(self,path,label):\n files=[];filenames=os.listdir(path)\n for name in filenames:\n if os.path.splitext(name)[0]==str(label):\n files.append(name)\n return files", "def _find_virtual_namespaces(pkg_roots: Dict[str, str]) -> Iterator[str]:\n for pkg in pkg_roots:\n if \".\" not in pkg:\n continue\n parts = pkg.split(\".\")\n for i in range(len(parts) - 1, 0, -1):\n partial_name = \".\".join(parts[:i])\n path = Path(find_package_path(partial_name, pkg_roots, \"\"))\n if not path.exists() or partial_name not in pkg_roots:\n # partial_name not in pkg_roots ==> purposefully/accidentally skipped\n yield partial_name", "def scan(self, base: str, pattern: str):\n for t in glob.iglob(os.path.join(base, pattern), recursive=True):\n self.report(t)", "def __searchOpenFiles(self):\n self.ui.showFindFilesDialog(self.textForFind(), openFiles=True)", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = 
next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def findMayaFiles(directory):\n\n pass", "def _GetFontFiles(path):\n return [f for f in listdir(path)\n if os.path.splitext(f)[1] in ('.ttf', '.otf')]", "def find_commands(management_dir):\n command_dir = os.path.join(management_dir, 'commands')\n try:\n return [filename[:-3] for filename in os.listdir(command_dir)\n if not filename.startswith('_') and filename.endswith('.py')]\n except OSError:\n return []", "def find_stub_files(name: str) -> List[str]:\n result = []\n for root, dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result", "def find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.full_doc_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def __find_eligible_plugins_in_directory(cls, directory_to_search):\n\n plugin_files = [\n x\n for x in os.listdir(directory_to_search)\n if x.endswith(\".py\") and x[0:-3] != \"__init__\"\n ]\n return plugin_files", "def locGlob(): \n #glob = \"From Internal Local Name Space\" # Toggle Comment\n print(glob)\n\n return", "def getAllDSP (self, inDEV):\n result = []\n def filterDSP (list, dirname, names):\n for name in names:\n if name [-4:] == '.dsp':\n fullpath = os.path.join (dirname, name)\n list.append (fullpath)\n os.path.walk (inDEV, filterDSP, result)\n result = filter (self.isValidPattern, result)\n return result" ]
[ "0.6017925", "0.56027436", "0.5597905", "0.555413", "0.5544122", "0.54559", "0.5441576", "0.5366995", "0.53383917", "0.5280805", "0.5255213", "0.5246665", "0.5238796", "0.52089924", "0.5191325", "0.5161962", "0.51558715", "0.5154216", "0.51451254", "0.513593", "0.5125903", "0.5061598", "0.50500447", "0.5048599", "0.50470847", "0.50244004", "0.5017165", "0.49997702", "0.499968", "0.4995953" ]
0.60150164
1
Process one symbol file and convert to PNG.
def process_symbol(filename, mod, mod_cfg, mirror_root, prod_root):
    working_path = os.path.join(mirror_root, prod_root[1:])
    log.debug("Finding version from %s", working_path)
    mod_version = utils.get_module_version(working_path, mod_cfg.area, mod, mod_cfg.version)
    log.info("Found version %s", mod_version)
    coords = coordinates.create(prod_root, mod_cfg.area, mod, mod_version)
    mirror_path = os.path.join(mirror_root, coordinates.as_path(coords)[1:])
    full_path = os.path.join(mirror_path, mod_cfg.edl_dir, filename[:-3] + 'edl')
    destination = os.path.dirname(os.path.join(mirror_path, mod_cfg.opi_dir, filename))
    log.info('Destination directory is {}'.format(destination))
    if os.path.exists(destination):
        for f in os.listdir(destination):
            n = os.path.split(filename)[1]
            n = '.'.join(n.split('.')[:-1])
            if f.startswith(n) and f.endswith('png'):
                log.info('Symbol png already exists: %s', f)
                return f
    else:
        log.warn('Failed to process symbol: %s does not exist', destination)
        return
    if os.path.exists(full_path):
        return files.convert_symbol(full_path, [destination])
    else:
        log.warn('Symbol %s does not exist', full_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode():\r\n # Open the file with binary instructions\r\n with open(file_name) as file:\r\n lines = file.readlines()\r\n with open(PATH + file_name, \"w\") as file_write:\r\n for line in lines:\r\n file_write.write(line + \"\\n\")\r\n\r\n # Read the instructions\r\n instructions, instruction_names = [], []\r\n parse_instr_bin_list(lines, instructions, instruction_names)\r\n\r\n # Print formatted binary instructions and their names\r\n instr_print(instructions, instruction_names)\r\n\r\n # Write to each of MPS-Files parsed hex-instructions\r\n write_mps(instructions)\r\n\r\n # Write to Mapping-PROM linked addresses\r\n write_mapping_prom(instruction_names)", "def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)", "def encode_png(track_metadata):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to PNG...\")\n\n\t# First step: OptiPNG.\n\tnew_file_name = track_metadata.file_name + \".png\"\n\toptipng_command = [\"optipng\", \"-o7\", \"-strip\", \"all\", \"-snip\", \"-out\", new_file_name, track_metadata.file_name]\n\tprint(optipng_command)\n\tprocess = subprocess.Popen(optipng_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"OptiPNG failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--allfilters-b\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"png\"", "def main():\n usage = \"usage: %prog [options] input: BioC File (args[0]); Output Directory for the (picture) .svg file.\"\n parser = OptionParser(version='%prog 0.99', usage=usage)\n\n parser.add_option('-l', '--logfile', dest='logfilename',\n help='write log to FILE', metavar='FILE')\n parser.add_option('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_option('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n\n\n\n (options, args) = parser.parse_args()\n\n if options.debug: print >> sys.stderr, '# Starting processing'\n\n process(options=options,args=args)\n\n\n\n\n sys.exit(0) # Everything went ok!", "def convert(filename,\nRenderer: \"\"\"By default, the schematic is converted to an SVG file,\n written to the standard output. 
It may also be rendered using TK.\"\"\",\n):\n \n with open(filename, \"rb\") as file:\n objects = read(file)\n stat = os.stat(file.fileno())\n \n sheet = objects[1]\n assert sheet[\"RECORD\"] == Record.SHEET\n (sheetstyle, size) = {SheetStyle.A4: (\"A4\", (1150, 760)), SheetStyle.A3: (\"A3\", (1550, 1150)), SheetStyle.A: (\"A\", (950, 760))}[sheet.get(\"SHEETSTYLE\", SheetStyle.A4)]\n if \"USECUSTOMSHEET\" in sheet:\n size = tuple(int(sheet[\"CUSTOM\" + \"XY\"[x]]) for x in range(2))\n \n # Units are 1/100\" or 10 mils\n renderer = Renderer(size, \"in\", 1/100,\n margin=0.3, line=1, down=-1, textbottom=True)\n \n for n in range(int(sheet[\"FONTIDCOUNT\"])):\n n = format(1 + n)\n fontsize = int(sheet[\"SIZE\" + n]) * 0.875\n family = sheet[\"FONTNAME\" + n].decode(\"ascii\")\n kw = dict()\n italic = sheet.get(\"ITALIC\" + n)\n if italic:\n kw.update(italic=True)\n bold = sheet.get(\"BOLD\" + n)\n if bold:\n kw.update(bold=True)\n renderer.addfont(\"font\" + n, fontsize, family, **kw)\n renderer.setdefaultfont(\"font\" + sheet[\"SYSTEMFONT\"].decode(\"ascii\"))\n renderer.start()\n \n arrowhead = dict(base=5, shoulder=7, radius=3)\n arrowtail = dict(base=7, shoulder=0, radius=2.5)\n diamond = dict(base=10, shoulder=5, radius=2.5)\n \n pinmarkers = {\n PinElectrical.INPUT: arrowhead,\n PinElectrical.IO: diamond,\n PinElectrical.OUTPUT: arrowtail,\n PinElectrical.PASSIVE: None,\n PinElectrical.POWER: None,\n }\n \n def gnd(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n renderer.vline(-4, +4, offset=(13, 0), width=1.5)\n renderer.vline(-1, +1, offset=(16, 0), width=1.5)\n def rail(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n def arrowconn(renderer):\n renderer.hline(10, endarrow=arrowhead)\n def dchevron(renderer):\n renderer.hline(5)\n renderer.polyline(((8, +4), (5, 0), (8, -4)))\n renderer.polyline(((11, +4), (8, 0), (11, -4)))\n connmarkers = {\n PowerObjectStyle.ARROW: (arrowconn, 12),\n PowerObjectStyle.BAR: (rail, 12),\n PowerObjectStyle.GND: (gnd, 20),\n }\n \n def nc(renderer):\n renderer.line((+3, +3), (-3, -3), width=0.6)\n renderer.line((-3, +3), (+3, -3), width=0.6)\n renderer.addobjects((gnd, rail, arrowconn, dchevron, nc))\n \n with renderer.view(offset=(0, size[1])) as base:\n base.rectangle((size[0], -size[1]), width=0.6)\n base.rectangle((20, -20), (size[0] - 20, 20 - size[1]), width=0.6)\n for axis in range(2):\n for side in range(2):\n for n in range(4):\n translate = [None] * 2\n translate[axis] = size[axis] / 4 * (n + 0.5)\n translate[axis ^ 1] = 10\n if side:\n translate[axis ^ 1] += size[axis ^ 1] - 20\n translate[1] *= -1\n with base.view(offset=translate) as ref:\n label = chr(ord(\"1A\"[axis]) + n)\n ref.text(label, horiz=ref.CENTRE, vert=ref.CENTRE)\n if n + 1 < 4:\n x = size[axis] / 4 / 2\n if axis:\n ref.hline(-10, +10, offset=(0, -x),\n width=0.6)\n else:\n ref.vline(-10, +10, offset=(x, 0), width=0.6)\n \n if \"TITLEBLOCKON\" in sheet:\n if not os.path.isabs(filename):\n cwd = os.getcwd()\n pwd = os.getenv(\"PWD\")\n if os.path.samefile(pwd, cwd):\n cwd = pwd\n filename = os.path.join(pwd, filename)\n with base.view(offset=(size[0] - 20, 20 - size[1])) as block:\n points = ((-350, 0), (-350, 80), (-0, 80))\n block.polyline(points, width=0.6)\n block.hline(-350, 0, offset=(0, 50), width=0.6)\n block.vline(-30, offset=(-300, 50), width=0.6)\n block.vline(-30, offset=(-100, 50), width=0.6)\n block.hline(-350, 0, offset=(0, 20), width=0.6)\n block.hline(-350, 0, offset=(0, 10), 
width=0.6)\n block.vline(20, 0, offset=(-150, 0), width=0.6)\n \n block.text(\"Title\", (-345, 70))\n block.text(\"Size\", (-345, 40))\n block.text(sheetstyle, (-340, 30), vert=block.CENTRE)\n block.text(\"Number\", (-295, 40))\n block.text(\"Revision\", (-95, 40))\n block.text(\"Date\", (-345, 10))\n d = format(date.fromtimestamp(stat.st_mtime), \"%x\")\n block.text(d, (-300, 10))\n block.text(\"File\", (-345, 0))\n block.text(filename, (-300, 0))\n block.text(\"Sheet\", (-145, 10))\n block.text(\"of\", (-117, 10))\n block.text(\"Drawn By:\", (-145, 0))\n \n for obj in objects:\n if (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\"} and\n obj[\"RECORD\"] == Record.JUNCTION and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n col = colour(obj[\"COLOR\"])\n renderer.circle(2, location, fill=col)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"IOTYPE\", \"ALIGNMENT\"} == {\"RECORD\", \"OWNERPARTID\", \"STYLE\", \"WIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"AREACOLOR\", \"TEXTCOLOR\", \"NAME\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.PORT and obj[\"OWNERPARTID\"] == b\"-1\"):\n width = int(obj[\"WIDTH\"])\n if \"IOTYPE\" in obj:\n points = ((0, 0), (5, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (5, +5))\n else:\n points = ((0, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (0, +5))\n if (obj.get(\"ALIGNMENT\") == b\"2\") ^ (obj[\"STYLE\"] != b\"7\"):\n labelpoint = (10, 0)\n horiz = renderer.LEFT\n else:\n labelpoint = (width - 10, 0)\n horiz = renderer.RIGHT\n if obj[\"STYLE\"] == b\"7\":\n shapekw = dict(rotate=+90, offset=(0, +width))\n else:\n shapekw = dict()\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(offset=offset) as view:\n view.polygon(points,\n width=0.6,\n outline=colour(obj[\"COLOR\"]),\n fill=colour(obj[\"AREACOLOR\"]),\n **shapekw)\n \n with contextlib.ExitStack() as context:\n if obj[\"STYLE\"] == b\"7\":\n view = context.enter_context(view.view(rotate=+1))\n view.text(\n overline(obj[\"NAME\"]),\n colour=colour(obj[\"TEXTCOLOR\"]),\n offset=labelpoint,\n vert=view.CENTRE, horiz=horiz,\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} >= {\"RECORD\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\"} and\n obj[\"RECORD\"] == Record.WIRE and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"LINEWIDTH\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polyline(points, colour=colour(obj[\"COLOR\"]))\n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] in {b\"46\", b\"48\", b\"44\"} or\n obj.keys() - {\"USECOMPONENTLIBRARY\", \"DESCRIPTION\", \"DATAFILECOUNT\", \"MODELDATAFILEENTITY0\", \"MODELDATAFILEKIND0\", \"DATALINKSLOCKED\", \"DATABASEDATALINKSLOCKED\", \"ISCURRENT\", \"INDEXINSHEET\", \"INTEGRATEDMODEL\", \"DATABASEMODEL\"} == {\"RECORD\", \"OWNERINDEX\", \"MODELNAME\", \"MODELTYPE\"} and\n obj[\"RECORD\"] == b\"45\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj.get(\"USECOMPONENTLIBRARY\", b\"T\") == b\"T\" and obj[\"MODELTYPE\"] in {b\"PCBLIB\", b\"SI\", b\"SIM\", b\"PCB3DLib\"} and obj.get(\"DATAFILECOUNT\", b\"1\") == b\"1\" and obj.get(\"ISCURRENT\", b\"T\") == b\"T\" and obj.get(\"INTEGRATEDMODEL\", b\"T\") == b\"T\" and obj.get(\"DATABASEMODEL\", 
b\"T\") == b\"T\" and obj.get(\"DATALINKSLOCKED\", b\"T\") == b\"T\" and obj.get(\"DATABASEDATALINKSLOCKED\", b\"T\") == b\"T\" or\n obj.keys() >= {\"RECORD\", \"AREACOLOR\", \"BORDERON\", \"CUSTOMX\", \"CUSTOMY\", \"DISPLAY_UNIT\", \"FONTIDCOUNT\", \"FONTNAME1\", \"HOTSPOTGRIDON\", \"HOTSPOTGRIDSIZE\", \"ISBOC\", \"SHEETNUMBERSPACESIZE\", \"SIZE1\", \"SNAPGRIDON\", \"SNAPGRIDSIZE\", \"SYSTEMFONT\", \"USEMBCS\", \"VISIBLEGRIDON\", \"VISIBLEGRIDSIZE\"} and\n obj[\"RECORD\"] == Record.SHEET and obj[\"AREACOLOR\"] == b\"16317695\" and obj[\"BORDERON\"] == b\"T\" and obj.get(\"CUSTOMMARGINWIDTH\", b\"20\") == b\"20\" and obj.get(\"CUSTOMXZONES\", b\"6\") == b\"6\" and obj.get(\"CUSTOMYZONES\", b\"4\") == b\"4\" and obj[\"DISPLAY_UNIT\"] == b\"4\" and obj[\"FONTNAME1\"] == b\"Times New Roman\" and obj[\"HOTSPOTGRIDON\"] == b\"T\" and obj[\"ISBOC\"] == b\"T\" and obj[\"SHEETNUMBERSPACESIZE\"] == b\"4\" and obj[\"SIZE1\"] == b\"10\" and obj[\"SNAPGRIDON\"] == b\"T\" and obj[\"SYSTEMFONT\"] == b\"1\" and obj.get(\"TITLEBLOCKON\", b\"T\") == b\"T\" and obj[\"USEMBCS\"] == b\"T\" and obj[\"VISIBLEGRIDON\"] == b\"T\" and obj[\"VISIBLEGRIDSIZE\"] == b\"10\" or\n obj.keys() == {\"HEADER\", \"WEIGHT\"} and\n obj[\"HEADER\"] == b\"Protel for Windows - Schematic Capture Binary File Version 5.0\" or\n obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"DESIMP0\", \"DESIMPCOUNT\", \"DESINTF\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] == b\"47\" and obj[\"DESIMPCOUNT\"] == b\"1\" or\n obj.keys() == {\"RECORD\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"FILENAME\"} and\n obj[\"RECORD\"] == b\"39\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n pass\n \n elif (obj.keys() - {\"ISMIRRORED\", \"ORIENTATION\", \"INDEXINSHEET\", \"COMPONENTDESCRIPTION\", \"SHEETPARTFILENAME\", \"DESIGNITEMID\", \"DISPLAYMODE\", \"NOTUSEDBTABLENAME\", \"LIBRARYPATH\"} == {\"RECORD\", \"OWNERPARTID\", \"UNIQUEID\", \"AREACOLOR\", \"COLOR\", \"CURRENTPARTID\", \"DISPLAYMODECOUNT\", \"LIBREFERENCE\", \"LOCATION.X\", \"LOCATION.Y\", \"PARTCOUNT\", \"PARTIDLOCKED\", \"SOURCELIBRARYNAME\", \"TARGETFILENAME\"} and\n obj[\"RECORD\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"AREACOLOR\"] == b\"11599871\" and obj[\"COLOR\"] == b\"128\" and obj[\"PARTIDLOCKED\"] == b\"F\" and obj[\"TARGETFILENAME\"] == b\"*\"):\n pass\n \n elif (obj.keys() - {\"TEXT\", \"OWNERINDEX\", \"ISHIDDEN\", \"READONLYSTATE\", \"INDEXINSHEET\", \"UNIQUEID\", \"LOCATION.X\", \"LOCATION.X_FRAC\", \"LOCATION.Y\", \"LOCATION.Y_FRAC\", \"ORIENTATION\", \"ISMIRRORED\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"NAME\"} and\n obj[\"RECORD\"] == Record.PARAMETER and obj[\"OWNERPARTID\"] == b\"-1\"):\n if obj.get(\"ISHIDDEN\") != b\"T\" and obj.keys() >= {\"TEXT\", \"LOCATION.X\", \"LOCATION.Y\"}:\n orient = obj.get(\"ORIENTATION\")\n kw = {\n None: dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"1\": dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"2\": dict(vert=renderer.TOP, horiz=renderer.RIGHT),\n }[orient]\n if orient == b\"1\":\n kw.update(angle=+90)\n val = obj[\"TEXT\"]\n if val.startswith(b\"=\"):\n match = val[1:].lower()\n for o in objects:\n if o.get(\"RECORD\") != Record.PARAMETER or o.get(\"OWNERINDEX\") != obj[\"OWNERINDEX\"]:\n continue\n if o[\"NAME\"].lower() != match:\n continue\n val = o[\"TEXT\"]\n break\n else:\n raise LookupError(\"Parameter value for |OWNERINDEX={}|TEXT={}\".format(obj[\"OWNERINDEX\"].decode(\"ascii\"), obj[\"TEXT\"].decode(\"ascii\")))\n 
renderer.text(val.decode(\"ascii\"),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n **kw)\n else:\n text(renderer, obj, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ISMIRRORED\", \"LOCATION.X_FRAC\", \"LOCATION.Y_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\", \"NAME\", \"READONLYSTATE\"} and\n obj[\"RECORD\"] == Record.DESIGNATOR and obj[\"OWNERPARTID\"] == b\"-1\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"NAME\"] == b\"Designator\" and obj[\"READONLYSTATE\"] == b\"1\"):\n desig = obj[\"TEXT\"].decode(\"ascii\")\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if int(owner[\"PARTCOUNT\"]) > 2:\n desig += chr(ord(\"A\") + int(owner[\"CURRENTPARTID\"]) - 1)\n renderer.text(desig, (int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n colour=colour(obj[\"COLOR\"]),\n font=\"font\" + obj[\"FONTID\"].decode(),\n )\n \n elif (obj.keys() >= {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"LOCATIONCOUNT\", \"X1\", \"X2\", \"Y1\", \"Y2\"} and\n obj[\"RECORD\"] == Record.POLYLINE and obj.get(\"ISNOTACCESIBLE\", b\"T\") == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\"):\n if obj[\"OWNERPARTID\"] == b\"-1\":\n current = True\n else:\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n current = (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\"))\n if current:\n polyline(renderer, obj)\n \n elif (obj.keys() - {\"OWNERPARTDISPLAYMODE\", \"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"COLOR\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\"} and\n obj[\"RECORD\"] == Record.LINE and obj[\"ISNOTACCESIBLE\"] == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n renderer.line(\n colour=colour(obj[\"COLOR\"]),\n width=int(obj[\"LINEWIDTH\"]),\n a=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n b=(int(obj[\"CORNER.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"NAME\", \"SWAPIDPIN\", \"OWNERPARTDISPLAYMODE\", \"ELECTRICAL\", \"DESCRIPTION\", \"SWAPIDPART\", \"SYMBOL_OUTEREDGE\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"DESIGNATOR\", \"FORMALTYPE\", \"LOCATION.X\", \"LOCATION.Y\", \"PINCONGLOMERATE\", \"PINLENGTH\"} and\n obj[\"RECORD\"] == Record.PIN and obj[\"FORMALTYPE\"] == b\"1\"):\n if obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n pinlength = int(obj[\"PINLENGTH\"])\n pinconglomerate = int(obj[\"PINCONGLOMERATE\"])\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n rotate = pinconglomerate & 3\n with renderer.view(offset=offset, rotate=rotate) as view:\n kw = dict()\n points = list()\n if \"SYMBOL_OUTEREDGE\" in obj:\n view.circle(2.85, (3.15, 0), width=0.6)\n points.append(6)\n points.append(pinlength)\n electrical = obj.get(\"ELECTRICAL\", PinElectrical.INPUT)\n marker = pinmarkers[electrical]\n if marker:\n kw.update(startarrow=marker)\n view.hline(*points, **kw)\n \n if pinconglomerate >> 1 & 1:\n invert = -1\n kw = dict(angle=180)\n else:\n invert = +1\n kw = dict()\n if pinconglomerate & 8 and \"NAME\" in obj:\n view.text(overline(obj[\"NAME\"]),\n vert=view.CENTRE,\n horiz=view.RIGHT * invert,\n offset=(-7, 0),\n **kw)\n if pinconglomerate & 
16:\n designator = obj[\"DESIGNATOR\"].decode(\"ascii\")\n view.text(designator,\n horiz=view.LEFT * invert,\n offset=(+9, 0),\n **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ORIENTATION\", \"STYLE\", \"ISCROSSSHEETCONNECTOR\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"SHOWNETNAME\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.POWER_OBJECT and obj[\"OWNERPARTID\"] == b\"-1\"):\n orient = obj.get(\"ORIENTATION\")\n if obj.get(\"ISCROSSSHEETCONNECTOR\") == b\"T\":\n marker = dchevron\n offset = 14\n else:\n (marker, offset) = connmarkers.get(obj[\"STYLE\"], (None, 0))\n \n col = colour(obj[\"COLOR\"])\n translate = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(colour=col, offset=translate) as view:\n kw = dict()\n if orient:\n kw.update(rotate=int(orient))\n view.draw(marker, **kw)\n \n if obj[\"SHOWNETNAME\"] != b\"F\":\n orients = {\n b\"2\": (renderer.RIGHT, renderer.CENTRE, (-1, 0)),\n b\"3\": (renderer.CENTRE, renderer.TOP, (0, -1)),\n None: (renderer.LEFT, renderer.CENTRE, (+1, 0)),\n b\"1\": (renderer.CENTRE, renderer.BOTTOM, (0, +1)),\n }\n (horiz, vert, pos) = orients[orient]\n t = obj[\"TEXT\"].decode(\"ascii\")\n pos = (p * offset for p in pos)\n view.text(t, pos, horiz=horiz, vert=vert)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"ISSOLID\", \"LINEWIDTH\", \"CORNERXRADIUS\", \"CORNERYRADIUS\", \"TRANSPARENT\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"AREACOLOR\", \"COLOR\", \"CORNER.X\", \"CORNER.Y\", \"ISNOTACCESIBLE\", \"LOCATION.X\", \"LOCATION.Y\"} and\n obj[\"RECORD\"] in {Record.RECTANGLE, Record.ROUND_RECTANGLE} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"ISSOLID\", b\"T\") == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n kw = dict(width=0.6, outline=colour(obj[\"COLOR\"]))\n if \"ISSOLID\" in obj:\n kw.update(fill=colour(obj[\"AREACOLOR\"]))\n a = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n b = (int(obj[\"CORNER.\" + x]) for x in \"XY\")\n \n if obj[\"RECORD\"] == Record.ROUND_RECTANGLE:\n r = list()\n for x in \"XY\":\n radius = obj.get(\"CORNER{}RADIUS\".format(x))\n if radius is None:\n radius = 0\n else:\n radius = int(radius)\n r.append(int(radius))\n renderer.roundrect(r, a, b, **kw)\n else:\n renderer.rectangle(a, b, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.NET_LABEL and obj[\"OWNERPARTID\"] == b\"-1\"):\n renderer.text(overline(obj[\"TEXT\"]),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"STARTANGLE\", \"SECONDARYRADIUS\"} == {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"COLOR\", \"ENDANGLE\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\"} and\n obj[\"RECORD\"] in {Record.ARC, Record.ELLIPTICAL_ARC} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"LINEWIDTH\"] == b\"1\" and obj.get(\"OWNERPARTDISPLAYMODE\", b\"1\") == b\"1\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (owner[\"CURRENTPARTID\"] == obj[\"OWNERPARTID\"] and\n owner.get(\"DISPLAYMODE\", b\"0\") == obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\")):\n r = int(obj[\"RADIUS\"])\n if obj[\"RECORD\"] == 
Record.ELLIPTICAL_ARC:\n r2 = obj.get(\"SECONDARYRADIUS\")\n if r2 is None:\n r2 = 0\n else:\n r2 = int(r2)\n else:\n r2 = r\n \n start = float(obj.get(\"STARTANGLE\", 0))\n end = float(obj[\"ENDANGLE\"])\n centre = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.arc((r, r2), start, end, centre,\n colour=colour(obj[\"COLOR\"]),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"LINEWIDTH\"} > {\"RECORD\", \"AREACOLOR\", \"COLOR\", \"ISNOTACCESIBLE\", \"ISSOLID\", \"LOCATIONCOUNT\", \"OWNERINDEX\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == Record.POLYGON and obj[\"AREACOLOR\"] == b\"16711680\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\" and obj[\"OWNERPARTID\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polygon(fill=colour(obj[\"COLOR\"]), points=points)\n elif (obj.keys() - {\"INDEXINSHEET\", \"ISNOTACCESIBLE\", \"OWNERINDEX\", \"ORIENTATION\", \"JUSTIFICATION\", \"COLOR\"} == {\"RECORD\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.LABEL):\n if obj[\"OWNERPARTID\"] == b\"-1\" or obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n text(renderer, obj)\n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == b\"22\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n col = colour(obj[\"COLOR\"])\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.draw(nc, location, colour=col)\n elif (obj.keys() - {\"CLIPTORECT\"} == {\"RECORD\", \"ALIGNMENT\", \"AREACOLOR\", \"CORNER.X\", \"CORNER.Y\", \"FONTID\", \"ISSOLID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"Text\", \"WORDWRAP\"} and\n obj[\"RECORD\"] == b\"28\" and obj[\"ALIGNMENT\"] == b\"1\" and obj[\"AREACOLOR\"] == b\"16777215\" and obj.get(\"CLIPTORECT\", b\"T\") == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"WORDWRAP\"] == b\"T\"):\n lhs = int(obj[\"LOCATION.X\"])\n renderer.text(\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n offset=(lhs, int(obj[\"CORNER.Y\"])),\n width=int(obj[\"CORNER.X\"]) - lhs,\n text=obj[\"Text\"].decode(\"ascii\").replace(\"~1\", \"\\n\"),\n vert=renderer.TOP,\n )\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\", \"X3\", \"Y3\", \"X4\", \"Y4\"} and\n obj[\"RECORD\"] == Record.BEZIER and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"1\" and obj[\"LINEWIDTH\"] == b\"1\" and obj[\"LOCATIONCOUNT\"] == b\"4\"):\n col = colour(obj[\"COLOR\"])\n points = list()\n for n in range(4):\n n = format(1 + n)\n points.append(tuple(int(obj[x + n]) for x in \"XY\"))\n renderer.cubicbezier(*points, colour=col)\n \n elif (obj.keys() - {\"RADIUS_FRAC\", \"SECONDARYRADIUS_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\", \"SECONDARYRADIUS\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\"} and\n obj[\"RECORD\"] == Record.ELLIPSE and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"RADIUS_FRAC\", b\"94381\") == b\"94381\" and obj[\"SECONDARYRADIUS\"] == obj[\"RADIUS\"] and obj.get(\"SECONDARYRADIUS_FRAC\", b\"22993\") == b\"22993\" and obj[\"ISSOLID\"] == b\"T\"):\n renderer.circle(\n 
r=int(obj[\"RADIUS\"]),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"SYMBOLTYPE\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"XSIZE\", \"YSIZE\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.SHEET_SYMBOL and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"SYMBOLTYPE\", b\"Normal\") == b\"Normal\"):\n renderer.rectangle((int(obj[\"XSIZE\"]), -int(obj[\"YSIZE\"])),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\"} and\n obj[\"RECORD\"] in {Record.SHEET_NAME, Record.SHEET_FILE_NAME} and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n text(renderer, obj)\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"INDEXINSHEET\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\", \"EMBEDIMAGE\", \"FILENAME\"} and\n obj[\"RECORD\"] == Record.IMAGE and obj[\"OWNERINDEX\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"EMBEDIMAGE\"] == b\"T\" and obj[\"FILENAME\"] == b\"newAltmLogo.bmp\"):\n location = list()\n corner = list()\n for x in \"XY\":\n location.append(int(obj[\"LOCATION.\" + x]))\n corner.append(int(obj[\"CORNER.\" + x]))\n renderer.rectangle(location, corner, width=0.6)\n \n else:\n print(\"\".join(\"|{}={!r}\".format(p, v) for (p, v) in sorted(obj.items())), file=stderr)\n \n renderer.finish()", "def perform_symbolization(self): # pragma: no cover\n # pylint: disable=redefined-variable-type\n if os.path.isfile(self.start_location):\n files = [self.start_location]\n else:\n files = self._get_files()\n\n for filename in files:\n print(\"Processing file -- {0}\".format(filename))\n updated_file_text = ''\n updated_file_text = ''\n with open(filename, 'r') as fin:\n for line in fin.readlines():\n new_line = self.replace_id_with_symbol(line)\n\n if not updated_file_text and new_line:\n updated_file_text = new_line\n elif new_line:\n updated_file_text += new_line\n\n with open(filename, 'w') as fout:\n fout.write(updated_file_text)", "def _add_png(self, pngfile):\n with open(pngfile, 'rb') as png:\n if png.read(8) != self.magic:\n raise ValueError(\"{} is not a PNG file\".format(pngfile))\n while True:\n chead = png.read(8)\n if len(chead) == 0:\n break\n clen, ctype = struct.unpack(\">L4s\", chead)\n cdata = png.read(clen)\n ccrc = png.read(4)\n utype = ctype.decode(\"ascii\")\n self._current_chunk = (chead[:4], ctype, cdata, ccrc)\n if ctype in self.mustmatch:\n ref = self._matchref.get(ctype)\n if ref is None:\n self._matchref[ctype] = cdata\n self._copy()\n else:\n if cdata != ref:\n raise ValueError(\"Chunk {} mismatch\".format(utype))\n met = (\"_first_\" if self._first else \"_next_\") + utype\n try:\n met = getattr(self, met)\n except AttributeError:\n pass\n else:\n met(cdata)\n self._first = False", "def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)", "def run_turtle_program(source):\n ast = parser.parse(source)\n\n t = turtle.Turtle()\n for stmt in ast.statement:\n do_statement(stmt, t)\n canvas = turtle.Screen().getcanvas()\n canvas.postscript(file='image.eps')\n 
img = Image.open('image.eps')\n img.save('image.png', 'png')\n turtle.Screen().bye()\n return 'image.png'", "def gen_symbols(path, strip):\n\n symbols = ''\n svg_namespace = 'http://www.w3.org/2000/svg'\n etree.register_namespace('', svg_namespace)\n\n for root, dirs, files in os.walk(os.path.abspath(path)):\n for wwsfile in files:\n basename, extension = os.path.splitext(wwsfile)\n if extension == '.svg':\n filepath = os.path.join(root, wwsfile)\n try:\n svg = etree.parse(filepath)\n svg_root = svg.getroot()\n\n attribs = svg_root.attrib\n desc = svg.find('{'+svg_namespace+'}desc')\n svg_root.remove(desc)\n title = svg.find('{'+svg_namespace+'}title')\n svg_root.remove(title)\n metadata = svg.find('{'+svg_namespace+'}metadata')\n svg_root.remove(metadata)\n\n viewbox_attrib = 'viewBox'\n if viewbox_attrib in attribs:\n viewbox = attribs[viewbox_attrib]\n else:\n viewbox = f\"0 0 {attribs['width']} {attribs['height']}\"\n\n basename2 = basename.replace(strip, '')\n symbols += f'<symbol id=\"{basename2}\" viewBox=\"{viewbox}\">'\n\n for element in svg_root:\n symbols += etree.tostring(element).decode('utf-8')\n symbols += '</symbol>'\n\n except Exception as err:\n warnings.warn(f'Could not parse file {filepath}: {err}')\n\n return symbols", "def AA2Image(readpath, savepath, header, font_data):\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n print('convert txt to png. save path: ', savepath)\n\n files = glob.glob(readpath+'*.txt')\n\n for file in files:\n ascii_art = AsciiArt(file)\n ascii_art_image = ascii_art.image(font_data)\n filename = header + os.path.basename(file)[:-4] + '.png'\n ascii_art_image = Image.fromarray(ascii_art_image)\n ascii_art_image = ascii_art_image.convert('L')\n ascii_art_image.save(savepath + filename)\n print('saved ', filename)", "def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)", "def postprocess_file(config: Config, dfs: DFs) -> None:\n if SymbolDF.name in dfs:\n dfs[SymbolDF.name] = postprocess_symbols(config, dfs[SymbolDF.name])", "def get_carbon_image(fName):\r\n\r\n print(f'On File {fName}')\r\n\r\n global driver\r\n with open(fName) as f:\r\n code = f.read()\r\n\r\n code = urllib.parse.quote_plus(code)\r\n url = CARBON.format(code=code)\r\n\r\n\r\n driver.get(url)\r\n\r\n driver.find_element_by_xpath(\"//button[contains(text(),'Export')]\").click()\r\n driver.find_element_by_xpath(\"//button[contains(text(),'Open')]\").click()\r\n sleep(5) # this might take a bit\r\n driver.save_screenshot(fName.strip('.py')+'.png')\r\n print(f\"{fName.strip('.py')+'.png'} saved.\")\r\n return True", "def new_func():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)", "def process(image):\n pass", "def main():\n\n import codecs\n\n file_path = 
'../sample_texts/hi-Deva.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'hindi.png',\n family='Noto Sans Devanagari', language='hi', rtl=False)\n\n file_path = '../sample_texts/ar-Arab.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'arabic.png',\n family='Noto Naskh Arabic', language='ar', rtl=True)\n\n file_path = '../sample_texts/mn-Mong.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'mong.png',\n family='Noto Sans Mongolian', language='mn', vertical=True, rtl=False)", "def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)", "def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)", "def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )", "def write(self, symFile):\n logging.debug(\"Writing Symbol \"+self.name)\n for polygon in self.polygons:\n symFile.write(polygon.symRep())\n for wire in self.wires:\n symFile.write(wire.symRep())\n for text in self.texts:\n symFile.write(text.symRep())\n for pin in self.pins:\n symFile.write(pin.symRep())\n for circle in self.circles:\n symFile.write(circle.symRep())\n for rectangle in self.rectangles:\n symFile.write(rectangle.symRep())", "def handle_as_file(view: View, point: int, string: str):\n # \"screenshot.png\"\n\n name = osp.basename(string)\n file, folder = get_file(view, string, name)\n\n # if file doesn't exist, return\n if not osp.isfile(file):\n return\n\n # does the file need conversion ?\n need_conversion = file.endswith(FORMAT_TO_CONVERT)\n\n # if the file needs conversion, convert it and read data from the resulting png\n if need_conversion:\n # keep the image's file and name for later use\n conv_file = file\n conv_name = name\n\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR, \"tmp_png.png\")\n name = osp.splitext(name)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(file, tmp_file)\n\n file = tmp_file\n\n with open(file, \"rb\") as f:\n encoded = str(base64.b64encode(f.read()), \"utf-8\")\n\n real_width, real_height, size = get_image_size(file)\n width, height = get_dimensions(view, file)\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"file\")\n else:\n save(file, name, \"file\", folder)\n elif href == \"save_as\":\n convert(conv_file if need_conversion else file, \"file\")\n else:\n sublime.active_window().open_file(file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width,\n real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate)", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def save_to_image_file(self, filename, image_format='png', scale_x=1, scale_y=1):\n\n self.save_barcode_to_pillow(scale_x=scale_x, scale_y=scale_y).save(filename,\n format=image_format)", "def save_as_png(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + 
file_name + '.png')\n plt.close()", "def postprocess_symbols(config: Config, symbols: SymbolDF) -> SymbolDF:\n files = []\n arms = []\n arm_symbols = {}\n current_file = ''\n current_arm = ''\n has_file = False\n if config['collect.prefix-file']:\n prefixes = config.get_re('collect.prefix')\n else:\n prefixes = None\n if 'type' in symbols.columns:\n for symbol in symbols.itertuples():\n if symbol.type == 'FILE':\n has_file = True\n current_file = symbol.symbol\n if prefixes:\n current_file = simplify_source(current_file, prefixes)\n\n elif symbol.type == 'NOTYPE':\n if symbol.symbol.startswith('$'):\n if current_arm or symbol.symbol in ARM_SPECIAL_SYMBOLS:\n current_arm = symbol.symbol\n arm_symbols[current_arm] = True\n files.append(current_file)\n arms.append(current_arm)\n\n if has_file:\n symbols['file'] = files\n if current_arm:\n symbols['arm'] = arms\n\n if has_file:\n symbols = symbols[symbols['type'] != 'FILE']\n if current_arm:\n syms = arm_symbols.keys()\n symbols = symbols[~symbols.symbol.isin(syms)]\n return symbols", "def do_icon(srcfn, magnitude):\n img = Image.open(\"%s.png\" % (srcfn, ))\n draw = ImageDraw.Draw(img)\n (width, _height) = FONT.getsize(magnitude)\n # 40 pixel wide, we want to center it\n x0 = int(20 - (width / 2.))\n draw.text((x0, 8), magnitude, font=FONT, fill=(0, 0, 0, 255))\n img.save((\"../../htdocs/icons/lsr/%s/%s.png\"\n ) % (srcfn, magnitude))\n del img\n del draw", "def process(self, image):", "def decode(n_pir,template,localtime,draw,bin_display):\n template_filename=template+\"%02d\"\n\n for n in range(n_pir):\n decode_in_file=template_filename%(n+1)\n decode_out_file=decode_in_file+\"_parsed.txt\"\n click.echo(\"Working on file: %s\"%decode_out_file)\n buff_size=8\n try:\n with open(decode_in_file,'rb') as i: #\n with open(decode_out_file,'w') as o:\n #Header\n o.write('Time,Status\\n')\n while True:\n anteroom=i.read(buff_size)\n if anteroom==b'':\n break\n anteroom_tuple=struct.unpack('=If',anteroom)\n time_=anteroom_tuple[0]\n status=anteroom_tuple[1]\n if localtime:\n time_=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time_))\n o.write('%s,%f\\n'%(time_,status))\n else:\n o.write('%i,%f\\n'%(time_,status))\n except FileNotFoundError:\n continue\n if draw:\n actogram(template_filename, n_pir, bin_display)", "def dofigure(line):\n global fnum\n if 'onlineonlycolor' not in line:\n fnum += 1\n locate = re.search(r\"\\{([\\w\\d\\-_]+)\\}\", line)\n if not locate:\n raise RuntimeError(\"Could not find image in line '{}'\".format(line))\n imagetext = locate.group(1)\n imname, ftype = findfigure(imagetext)\n if 'plottwo' in line:\n imname2 = line.split('{')[2].split('}')[0]\n # print name and number\n print(fnum+'a', imname)\n print(fnum+'b', imname2)\n _, subname = os.path.split(imname)\n _, subname2 = os.path.split(imname2)\n ftype = os.path.splitext(subname)\n # rename with number if desired\n subname = outfigname(fnum, ftype, char=\"a\")\n outname = os.path.join(outdir, subname)\n subname2 = outfigname(fnum, ftype, char=\"b\")\n outname2 = os.path.join(outdir, subname2)\n # copy over\n os.system(\"cp \"+imname+\" \"+outname)\n os.system(\"cp \"+imname2+\" \"+outname2)\n # write out plot string\n newline = line.replace(imagetext, subname)\n newline = newline.replace(imname2, subname2)\n else:\n # print name and number\n print(fnum, imname)\n _, subname = os.path.split(imname)\n # rename with number if desired\n subname = outfigname(fnum, ftype)\n outname = os.path.join(outdir, subname)\n # copy over\n os.system(\"cp \"+imname+\" 
\"+outname)\n # write out plot string\n newline = line.replace(imagetext, subname)\n return(newline)" ]
[ "0.5763709", "0.55842566", "0.5484444", "0.54430854", "0.5442242", "0.5435863", "0.53775424", "0.53518033", "0.53421825", "0.53008115", "0.52987564", "0.5291246", "0.5290452", "0.5279297", "0.52617425", "0.5226984", "0.5217233", "0.52096176", "0.51862746", "0.51822567", "0.51762855", "0.51399404", "0.512233", "0.5105043", "0.5100154", "0.50914484", "0.5087448", "0.50743026", "0.50676775", "0.506356" ]
0.61083233
0
calculate_angles(chunk) calculates elevation and azimuth given a JSON-formatted chunk from ODAS
def calculate_angles(self, chunk):
    import math
    import collections
    Angles = collections.namedtuple("Angles", "ev az")
    x = float(chunk['x'])
    y = float(chunk['y'])
    z = float(chunk['z'])
    ev = round(90 - math.acos(z/math.sqrt(x*x + y*y + z*z))*180/math.pi)
    az = round(math.atan2(y, x)*180/math.pi)
    return Angles(ev, az)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(all_blobs, all_angles):", "def extract_angles(self):\n atom_ids = self.contents['ID']\n angle_list = []\n for key, value in self.angles.items():\n a = value[0]\n b = value[1]\n c = value[2]\n\n lst = [a, b, c]\n\n A_ = np.asarray(atom_ids).reshape(-1, 3)\n\n sorted = np.argsort(lst)\n A_sorted = A_[:, sorted]\n\n idd = np.ones(len(A_sorted)) * key\n iff = np.arange(1, len(A_sorted) + 1)\n\n concate = np.concatenate((iff[:,np.newaxis], idd[:,np.newaxis], A_sorted), axis=-1)\n df = pd.DataFrame(data=concate, columns=['Mol_ID', 'Angle_type', 'Atom_1', 'Atom_2', 'Atom_3'])\n angle_list.append(df)\n self.angle_df = pd.concat(angle_list)\n self.num_angles = len(self.angle_df)", "def get_mean_viewing_angles(self) -> (float, float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n try:\n az = float(root.findtext(\".//SatelliteAzimuth\"))\n off_nadir = float(root.findtext(\".//ViewAngle\"))\n incidence_angle = float(root.findtext(\".//incidenceAngle\"))\n except TypeError:\n raise InvalidProductError(\n \"SatelliteAzimuth, ViewAngle or incidenceAngle not found in metadata!\"\n )\n\n return az, off_nadir, incidence_angle", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def test_calcAngles_angles_or_axis(self, kargs, expected_len_result, expected_truncated_results):\n kargs['vsk'] = self.cal_SM\n result = pycgmCalc.calcAngles(self.motion_data, **kargs)\n np.testing.assert_equal(len(result), expected_len_result)\n np.testing.assert_almost_equal(result[0:5], expected_truncated_results)", "def __prepare_angles_contents(angles: Optional[dict],\n elements: list) -> list:\n\n angles_contents = []\n\n number_of_angles = len(angles) if angles is not None else 0\n\n angles_contents.append(\n ' {:^2}'.format(number_of_angles) +\n ' ! 
Nr of angles;' +\n 'at1;at2;at3;Thetao,o;ka;kb;pv1;pv2;val(bo)\\n')\n\n if number_of_angles:\n\n for key, values in angles.items():\n\n num = ReactiveForceFieldWriter.__get_num_from_str(elements,\n key)\n\n angles_contents.append(\n ' ' + num + ' ' * 2 +\n str(values['value']).lstrip('[').rstrip(']') +\n '\\n')\n\n return angles_contents", "def angle(z):", "def ADCangles(EL, HA, DEC, LAT=31.963972222):\n Z, HA, coDEC, coLAT = np.deg2rad([90 - EL, HA, 90 - DEC, 90 - LAT])\n if Z == 0:\n return np.zeros(3)\n sinZ = np.sin(Z)\n sinP = np.sin(HA) * np.sin(coLAT) / sinZ\n cosP = (np.cos(coLAT) - np.cos(coDEC) * np.cos(Z)) / (np.sin(coDEC) * sinZ)\n P = np.arctan2(sinP, cosP)\n # Formulas from DESI-4957\n tanZ = np.tan(Z)\n HORIZON = P + 0.5 * np.pi\n ADC1 = HORIZON + (0.0353 + tanZ * (0.2620 + tanZ * 0.3563))\n ADC2 = HORIZON - (0.0404 + tanZ * (0.2565 + tanZ * 0.3576))\n return np.rad2deg([P, ADC1, ADC2])", "def get_mean_sun_angles(self) -> (float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n zenith_angle = float(root.findtext(\".//SolarZenith\"))\n azimuth_angle = float(root.findtext(\".//SolarAzimuth\"))\n\n return azimuth_angle, zenith_angle", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def calculate_angles():\n time = request.args.get('time')\n\n result = Helpers.validate_and_parse_input(time)\n if result:\n hour, minute = result\n\n hour_angle = 0.5 * (hour * 60 + minute)\n minute_angle = 6 * minute\n\n angle = abs(hour_angle - minute_angle)\n angle = min(360 - angle, angle)\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, angle)\n\n return Helpers.success(angle)\n else:\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, 'bad_request')\n return Helpers.bad_request(r\"query parameter time should follow regex ^\\d{1,2}:\\d{1,2}$ and value should be \"\n r\"between 00:00 and 23:59\")", "def areas(self):\n\n height_delta = (np.cos(self.polar_corners[:-1, :-1]) - np.cos(self.polar_corners[:-1, 1:]))\n azimuth_delta = (self.azimuthal_corners[1:, 1:] - self.azimuthal_corners[:-1, 1:])\n\n return height_delta * azimuth_delta", "def calculate_average_angles(tube_steps,angular_file,pixel_step,tube_sep,extra_dummy=[]):\n no_of_overlaps = int(round((len(tube_steps)+len(extra_dummy))/pixel_step))-1\n correction_array = Array(read_horizontal_corrections(angular_file))\n no_of_tubes = len(correction_array)\n counter = array.zeros(no_of_tubes+no_of_overlaps,int)\n final_values = array.zeros(no_of_tubes+no_of_overlaps,float)\n for stepno in range(no_of_overlaps+1):\n counter[stepno:stepno+no_of_tubes]+=array.ones(no_of_tubes,int)\n final_values[stepno:stepno+no_of_tubes]+=correction_array\n ave_angles = final_values/counter\n print 'Check: average angles ' + `ave_angles`\n print 'Check: counter' + `counter`\n print 'Check: no of overlaps, tubes: %d %d ' % (no_of_overlaps,no_of_tubes)\n # Now apply these average corrections to the actual angles\n real_step = pixel_step\n if len(tube_steps)<pixel_step:\n real_step = len(tube_steps) #for when we have no overlap and missing steps\n final_values = 
array.zeros((no_of_tubes+no_of_overlaps)*real_step)\n print 'Final values has len %d' % len(final_values)\n for stepno in range(no_of_tubes+no_of_overlaps):\n final_values[stepno*real_step:(stepno+1)*real_step] = tube_steps + tube_sep*stepno + ave_angles[stepno]\n return final_values", "def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array", "def write_angles(self, polar_angles, azimuthal_angles):\n with self.entry.nxfile:\n if 'sample' not in self.entry:\n self.entry['sample'] = NXsample()\n if 'peaks' not in self.entry:\n self.entry['peaks'] = NXdata()\n else:\n if 'polar_angle' in self.entry['peaks']:\n del self.entry['peaks/polar_angle']\n if 'azimuthal_angle' in self.entry['peaks']:\n del self.entry['peaks/azimuthal_angle']\n self.write_parameter('peaks/polar_angle', polar_angles)\n self.write_parameter('peaks/azimuthal_angle', azimuthal_angles)", "def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)", "def angles(self):\n self._sort_measurements()\n return self._angles", "def _parse_json(joint_states):\n json_f = [J for J in os.listdir(joint_states) if J.endswith('.json')]\n dfs = []\n for j in json_f:\n js = pd.read_json(f'{joint_states}/{j}')\n dfs.append(np.array([A['angle'] for A in js[js.columns[0]][0]['joint_angles']], dtype=float))\n return np.array(dfs)[:, :5]", "def really_process(txn, ctx):\n delete_prev_attrs(txn, ctx[\"nexrad\"])\n\n cenlat = float(ST[ctx[\"nexrad\"]][\"lat\"])\n cenlon = float(ST[ctx[\"nexrad\"]][\"lon\"])\n latscale = 111137.0\n lonscale = 111137.0 * math.cos(cenlat * math.pi / 180.0)\n\n # STM ID AZ/RAN TVS MESO POSH/POH/MX SIZE VIL DBZM HT TOP FCST MVMT\n co = 0\n for line in ctx[\"lines\"]:\n if len(line) < 5:\n continue\n if line[1] != \" \":\n continue\n tokens = line.replace(\">\", \" \").replace(\"/\", \" \").split()\n if not tokens or tokens[0] == \"STM\":\n continue\n if tokens[5] == \"UNKNOWN\":\n tokens[5] = 0\n tokens.insert(5, 0)\n tokens.insert(5, 0)\n if len(tokens) < 13:\n LOG.info(\"Incomplete Line ||%s||\", line)\n continue\n d = {}\n co += 1\n d[\"storm_id\"] = tokens[0]\n d[\"azimuth\"] = float(tokens[1])\n if tokens[2] == \"***\":\n LOG.info(\"skipping bad line |%s|\", line)\n continue\n d[\"range\"] = float(tokens[2]) * 1.852\n d[\"tvs\"] = tokens[3]\n d[\"meso\"] = tokens[4]\n d[\"posh\"] = tokens[5] if tokens[5] != \"***\" else None\n d[\"poh\"] = tokens[6] if tokens[6] != \"***\" else None\n if tokens[7] == \"<0.50\":\n tokens[7] = 0.01\n d[\"max_size\"] = tokens[7]\n\n if tokens[8] in [\"UNKNOWN\", \"***\"]:\n d[\"vil\"] = 0\n else:\n d[\"vil\"] = tokens[8]\n\n d[\"max_dbz\"] = tokens[9]\n d[\"max_dbz_height\"] = tokens[10]\n d[\"top\"] = tokens[11]\n if tokens[12] == \"NEW\":\n d[\"drct\"] = 0\n 
d[\"sknt\"] = 0\n else:\n d[\"drct\"] = int(float(tokens[12]))\n d[\"sknt\"] = tokens[13]\n d[\"nexrad\"] = ctx[\"nexrad\"]\n\n cosaz = math.cos(d[\"azimuth\"] * math.pi / 180.0)\n sinaz = math.sin(d[\"azimuth\"] * math.pi / 180.0)\n mylat = cenlat + (cosaz * (d[\"range\"] * 1000.0) / latscale)\n mylon = cenlon + (sinaz * (d[\"range\"] * 1000.0) / lonscale)\n d[\"geom\"] = \"SRID=4326;POINT(%s %s)\" % (mylon, mylat)\n d[\"valid\"] = ctx[\"ts\"]\n\n for table in [\n \"nexrad_attributes\",\n \"nexrad_attributes_%s\" % (ctx[\"ts\"].year,),\n ]:\n sql = f\"\"\"\n INSERT into {table} (nexrad, storm_id, geom, azimuth,\n range, tvs, meso, posh, poh, max_size, vil, max_dbz,\n max_dbz_height, top, drct, sknt, valid)\n values (%(nexrad)s, %(storm_id)s, ST_GeomFromEWKT(%(geom)s),\n %(azimuth)s, %(range)s, %(tvs)s, %(meso)s, %(posh)s,\n %(poh)s, %(max_size)s, %(vil)s, %(max_dbz)s,\n %(max_dbz_height)s, %(top)s, %(drct)s, %(sknt)s, %(valid)s)\n \"\"\"\n if common.dbwrite_enabled():\n txn.execute(sql, d)\n\n if co > 0:\n LOG.info(\n \"%s %s Processed %s entries\",\n ctx[\"nexrad\"],\n ctx[\"ts\"].strftime(\"%Y-%m-%d %H:%M UTC\"),\n co,\n )\n return co", "def test_calcAngles_angles_and_axis(self, kargs, expected_len_result, expected_first_angle, expected_first_axis):\n kargs['vsk'] = self.cal_SM\n angles, axis = pycgmCalc.calcAngles(self.motion_data, **kargs)\n np.testing.assert_equal(len(angles), expected_len_result)\n np.testing.assert_equal(len(axis), expected_len_result)\n np.testing.assert_almost_equal(angles[0][0], expected_first_angle, self.rounding_precision)\n np.testing.assert_almost_equal(axis[0][0], expected_first_axis, self.rounding_precision)", "def sensor_angles(self, channel=\"1\"):\n if channel != \"3B\":\n sensor = self.channel2sensor[channel]\n else:\n sensor = \"VNIRB\"\n\n # Angular data from ASTER metadata data.\n S = float(self.meta[\"MAPORIENTATIONANGLE\"])\n\n FOV = {\"VNIR\": 6.09, \"VNIRB\": 5.19, \"SWIR\": 4.9, \"TIR\": 4.9}\n\n P = {\n \"VNIR\": float(self.meta[\"POINTINGANGLE.1\"]),\n \"VNIRB\": float(self.meta[\"POINTINGANGLE.1\"]),\n \"SWIR\": float(self.meta[\"POINTINGANGLE.2\"]),\n \"TIR\": float(self.meta[\"POINTINGANGLE.3\"]),\n }\n\n # cut overlap area of backward pointing telescope\n if channel != \"3B\":\n field = self.read_digitalnumbers(channel)\n elif channel == \"3B\" and self.meta[\"FLYINGDIRECTION\"] == \"DE\":\n field = self.read_digitalnumbers(channel)[400:]\n elif channel == \"3B\" and self.meta[\"FLYINGDIRECTION\"] == \"AE\":\n field = self.read_digitalnumbers(channel)[:400]\n\n # design n field\n sidx = np.arange(np.shape(field)[1])\n\n mid0 = sidx[np.isfinite(field[5, :])][[0, -1]].mean()\n mid1 = sidx[np.isfinite(field[-5, :])][[0, -1]].mean()\n\n f = interpolate.interp1d(\n np.array([5, np.shape(field)[0] - 5]),\n np.array([mid0, mid1]),\n kind=\"linear\",\n fill_value=\"extrapolate\",\n )\n\n mids = f(np.arange(np.shape(field)[0]))\n # costructing an n-array indexing the pixels symmetric to the center of the\n # swath. If pointing angle is zero, the sensor zenith angle is zero in the\n # swath center.\n n = sidx - mids[:, np.newaxis]\n\n # left and right side of nadir are defined such that the sign follows the\n # roll angle sign, which is negative on the right and positive on the left\n # side of the sensor in flying direction (!), NOT in projected image. 
The\n # sides therefore depend on the ascending / decending mode defined in the\n # meta data.\n flyingdir = self.meta[\"FLYINGDIRECTION\"]\n if flyingdir is \"DE\":\n n *= -1\n\n swath_widths = np.sum(np.isfinite(field), axis=1)\n # average swath width, but exluding possible NaN-scanlines at beginning and\n # end of the image.\n swath_width = np.mean(swath_widths[swath_widths > 4200])\n\n n_angles = n * FOV[sensor] / swath_width + P[sensor]\n azimuth = np.full_like(field, np.nan)\n\n if channel != \"3B\":\n zenith = abs(n_angles)\n\n if flyingdir is \"DE\":\n azimuth[n_angles > 0] = 90 + S\n azimuth[n_angles <= 0] = 270 + S\n else:\n azimuth[n_angles < 0] = 90 + S\n azimuth[n_angles >= 0] = 270 + S\n else:\n h = 705000 # in km above the equator\n zenith = np.rad2deg(\n np.arctan(\n np.sqrt(\n (h * np.tan(np.deg2rad(P[sensor])) + 15 * n) ** 2\n + (h * np.tan(np.deg2rad(27.6)) / np.cos(np.deg2rad(P[sensor])))\n ** 2\n )\n / h\n )\n )\n\n x = np.rad2deg(np.arctan(0.6 / np.tan(np.deg2rad(n_angles))))\n if flyingdir is \"DE\":\n azimuth[n_angles > 0] = np.array(90 - x + S)[n_angles > 0]\n azimuth[n_angles <= 0] = np.array(270 - x + S)[n_angles <= 0]\n else:\n azimuth[n_angles < 0] = np.array(90 - x + S)[n_angles < 0]\n azimuth[n_angles >= 0] = np.array(270 - x + S)[n_angles >= 0]\n\n zenith[np.isnan(field)] = np.nan\n azimuth[np.isnan(field)] = np.nan\n\n return zenith, azimuth", "def sector_angles(self) -> np.ndarray:\n return self._sector_angles", "def check_angle_of_arcs(self):\n\n if self.thin_arc_start_angle >= 3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle += 360\n\n elif self.thin_arc_start_angle <= -3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle -= 360\n\n if self.thin_arc_end_angle >= 3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle += 360\n\n elif self.thin_arc_end_angle <= -3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle -= 360\n\n if self.thick_arc_start_angle >= 3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle += 360\n\n elif self.thick_arc_start_angle <= -3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle -= 360\n\n if self.thick_arc_end_angle >= 3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle += 360\n\n elif self.thick_arc_end_angle <= -3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle -= 360", "def get_node_angles(self, node_name, frame):\n channels = self.node_names[node_name][\"channels\"]\n euler_angles = []\n rotation_order = []\n for ch in channels:\n if ch.lower().endswith(\"rotation\"):\n idx = self.node_channels.index((node_name, ch))\n rotation_order.append(ch)\n euler_angles.append(frame[idx])\n return euler_angles, rotation_order", "def get_internal_angles(self):\n\n angles = []\n\n for elx, elz in zip(self.grid['x'], self.grid['z']):\n el_angles = []\n xy = np.vstack((elx, elz))\n for i in range(0, elx.size):\n i1 = (i - 1) % elx.size\n i2 = (i + 1) % elx.size\n\n a = (xy[:, i] - xy[:, i1])\n b = (xy[:, i2] - xy[:, i])\n # note that nodes are ordered counter-clockwise!\n angle = np.pi - np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1]\n )\n el_angles.append(angle * 180 / np.pi)\n angles.append(el_angles)\n return np.array(angles)", "def calc_angles_struct(structure):\n if isinstance(structure,list):\n lig_angles = np.array(structure)[np.argsort(angs)[::-1]]\n else:\n mol = io_molecule.convert_io_molecule(structure)\n if len(mol.graph) == 0:\n print('Creating imputed molecular graph! 
May be untrustworthy.')\n mol.create_BO_dict()\n mets = mol.find_metals()\n lig_angles = []\n if len(mets) == 1:\n coordats = np.nonzero(mol.graph[mets[0]])[0]\n if len(coordats) == 1:\n lig_angles = []\n else:\n coords = mol.ase_atoms.get_positions()\n angs = [\n get_angle(coords[x[0]],coords[mets[0]],coords[x[1]]) for x in itertools.combinations(coordats,2)\n ]\n angs = np.array(angs)[np.argsort(angs)[::-1]] # Add angles\n lig_angles += angs.tolist() # Add sorted angles as features\n else:\n print('Warning: User ligand input without metal for refernce on interatomic angles. \\\n Please pass a structure with a metal for user ligand generation.')\n lig_angles += [0.0] * (36-len(lig_angles)) # Pad with zeros\n n_ca_m_ca_angles = len(np.nonzero(lig_angles)[0])\n denticity = denticity_combinations_dict[n_ca_m_ca_angles]\n return {'user_lig':np.array(lig_angles)}, denticity", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])", "def read_area_shapes(path_ew, path_s):\n output = []\n\n with fiona.open(path_ew, 'r') as reader:\n for lsoa in reader:\n output.append({\n 'type': lsoa['type'],\n 'geometry': lsoa['geometry'],\n 'properties': {\n 'code': lsoa['properties']['LSOA11CD'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n with fiona.open(path_s, 'r') as reader:\n for datazone in reader:\n output.append({\n 'type': datazone['type'],\n 'geometry': datazone['geometry'],\n 'properties': {\n 'code': datazone['properties']['DataZone'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n return output", "def average_angle_for_box(n_detectors, n_detectors_middle, n_detectors_upper_lower):\n\n angles_for_middle_box = n_detectors[0:n_detectors_middle]\n middle_angle = sum(angles_for_middle_box) / len(angles_for_middle_box)\n\n angles_for_upper_lower_box = n_detectors[0:n_detectors_upper_lower]\n if len(angles_for_upper_lower_box) > 0:\n upper_lower_angle = sum(angles_for_upper_lower_box) / len(angles_for_upper_lower_box)\n else:\n 
upper_lower_angle = 0\n\n n_detectors = n_detectors[max(n_detectors_middle, n_detectors_upper_lower):]\n return middle_angle, upper_lower_angle, n_detectors" ]
[ "0.56423277", "0.55796504", "0.5485317", "0.532156", "0.5300278", "0.50899595", "0.50719124", "0.50677234", "0.5040855", "0.5008711", "0.5001167", "0.50002366", "0.4993435", "0.49813247", "0.4962279", "0.49208593", "0.49165124", "0.49086887", "0.48858005", "0.48731053", "0.48723224", "0.48558918", "0.48430544", "0.48411146", "0.48357418", "0.4828506", "0.4807861", "0.48014358", "0.4766292", "0.47513896" ]
0.7734531
0
Returns an "absolute" value for a timedelta, always representing a time distance.
def abs_timedelta(delta):
    if delta.days < 0:
        now = datetime.datetime.now()
        return now - (now + delta)
    return delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def delta(self, abs_value=False):\n return self.current - self.last if not abs_value else np.abs(self.current - self.last)", "def timedelta(self) -> datetime.timedelta:\n factor = -1 if self.negative else 1\n return datetime.timedelta(\n hours=factor * self.hours, minutes=factor * self.minutes\n )", "def abs(value):\n return _abs(value)", "def day_absolute_to_relative(absolute):\n today = datetime.datetime.today()\n date = datetime.datetime.strptime(absolute, \"%Y-%m-%d\")\n return abs((today - date).days)", "def abs(self, a):\n return abs(a)", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)", "def apparent_to_absolute_magnitude(apparent_magnitude, distance):\n distance_in_parsecs = distance / (648000. * astronomical_unit / np.pi)\n absolute_magnitude = apparent_magnitude - 5*np.log10(distance_in_parsecs) + 5\n return absolute_magnitude", "def abs_(a):", "def get_absolute_datetime(reference, offset):\n absolute_datetime = reference + datetime.timedelta(seconds=offset)\n\n return absolute_datetime", "def __abs__(self):\n return Quantity(abs(self._value), self.unit)", "def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)", "def __abs__(self):\n retval = self.copy()\n retval._val = abs(retval._val)\n return retval", "def abs(self):\n\n return self._get(\"abs\", rtype=self.__class__)", "def eta(self):\n eta = self.fields['eta']\n if eta >= 0:\n return datetime.timedelta(seconds=eta)\n else:\n ValueError('eta not valid')", "def _convert_to_timedelta(time_diff):\n return timedelta(seconds=time_diff)", "def absolute_value(val):\n if val < 0:\n return val * -1\n else:\n return val", "def as_duration(abs_time_in_seconds):\n\n durations = (\n ('s', 1),\n ('m', 60),\n ('h', 60 * 60),\n ('d', 60 * 60 * 24),\n ('w', 60 * 60 * 24 * 7)\n )\n\n duration = time.time() - abs_time_in_seconds\n result = \"now\"\n\n for label, length in durations:\n if length > duration:\n break\n result = \"{:.0f}{}\".format(math.ceil(duration / length), label)\n\n return result", "def day_relative_to_absolute(relative):\n today = datetime.datetime.today()\n delta = datetime.timedelta(days=relative)\n return (today - delta).strftime(\"%Y-%m-%d\")", "def duration(self):\n # type: () -> Optional[timedelta]\n\n if self.datetime_start and self.datetime_complete:\n return self.datetime_complete - self.datetime_start\n else:\n return None", "def find_absolute_value(x):\n return math.fabs(x)", "def abs(self):\n\n return Number.abs(self)", "def _abs (x):\n\n return x if le(nil,x) else -x", "def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)", "def abs(self):\n return self * self.sign()", "def timedelta(self, *a, **kw):\n from datetime import timedelta\n return timedelta(*a, **kw)", "def resolve(self):\n addl_micros = round(self.nanoseconds / 1000)\n return self.td + datetime.timedelta(microseconds=addl_micros)", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def 
absolute_value(val):\n a = np.round(val/100.*np.array(cum_hours).sum(), 0)\n return a", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60" ]
[ "0.75997627", "0.6525403", "0.64160365", "0.6255736", "0.58985287", "0.58082616", "0.57936674", "0.573392", "0.569296", "0.56611925", "0.56431794", "0.56412184", "0.56401443", "0.5639377", "0.5544877", "0.5541361", "0.5524112", "0.5501755", "0.547358", "0.54723006", "0.54522437", "0.54519135", "0.5440847", "0.5408365", "0.54063404", "0.5403135", "0.5398996", "0.5394417", "0.53897077", "0.5384018" ]
0.7604547
0
Turn a value into a date and a timedelta which represents how long ago it was. If that's not possible, return (None, value).
def date_and_delta(value):
    now = datetime.datetime.now()
    if isinstance(value, datetime.datetime):
        date = value
        delta = now - value
    elif isinstance(value, datetime.timedelta):
        date = now - value
        delta = value
    else:
        try:
            value = int(value)
            delta = datetime.timedelta(seconds=value)
            date = now - delta
        except (ValueError, TypeError):
            return None, value
    return date, abs_timedelta(delta)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_and_delta(value):\r\n now = _now()\r\n if isinstance(value, datetime):\r\n date = value\r\n delta = now - value\r\n elif isinstance(value, timedelta):\r\n date = now - value\r\n delta = value\r\n else:\r\n try:\r\n value = int(value)\r\n delta = timedelta(seconds=value)\r\n date = now - delta\r\n except (ValueError, TypeError):\r\n return (None, value)\r\n return date, abs_timedelta(delta)", "def naturaltime(value):\n try:\n value = datetime.datetime(value.year, value.month, value.day, value.hour, value.minute, value.second)\n except AttributeError:\n return value\n except ValueError:\n return value\n\n if getattr(value, 'tzinfo', None):\n now = datetime.datetime.now(LocalTimezone(value))\n else:\n now = datetime.datetime.now()\n now = now - timedelta(0, 0, now.microsecond)\n if value < now:\n delta = now - value\n if delta.days != 0:\n return pgettext(\n 'naturaltime', '%(delta)s ago'\n ) % {'delta': defaultfilters.timesince(value)}\n elif delta.seconds == 0:\n return _(u'now')\n elif delta.seconds < 60:\n return ungettext(\n u'a second ago', u'%(count)s seconds ago', delta.seconds\n ) % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return ungettext(\n u'a minute ago', u'%(count)s minutes ago', count\n ) % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return ungettext(\n u'an hour ago', u'%(count)s hours ago', count\n ) % {'count': count}\n else:\n delta = value - now\n if delta.days != 0:\n return pgettext(\n 'naturaltime', '%(delta)s from now'\n ) % {'delta': defaultfilters.timeuntil(value)}\n elif delta.seconds == 0:\n return _(u'now')\n elif delta.seconds < 60:\n return ungettext(\n u'a second from now', u'%(count)s seconds from now', delta.seconds\n ) % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return ungettext(\n u'a minute from now', u'%(count)s minutes from now', count\n ) % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return ungettext(\n u'an hour from now', u'%(count)s hours from now', count\n ) % {'count': count}", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the future\")\n return now - then", "def naturaltime(value, future=False, months=True):\r\n now = _now()\r\n date, delta = date_and_delta(value)\r\n if date is None:\r\n return value\r\n # determine tense by value only if datetime/timedelta were passed\r\n if isinstance(value, (datetime, timedelta)):\r\n future = date > now\r\n\r\n ago = _('%s from now') if future else _('%s ago')\r\n delta = naturaldelta(delta)\r\n\r\n if delta == _(\"a moment\"):\r\n return _(\"now\")\r\n\r\n return ago % delta", "def make_datetime(value):\n if value:\n return value\n return None", "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def _parse_date(value):\n # Check for day-month pattern\n day_month_text = re.match(\"^(\\d{1,2})-(\\d{2})$\", value)\n if day_month_text:\n day = int(day_month_text.group(1))\n month = int(day_month_text.group(2))\n return datetime(datetime.now().year, month, day)\n\n # I assume Polish locale\n parts = value.strip().split(maxsplit=1)\n amount = int(parts[0])\n for hour_part in TIMEDELTA_HOURS:\n if hour_part in parts[1]:\n delta = timedelta(hours=amount)\n break\n else:\n for minute_part in 
TIMEDELTA_MINS:\n if minute_part in parts[1]:\n delta = timedelta(minutes=amount)\n break\n return datetime.now() - delta", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def time_since(dt, default=\"just now\"):\n\t\n\tnow = datetime.utcnow()\n\tdiff = now - dt\n\t\n\tperiods = (\n\t\t(diff.days / 365, \"year\", \"years\"),\n\t\t(diff.days / 30, \"month\", \"months\"),\n\t\t(diff.days / 7, \"week\", \"weeks\"),\n\t\t(diff.days, \"day\", \"days\"),\n\t\t(diff.seconds / 3600, \"hour\", \"hours\"),\n\t\t(diff.seconds / 60, \"minute\", \"minutes\"),\n\t\t(diff.seconds, \"second\", \"seconds\"),\n\t)\n\n\tfor period, singular, plural in periods:\n\t\tif period:\n\t\t\treturn \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n\treturn default", "async def _parse_value(self, responses: SourceResponses) -> Value:\n commit_responses = responses[1:]\n return str(days_ago(max([parse((await response.json())[\"committed_date\"]) for response in commit_responses])))", "def timesince(dt, default=\"just now\"):\n\n now = datetime.datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "def howLongAgo(time=False):\n now = timezone.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"genau jetzt\"\n if second_diff < 60:\n return \"vor \" + str(second_diff) + \" Sek.\"\n if second_diff < 120:\n return \"vor einer Min.\"\n if second_diff < 3600:\n return \"vor \" + str( second_diff / 60 ) + \" Min.\"\n if second_diff < 7200:\n return \"vor einer St.\"\n if second_diff < 86400:\n return \"vor \" + str( second_diff / 3600 ) + \" St.\"\n if day_diff == 1:\n return \"Gestern\"\n if day_diff < 7:\n return \"vor \" + str(day_diff) + \" Tagen\"\n if day_diff < 31:\n return \"vor \" + str(day_diff/7) + \" 
Wochen\"\n if day_diff < 365:\n return \"vor \" + str(day_diff/30) + \" Monaten\"\n return \"vor \" + str(day_diff/365) + \" Jahren\"", "def shorttimesince(value, arg=None):\r\n from django.utils.timesince import timesince\r\n if not value:\r\n return u''\r\n if arg:\r\n return calculate_shorttimesince(arg, value)\r\n return calculate_shorttimesince(value)", "def pretty_date(time=False):\n now = datetime.datetime.utcnow()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif isinstance(time, datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n day_diff *= -1\n second_diff *= -1\n if day_diff < 1:\n if second_diff < 10:\n return ugettext('imminently')\n if second_diff < 60:\n return ungettext('{n} second from now', '{n} seconds from now', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('in a minute')\n if second_diff < 3600:\n return ungettext('{n} minute from now', '{n} minutes from now', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('in an hour')\n if second_diff < 86400:\n return ungettext('{n} hour from now', '{n} hours from now', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('tomorrow')\n if day_diff < 7:\n return ungettext('{n} day from now', '{n} days from now', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week from now', '{n} weeks from now', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month from now', '{n} months from now', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year from now', '{n} years from now', day_diff / 365).format(n=day_diff / 365)\n\n if day_diff == 0:\n if second_diff < 10:\n return ugettext('just now')\n if second_diff < 60:\n return ungettext('{n} second ago', '{n} seconds ago', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('a minute ago')\n if second_diff < 3600:\n return ungettext('{n} minute ago', '{n} minutes ago', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('an hour ago')\n if second_diff < 86400:\n return ungettext('{n} hour ago', '{n} hours ago', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('yesterday')\n if day_diff < 7:\n return ungettext('{n} day ago', '{n} days ago', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week ago', '{n} weeks ago', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month ago', '{n} months ago', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year ago', '{n} years ago', day_diff / 365).format(n=day_diff / 365)", "def abs_timedelta(delta):\n if delta.days < 0:\n now = datetime.datetime.now()\n return now - (now + delta)\n return delta", "def naturaltime(value):\n if not isinstance(value, date): # datetime is a subclass of date\n return value\n\n now = datetime.now(utc if is_aware(value) else None)\n if value < now:\n delta = now - value\n if delta.days != 0:\n return 'hace %(delta)s' % {'delta': defaultfilters.timesince(value)}\n elif delta.seconds == 0:\n return 'ahora'\n elif delta.seconds < 60:\n return u'hace %(count)s segundos' % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return u'hace %(count)s minutos' % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return 
u'hace %(count)s horas' % {'count': count}", "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 0))) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' '.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def handle(self, value, context: typing.MutableMapping):\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, int):\n return timedelta(milliseconds=int(value * self.resolution))\n try:\n return timedelta(\n milliseconds=int(Decimal(value) * self.resolution))\n except (ValueError, InvalidOperation):\n pass\n\n match = self.duration_re.match(value)\n if not match:\n self.report(value, context)\n return None\n\n params = {\n key: int(value)\n for key, value in match.groupdict().items()\n if value\n }\n return timedelta(**params)", "def timedelta_filter(date_value, **kwargs):\n\n current_date = parse_datetime(date_value)\n return (current_date - timedelta(**kwargs))", "def get_entry_date(input):\n if input and \"date\" in input.keys():\n return input[\"date\"], input[\"date\"] + timedelta(minutes=1)\n return None, None", "def to_timedelta(value) -> 
timedelta:\n\n # For values >=24hrs, Pandas converts them to a datetime object.\n # For values <24hrs, Pandas converts them to time object.\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, datetime):\n return value - datetime(1900, 1, 1) + timedelta(hours=24)\n elif isinstance(value, time):\n return datetime.combine(date.min, value) - datetime.min\n elif isinstance(value, str):\n duration_regex = re.compile(\n r\"^(?P<sign>-?)(?P<hours>[0-9]+?):(?P<minutes>[0-9]{2})$\"\n )\n parts = duration_regex.match(value.strip())\n if parts is not None:\n sign = parts.group(\"sign\")\n hours = float(parts.group(\"hours\"))\n minutes = float(parts.group(\"minutes\"))\n if sign == \"-\":\n hours = hours * (-1)\n minutes = minutes * (-1)\n return timedelta(hours=hours, minutes=minutes)\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta \"\n \"object. \"\n f\"Values was {value} and type was {type(value)}.\"\n )\n\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta object. \"\n f\"Value was {value} and type was {type(value)}.\"\n )\n\n return timedelta(0)", "def _subtract_times(self, a, b):\n td = a - b\n return td.days * 24 * 60 + td.seconds // 60", "def relative_datetime(self):\n now = datetime.now(timezone.utc)\n created_at = self.created_at.astimezone(timezone.utc)\n\n delta = humanize.naturaldelta(abs(created_at - now))\n tense = \"from now\" if now < created_at else \"ago\"\n\n return f\"{delta} {tense}\"", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def naturaldelta(value, months=True):\r\n now = _now()\r\n date, delta = date_and_delta(value)\r\n if date is None:\r\n return value\r\n\r\n use_months = months\r\n\r\n seconds = abs(delta.seconds)\r\n days = abs(delta.days)\r\n years = days // 365\r\n days = days % 365\r\n months = int(days // 30.5)\r\n\r\n if not years and days < 1:\r\n if seconds == 0:\r\n return _(\"a moment\")\r\n elif seconds == 1:\r\n return _(\"a second\")\r\n elif seconds < 60:\r\n return ngettext(\"%d second\", \"%d seconds\", seconds) % seconds\r\n elif 60 <= seconds < 120:\r\n return _(\"a minute\")\r\n elif 120 <= seconds < 3600:\r\n minutes = seconds // 60\r\n return ngettext(\"%d minute\", \"%d minutes\", minutes) % minutes\r\n elif 3600 <= seconds < 3600 * 2:\r\n return _(\"an hour\")\r\n elif 3600 < seconds:\r\n hours = seconds // 3600\r\n return ngettext(\"%d hour\", \"%d hours\", hours) % hours\r\n elif years == 0:\r\n if days == 1:\r\n return _(\"a day\")\r\n if not use_months:\r\n return ngettext(\"%d day\", \"%d days\", days) % days\r\n else:\r\n if not months:\r\n return ngettext(\"%d day\", \"%d days\", days) % days\r\n elif months == 1:\r\n return _(\"a month\")\r\n else:\r\n return ngettext(\"%d month\", \"%d months\", months) % months\r\n elif years == 1:\r\n if not months and not days:\r\n return _(\"a year\")\r\n elif not months:\r\n return ngettext(\"1 year, %d day\", \"1 year, %d days\", days) % days\r\n elif use_months:\r\n if months == 1:\r\n return _(\"1 year, 1 month\")\r\n else:\r\n return ngettext(\"1 year, %d month\",\r\n \"1 year, %d months\", months) % months\r\n else:\r\n return ngettext(\"1 year, %d day\", \"1 year, %d days\", days) % days\r\n else:\r\n return ngettext(\"%d year\", \"%d years\", years) % years", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - 
datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def timesince_limited(d):\n today = datetime.datetime.now()\n delta = datetime.timedelta\n interval = today - d\n if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):\n if interval < delta(days=0, hours=1):\n return timesince(d) + ' ago '\n else:\n return d.strftime('%H:%M')\n else:\n return d" ]
[ "0.77345294", "0.6244832", "0.6088585", "0.577737", "0.5701926", "0.56636906", "0.56362903", "0.559285", "0.5573411", "0.5541884", "0.5530724", "0.55193424", "0.55110997", "0.54977155", "0.54975235", "0.54929054", "0.5488044", "0.54846984", "0.545197", "0.5361616", "0.5360478", "0.5347188", "0.5314906", "0.5292867", "0.5246696", "0.52228487", "0.52108926", "0.5202497", "0.5199447", "0.5193448" ]
0.7786376
0
Return the Hamming distance between equal-length sequences
def __hamming_distance(s1, s2):
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    return sum(el1 != el2 for el1, el2 in zip(s1, s2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def hamming_dist(bytes1, bytes2):\n if type(bytes1) == str:\n bytes1 = [ord(c) for c in str1]\n if type(bytes2) == str:\n bytes2 = [ord(c) for c in str2]\n bins = [bin(o1 ^ o2) for o1, o2 in zip(bytes1, bytes2)]\n return len([i for i in ''.join(bins) if i == '1'])", "def HammingDistance(pattern1, pattern2):\n distance = 0\n if len(pattern1) == len(pattern2):\n for i in range(len(pattern1)):\n if pattern1[i]!=pattern2[i]:\n distance += 1\n return distance\n else:\n 
assert 0, \"Two patterns have different lengths.\"", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamming_distance(str1, str2):\n\n # TODO: Write your solution here\n # Edge case check\n if len(str1) != len(str2):\n return None\n\n count = 0\n for index in range(len(str1)):\n if str1[index] != str2[index]:\n count += 1\n\n if count is 0:\n return None\n\n return count", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def hamming_distance(StringA,StringB):\n if len(StringA) != String(B):\n raise ValueError(\"The length of sequences are not equal!\")\n return sum(x !=y for (x,y) in zip(StringA,StringB))", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def hamming_distance(string_a: str, string_b: str) -> int:\n if len(string_a) != len(string_b):\n raise ValueError(\n \"Strings are of unequal length can not compute hamming distance. Hamming distance is undefined.\"\n )\n return sum(char_1 != char_2 for char_1, char_2 in zip(string_a, string_b))", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def HammingDist(str1, str2):\n\tHdist = 0\n\tfor i, base in enumerate(str1):\n\t\tif base != str2[i]:\n\t\t\tHdist += 1\n\n\treturn Hdist", "def hamming(a, b):\n len1 = len(a)\n len2 = len(b)\n overlap = min(len1, len2)\n difference = abs(len1 - len2)\n for x in range(overlap):\n if a[x] != b[x]:\n difference += 1\n\n return difference" ]
[ "0.7564989", "0.7524423", "0.7510748", "0.7498424", "0.73834527", "0.72914463", "0.7283258", "0.72027653", "0.719384", "0.7186409", "0.7184023", "0.7142286", "0.7130274", "0.71298635", "0.70760477", "0.70056623", "0.6964983", "0.69032145", "0.6894841", "0.68800247", "0.68678236", "0.6847591", "0.6843833", "0.68348813", "0.68032765", "0.679861", "0.6798262", "0.6720903", "0.6690633", "0.6689964" ]
0.7553281
1
return checkpoints for recomputing
def get_checkpoints(self):
    # recompute checkpoints
    return self._checkpoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint():", "def checkpoint_set():\n checkpoints.append(list())", "def get_all_overall_checkpoint(cls):\n return cls.create_all_overall_checkpoint()", "def get_checkpoint_list(cls):\n return cls.create_checkpoint_list()", "def checkpoint(self):\r\n return self._checkpoint", "def finish_checkpoint(self):\n return self.this_evaluation.checkpoint", "def create_all_overall_checkpoint(cls):\n return DB.read_all_overall_checkpoint()", "def callstack_now():\n return checkpoints[-1]", "def parse_checkpoint(self):\n pass", "def checkpoint(self):\n return self.__checkpoint", "def get_checkpoint_snapshot(self):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n checkpoint = self.state.get()\n if checkpoint:\n checkpoint = json.loads(checkpoint)\n checkpoint = checkpoint.get(\"snapshot\")\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list fetched successfully.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n else:\n checkpoint = []\n self.state.post(json.dumps({\"snapshot\": checkpoint}))\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list not found. Created new checkpoint list.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return checkpoint\n except Exception as ex:\n self.applogger.error(\n '{}(method={}) : {} : Unexpected error while getting checkpoint list: err=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, str(ex)\n )\n )\n raise Exception(ex)", "def get_checkpoint_data(self) -> Dict[str, Any]:\n # get ckpt file path from config.trainer.params.resume_from_checkpoint\n path = self.config.trainer.params.get(\"resume_from_checkpoint\", None)\n if path is not None:\n is_zoo = self.is_zoo_path(path)\n ckpt_filepath = path\n if is_zoo:\n folder = download_pretrained_model(path)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = None\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }\n\n is_zoo = False\n config = None\n ckpt = None\n # get ckpt file path from config.checkpoint\n ckpt_config = self.config.checkpoint\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n path = os.path.join(get_mmf_env(key=\"save_dir\"), suffix)\n ckpt_filepath = None\n resume_from_specified_path = (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(path))\n if resume_from_specified_path:\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n ckpt_filepath = ckpt_config.resume_file\n elif ckpt_config.resume_zoo is not None:\n is_zoo = True\n folder = download_pretrained_model(ckpt_config.resume_zoo)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume and PathManager.exists(path):\n ckpt_filepath = path\n\n if ckpt_filepath is not None:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }", "def get_checkpoint(self, metrics):\n assert all(metric in metrics for metric in [\"acc1\", \"acc5\", \"acc10\", \"unsupervised\", 
\"total\"]), \"Not all metrics found\"\n checkpoint = OrderedDict()\n for metric in metrics:\n checkpoint[metric] = metrics[metric]\n checkpoint['map_params'] = self.transform.state_dict()\n return checkpoint", "def _restore(self):\n\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1]) # Robust enough?\n return restored_step\n logging.info('Starting training from scratch.')\n return 0", "def find_latest_checkpoint(self) -> Tuple[str, str]:\n return {}", "def create_checkpoint_list(cls):\n checkpoint_data = DB.read_checkpoint_record_list()\n return [Checkpoint(*checkpoint) for checkpoint in checkpoint_data]", "def get_checkpoint():\n\timport numpy as np\n\n\tcheckpoint = []\n\tfor directory in directories:\n\t\ttry: # try to find folder\n\t\t\tos.chdir('./'+directory)\n\t\texcept:\n\t\t\tcontinue\n\t\tcontents = os.listdir('./')\n\t\tif contents == []: # if folder is empty\n\t\t\tprint(\"No data for\", directory)\n\t\t\tos.chdir('..')\n\t\t\tcontinue\n\t\tcounter = []\n\t\tfor entry in contents:\n\t\t\tentry = entry.split('.')\n\t\t\tnum = entry[0][2:]\n\t\t\ttry: # excludes files that aren't of type x-y.jpg\n\t\t\t\tnum = int(num)\n\t\t\t\tcounter.append(num)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\tcheckpoint.append(max(counter))\n\t\tos.chdir('..')\n\tcheckpoint = np.mean(checkpoint)\n\treturn checkpoint", "def find_checkpoint(self, checkpoints, current_time):\n checkpoint_line_len = N.zeros(len(checkpoints), dtype=float)\n checkpoint_distances = N.zeros(len(checkpoints), dtype=float)\n checkpoint_chosen = False\n\n for i in range(len(checkpoints)):\n checkpoint_line_len[i] = checkpoints[i].get_line_length()\n checkpoint_distances[i] = self._calc_distance(checkpoints[i].location)\n \n min_length = N.min(checkpoint_line_len)\n min_dist = N.min(checkpoint_distances)\n # If the min_length of all lines is > 0, divide all lengths by the min_length\n if (min_length > 0):\n checkpoint_line_len = checkpoint_line_len / min_length\n # Same idea for the distances\n if (min_dist > 0):\n checkpoint_ratios = checkpoint_distances / min_dist\n else:\n checkpoint_ratios = checkpoint_distances\n \n # Add these values together, and choose the smallest value\n checkpoint_rankings = checkpoint_ratios + checkpoint_line_len\n min_index = N.argmin(checkpoint_rankings)\n # found the target checkpoint, set that as the target_checkpoint\n checkpoint_candidate = checkpoints[min_index]\n if self.checkpoint_target is None or self.checkpoint_target is not checkpoint_candidate:\n if self.checkpoint_target is not None:\n print(\"Attendee:\", self.attendee_id, \"has changed checkpoint target from:\",\\\n self.checkpoint_target.get_location(), \"to checkpoint at:\",\\\n checkpoint_candidate.get_location())\n self.checkpoint_target = checkpoint_candidate\n self._calc_checkpoint_arrival(checkpoint_distances[min_index], current_time)\n self._set_checkpoint_vector(self.checkpoint_target.get_location())\n \n return self.checkpoint_target", "def variable_progression():\n\t# files = glob.glob('parameter_checkpoints/epoch-*[!.meta]')\n\tfiles = glob.glob('parameter_checkpoints/epoch-*')\n\n\t# reorder epochs by 'human order' otherwise it would order it as 1,110,12,...\n\t# http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n\tdef atoi(text):\n\t return int(text) if text.isdigit() else text\n\n\tdef 
natural_keys(text):\n\t '''\n\t alist.sort(key=natural_keys) sorts in human order\n\t http://nedbatchelder.com/blog/200712/human_sorting.html\n\t (See Toothy's implementation in the comments)\n\t '''\n\t return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\n\tfiles.sort(key=natural_keys)\n\n\tx, W, bh, bv = rbm.get_variables()\n\ttrainable_vars = [W, bh, bv]\n\n\tsaver = tf.train.Saver(trainable_vars)\t# restore the weights and biases of the trained model\n\n\tweights = []\n\tbhs = []\n\tbvs = []\n\twith tf.Session() as sess:\n\t\tinit = tf.initialize_all_variables()\t\n\t\tsess.run(init)\n\t\t# iterate through each saved epoch checkpoint, and add the W, bh, and bv matrices to their\n\t\t# respective lists\n\t\tfor f in files:\n\t\t\tsaver.restore(sess, f)\t\t# load the saved weights and biases from a given epoch checkpoint file\n\t\t\tweights.append(W.eval())\t\n\t\t\tbhs.append(bh.eval())\n\t\t\tbvs.append(bv.eval())\n\n\treturn weights, bhs, bvs", "def fit(self):\n # Iterate and train.\n step_file = self.checkpointer.get_step_file()\n start_step = Pickle.load(open(step_file, 'rb'))\n for step in xrange(start_step, self.train_size // self.train_batch_size):\n print 'Step No.:', step\n # Checkpoint tensorflow variables for recovery\n if step % self.checkpointer.get_checkpoint_steps() == 0:\n print 'Checkpointing: Saving Tensorflow variables'\n self.saver.save(self.sess, self.checkpointer.get_save_address())\n Pickle.dump(step + 1, open(step_file, 'wb'))\n print 'Checkpointing Complete. Deleting historical checkpoints....'\n self.checkpointer.delete_previous_checkpoints(num_previous=2)\n print 'Deleted.. Moving forward...'\n\n offset = (step * self.train_batch_size) % self.train_size\n batch_data_fwd = self.X_trn_fwd[offset:(offset + self.train_batch_size), :].T\n batch_data_bwd = self.X_trn_bwd[offset:(offset + self.train_batch_size), :].T\n batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T\n\n loss_t_forward, loss_t_backward = self._train_batch(batch_data_fwd, batch_data_bwd, batch_labels)\n print \"Present Loss Forward:\", loss_t_forward\n print \"Present Loss Backward:\", loss_t_backward\n\n # check results on 2 tasks - Visual Validation\n print 'Train Data Validation\\n'\n self._visual_validate(self.X_trn_fwd[301, :], self.X_trn_bwd[301, :], self.Y_trn[301, :])\n print\n print\n print 'Test Data Validation\\n'\n self._visual_validate(self.X_tst_fwd[56, :], self.X_tst_bwd[56, :], self.Y_tst[56, :])\n print\n print\n\n # Store prediction after certain number of steps #############\n # This will be useful for the graph construction\n '''\n if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0):\n self.predict()\n self.store_test_predictions('_' + str(step))\n '''", "def previous_saves(self):\n if os.path.exists(self.results_dir):\n return sorted([x for x in Path(self.results_dir).glob(f'{self.model_name}checkpoint_*.pk')], key=lambda s: int(s.name.replace(f'{self.model_name}checkpoint_', '').replace('.pk', '')))\n else:\n return []", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_checkpoints(args, model): \n print('Loading the model checkpoints 
from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def test_checkpoints(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\",\r\n suffix=\"_test_checkpoints/\")\r\n\r\n bestscores = dict({1: 0.9,\r\n 2: 1.1,\r\n 3: 2.3,\r\n 4: 99.93232344})\r\n\r\n out_fp = write_checkpoint(\r\n \"Key\", 99, self.mapping, [1, 2, 3, 4], bestscores,\r\n [2, 1, 3, 4],\r\n self.tmpdir)\r\n\r\n observed = read_checkpoint(out_fp)\r\n\r\n self.assertEqual(observed[0], \"Key\")\r\n self.assertEqual(observed[1], 99)\r\n self.assertEqual(observed[2], self.mapping)\r\n self.assertEqual(observed[3], [1, 2, 3, 4])\r\n self.assertEqual(observed[4], bestscores)\r\n self.assertEqual(observed[5], [2, 1, 3, 4])", "def testCheckpointContinuationValidity(self):\n\n # Train once, get checkpoint via callback returns\n res_1 = {}\n bst_1 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=2,\n ray_params=RayParams(num_actors=2),\n additional_results=res_1)\n last_checkpoint_1 = res_1[\"callback_returns\"][0][-1]\n last_checkpoint_other_rank_1 = res_1[\"callback_returns\"][1][-1]\n\n # Sanity check\n lc1 = xgb.Booster()\n lc1.load_model(last_checkpoint_1)\n self.assertEqual(last_checkpoint_1, last_checkpoint_other_rank_1)\n self.assertEqual(last_checkpoint_1, lc1.save_raw())\n self.assertEqual(bst_1.get_dump(), lc1.get_dump())\n\n # Start new training run, starting from existing model\n res_2 = {}\n bst_2 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=True),\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=4,\n ray_params=RayParams(num_actors=2),\n additional_results=res_2,\n xgb_model=lc1)\n first_checkpoint_2 = res_2[\"callback_returns\"][0][0]\n first_checkpoint_other_actor_2 = res_2[\"callback_returns\"][1][0]\n last_checkpoint_2 = res_2[\"callback_returns\"][0][-1]\n last_checkpoint_other_actor_2 = res_2[\"callback_returns\"][1][-1]\n\n fcp_bst = xgb.Booster()\n fcp_bst.load_model(first_checkpoint_2)\n\n lcp_bst = xgb.Booster()\n lcp_bst.load_model(last_checkpoint_2)\n\n # Sanity check\n self.assertEqual(first_checkpoint_2, first_checkpoint_other_actor_2)\n self.assertEqual(last_checkpoint_2, last_checkpoint_other_actor_2)\n self.assertEqual(bst_2.get_dump(), lcp_bst.get_dump())\n\n # Training should not have proceeded for the first checkpoint,\n # so trees should be equal\n self.assertEqual(lc1.get_dump(), fcp_bst.get_dump())\n\n # Training should have proceeded for the last checkpoint,\n # so trees should not be equal\n self.assertNotEqual(fcp_bst.get_dump(), lcp_bst.get_dump())", "async def checkpoint(cls) -> 
None:", "def train(stop_criteria, save_dir):\n analysis = ray.tune.run(ppo.PPOTrainer, config=config, local_dir=save_dir, stop=stop_criteria,\n checkpoint_at_end=True)\n # list of lists: one list per checkpoint; each checkpoint list contains 1st the path, 2nd the metric value\n trial = analysis.get_best_trial('episode_reward_mean', 'max', 'all', True)\n checkpoints = analysis.get_trial_checkpoints_paths(trial=trial, metric='episode_reward_mean')\n # retrieve the checkpoint path; we only have a single checkpoint, so take the first one\n checkpoint_path = checkpoints[0][0]\n return checkpoint_path, analysis", "def _restore_training_state(self, restore_state):\n self.load_state_dict(restore_state[\"model\"])\n self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n start_iteration = restore_state[\"iteration\"] + 1\n if self.config[\"verbose\"]:\n print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n if restore_state[\"best_model_found\"]:\n # Update checkpointer with appropriate information about best model\n # Note that the best model found so far may not be the model in the\n # checkpoint that is currently being loaded.\n self.checkpointer.best_model_found = True\n self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n self.checkpointer.best_score = restore_state[\"best_score\"]\n if self.config[\"verbose\"]:\n print(\n f\"Updated checkpointer: \"\n f\"best_score={self.checkpointer.best_score:.3f}, \"\n f\"best_iteration={self.checkpointer.best_iteration}\"\n )\n return start_iteration", "def _get_checkpoint(self):\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n else:\n raise RuntimeError('No checkpoint file found')\n return ckpt_path", "def __call__(self, save_fct):\n eval_scores = [\"Not evaluated\"]\n if self.train:\n logger.info(\"> Training\")\n self.train.run_training(save_fct = save_fct)\n logger.info('reverting learned weights to best checkpoint..')\n try:\n ParamManager.param_col.revert_to_best_model()\n except RevertingUnsavedModelException:\n pass\n\n evaluate_args = self.evaluate\n if evaluate_args:\n logger.info(\"> Performing final evaluation\")\n eval_scores = []\n for evaluator in evaluate_args:\n eval_score = evaluator.eval()\n if type(eval_score) == list:\n eval_scores.extend(eval_score)\n else:\n eval_scores.append(eval_score)\n\n return eval_scores" ]
[ "0.744063", "0.70907867", "0.7052293", "0.69212705", "0.68812984", "0.664399", "0.66409785", "0.6579985", "0.6512506", "0.65115094", "0.65036887", "0.63288766", "0.624046", "0.6220209", "0.62004256", "0.61839545", "0.61744016", "0.6170368", "0.6168887", "0.61377853", "0.6108591", "0.61012584", "0.596558", "0.5936436", "0.59361285", "0.59343374", "0.59013444", "0.5896348", "0.58896816", "0.5888293" ]
0.7678584
0