query
stringlengths 9
9.05k
| document
stringlengths 10
222k
| metadata
dict | negatives
sequencelengths 30
30
| negative_scores
sequencelengths 30
30
| document_score
stringlengths 4
10
| document_rank
stringclasses 2
values |
---|---|---|---|---|---|---|
Get the deb822 class to use based on obj | def get_deb822_cls(obj):
if isinstance(obj, basestring):
key = obj.split('/')[-1][:-len('.gz')]
elif isinstance(obj, dict):
# NOTE: Support a resource object
if 'type' in obj:
key = obj['type'].title()
else:
for map_key, cls_key in KEY_TO_NAME:
if map_key in [k.lower() for k in obj.keys()]:
key = cls_key
break
return SUPPORTED[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg",
"def get_obj_class(self, obj_class_name: str) -> ObjClass:\n return self._obj_classes.get(obj_class_name)",
"def _get_class(self, obj):\n\n object_type = obj.object_type\n\n 'Background class'\n object_class = 0\n\n # Don't care classes\n if object_type in ['DontCare', 'Person_sitting'] or obj.truncation > 0.75 or obj.occlusion > 1:\n object_class = 1\n\n # Vehicle classes\n elif object_type in ['Car', 'Van']:\n object_class = 2\n\n # Pedestrian class\n elif object_type in ['Pedestrian']: # TODO: Consider change this with ==\n object_class = 3\n\n # Cyclist class\n elif object_type in ['Cyclist']: # TODO: Consider change this with ==\n object_class = 4\n\n return object_class",
"def determine_object_type(obj):\n type_obj = \"Ethernet\"\n if isinstance(obj, dom_kvm.LinuxBridge):\n type_obj = BRIDGE\n elif isinstance(obj, dom_kvm.EthernetBond):\n type_obj = ETH_BOND\n elif isinstance(obj, dom_kvm.PhysicalPort):\n type_obj = ETHERNET\n elif isinstance(obj, dom_kvm.OpenVSwitch):\n type_obj = OVS_BR\n return type_obj",
"def typ(rxn_class):\n return rxn_class[0]",
"def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj",
"def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj",
"def _get_type(self, obj):\n typever = obj['Type']\n typesplit = typever.split('.')\n return typesplit[0] + '.' + typesplit[1]",
"def processObject(self, x):\n pickled = self.objToPickle(x)\n if pickled:\n return pickled\n return self.objToFQN(x)",
"def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]",
"def guess_type(object):\n # retrieve a list of classes\n classes = (\n re.match(\"<class '(.+)'>\", str(object.__class__)).groups()[0].split(\".\")\n )\n # Return the most specific one\n return classes[-1]",
"def get_class_name(obj) -> str:\n return obj.__class__.__name__",
"def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]",
"def getPythonObjectStrInQuotes( inObj ):\n classStr= str(inObj)\n reMatch= re.match( \".*?'(.*?)'.*\", classStr )\n return reMatch.groups()[0]",
"def get_message_class_by_type(msgtype):\n\n try:\n module = importlib.import_module('platypush.message.' + msgtype)\n except ImportError as e:\n logging.warning('Unsupported message type {}'.format(msgtype))\n raise RuntimeError(e)\n\n cls_name = msgtype[0].upper() + msgtype[1:]\n\n try:\n msgclass = getattr(module, cls_name)\n except AttributeError as e:\n logging.warning('No such class in {}: {}'.format(\n module.__name__, cls_name))\n raise RuntimeError(e)\n\n return msgclass",
"def get_obj(cls, tsb):\n if tsb in Instrument_CP.instrument_symbol_to_obj_map:\n return Instrument_CP.instrument_symbol_to_obj_map[tsb]\n else: \n return cls(tsb)",
"def object_decoder(obj):\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj",
"def get_class(self, name):\n if name in self._objects_mapping:\n classname = self._objects_mapping[name]\n\n klass = None\n try:\n klass = getattr(self._sdk, classname)\n except:\n Printer.raise_error('Unknown class %s' % classname)\n\n return klass\n\n Printer.raise_error('Unknown object named %s' % name)",
"def _get_by_str(self, obj_str):\n logging.debug('Trying to get object {} from file {}'\n .format(obj_str, self.filename()))\n return self.hfile.Get(obj_str)",
"def get_python_type(obj, format_type):\n t = type(obj)\n\n return t if format_type is None else t.__name__",
"def obj_for_name(obj_name):\n parts = obj_name.split('.')\n module = \".\".join(parts[:-1])\n m = __import__( module )\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m",
"def get_notification_string(self, obj):\n return str(obj)",
"def get_object(selenium, obj):\n return _get_ui_service(selenium, obj).get_obj_from_info_page(obj)",
"def objToFQN(self, x):\n try:\n fqn = reflect.fullyQualifiedName(x)\n reflect.namedObject(fqn)\n except:\n return\n return fqn",
"def get_cls_name(obj: Any, package_name: bool = True) -> str:\n cls_name = str(obj.__class__)\n # remove class prefix\n cls_name = cls_name.split('\\'')[1]\n # split modules\n cls_split = cls_name.split('.')\n if len(cls_split) > 1:\n cls_name = cls_split[0] + '.' + cls_split[-1] if package_name else cls_split[-1]\n else:\n cls_name = cls_split[0]\n return cls_name",
"def get_class_name_value(obj):\n if obj is None:\n return None\n\n t = obj.GetType()\n \"\"\":type: lldb.SBType\"\"\"\n if t is None:\n return None\n\n if t.IsPointerType():\n t = t.GetPointeeType()\n if t.IsReferenceType():\n t = t.GetDereferencedType()\n\n return None if t is None else t.GetName()",
"def fl_get_object_objclass(ptr_flobject):\n _fl_get_object_objclass = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_objclass\", \\\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"int fl_get_object_objclass(FL_OBJECT * obj) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_get_object_objclass(ptr_flobject)\n return retval",
"def nice_classname(obj):\n if inspect.isclass(obj):\n cls_name = obj.__name__\n else:\n cls_name = obj.__class__.__name__\n mod = inspect.getmodule(obj)\n if mod:\n name = mod.__name__\n # jython\n if name.startswith('org.python.core.'):\n name = name[len('org.python.core.'):]\n return \"%s.%s\" % (name, cls_name)\n else:\n return cls_name",
"def nice_classname(obj):\n if inspect.isclass(obj):\n cls_name = obj.__name__\n else:\n cls_name = obj.__class__.__name__\n mod = inspect.getmodule(obj)\n if mod:\n name = mod.__name__\n # jython\n if name.startswith('org.python.core.'):\n name = name[len('org.python.core.'):]\n return \"%s.%s\" % (name, cls_name)\n else:\n return cls_name"
] | [
"0.60468835",
"0.5737325",
"0.5626973",
"0.5541262",
"0.5488912",
"0.5470521",
"0.53820634",
"0.5379896",
"0.535658",
"0.5270914",
"0.52564365",
"0.52383137",
"0.52213126",
"0.5215957",
"0.51678175",
"0.51556087",
"0.5125791",
"0.5117053",
"0.5051719",
"0.504959",
"0.5039547",
"0.50351274",
"0.5002861",
"0.49759966",
"0.49615338",
"0.4956623",
"0.49364313",
"0.49151886",
"0.49109215",
"0.49109215"
] | 0.82478124 | 0 |
Get the indexes that represents this Distribution from the underlying Components | def get_indexes(self):
indexes = []
for c in self.components:
indexes.extend(c.get_indexes())
return indexes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def indices(self):\n return self.index.indices",
"def get_indices(self):\r\n return self._indices",
"def getIndices(self):\r\n return self._indices",
"def index(self):\n return self.data.index.values",
"def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]",
"def indexes(self) -> list:\n return self._indexes",
"def indices(self):\n return range(len(self))",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))",
"def buckets(self):\n return self.indexed",
"def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs",
"def indices(self):\n return self._kbounded_partitions",
"def get(self, *args):\n return _libsbml.ListOfSpeciesTypeComponentIndexes_get(self, *args)",
"def indexes(self):\n return getattr(self, '_indexes', None)",
"def ordered_indices(self):\n return self.d1.ordered_indices()\n # RETURN BASED ON D1's sizes",
"def get_index_array(self):\n return self.region_pairs",
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def ordered_indices(self):\n return self.base_dataset.ordered_indices()",
"def indexes(self):\n indexes = self.execute(self.commands.get_indexes(self.name))\n return [Index(*tup) for tup in indexes]",
"def get_multi_index(self):\n return self.basis.elements",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j",
"def childWellIndices(self):\n return self._wellIndices",
"def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes",
"def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes",
"def indices(self, position=None):\n \n raise NotImplementedError()",
"def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()"
] | [
"0.79678136",
"0.78836995",
"0.77245635",
"0.7669747",
"0.7342353",
"0.73129576",
"0.7257382",
"0.7190922",
"0.69870543",
"0.69729394",
"0.68934375",
"0.6891254",
"0.6873979",
"0.6869683",
"0.6851193",
"0.6825982",
"0.6809999",
"0.6809096",
"0.6807874",
"0.67803395",
"0.6763159",
"0.67563313",
"0.67374843",
"0.67008585",
"0.66964746",
"0.6692316",
"0.66753507",
"0.66639084",
"0.6642388",
"0.6633249"
] | 0.8461544 | 0 |
Add a package to a component | def add_package(self, component_name, package):
component = self.get_component(component_name)
component.add_package(package) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_package(self, package):\n obj = package if isinstance(package, Package) else Package(\n component=self, **package)\n self.data['packages'].append(obj)",
"def add_packages(self, component_name, packages):\n for pkg in packages:\n self.add_package(component_name, pkg)",
"def add_package ( self, package_info, addition_control, **pkg_add_kw ):\n return self._get_package_dir ( package_info ['name'] ).add_package (\n package_info, addition_control, **pkg_add_kw\n )",
"def test_add_package(self)-> None:\n my_deck = get_deck(deck_name='Test Deck Name')\n add_package(my_deck, 'output_name')\n pass",
"def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")",
"def register_packages(self, module_name, extra_package):\n self.creator_manager.register_packages(module_name, extra_package)",
"def add(self, component) -> None:\n pass",
"def add_arch_package(self, package_name: str) -> None:\n self.arch_packages.append(package_name)",
"def AddNewPackage(path, package_name, namespace):\n\n new_line = '<include package=\".%s\" />' % package_name\n fullpath=path + '/configure.zcml'\n\n try:\n fh=open(fullpath)\n lines=fh.readlines()\n for i in range(len(lines)):\n if len(lines[i]) == 0: continue\n if lines[i][-1] in ['\\n', '\\r']: lines[i] = lines[i][:-1]\n if len(lines[i]) == 0: continue\n if lines[i][-1] in ['\\n', '\\r']: lines[i] = lines[i][:-1]\n fh.close()\n except:\n lines=[\n '<configure',\n '\\txmlns=\"http://namespaces.zope.org/zope\"',\n '\\ti18n_domain=\"%s\">\\n' % namespace, \n '\\t<!-- END OF PACKAGES -->',\n '\\n</configure>']\n\n\n # See if our file is present\n for l in lines:\n if l.find(new_line) != -1:\n return # all done\n\n # Find the marker and insert at that point\n for i in range(len(lines)):\n l = lines[i]\n if l.find('<!-- END OF PACKAGES -->') != -1:\n lines = lines[:i] + ['\\t'+new_line] + lines[i:]\n break\n\n # rewrite the file \n if os.access(fullpath, os.F_OK):\n backup=fullpath+'~'\n if os.access(backup, os.F_OK):\n os.unlink(backup)\n os.rename(fullpath, backup)\n\n fh = open(fullpath, 'w')\n fh.write('\\n'.join([l.replace('\\t', ' ') for l in lines]))\n fh.close()",
"def _add_package(self, pkg):\n for dep in pkg.dependency:\n for key in self.registry.keys():\n if key[0] == dep:\n break\n else:\n raise RuntimeError(\n 'Package %s has unresolved dependency issues: %s' %\n (pkg.name, dep))\n self.package_dependency.add_edge(pkg.name, dep)\n self.package_dependency.add_vertex(pkg.name)\n for key, task in pkg.tasks.iteritems():\n self.registry[key, pkg.version] = pkg\n self.registry[key, None] = pkg\n self.registry[pkg.name, pkg.version] = pkg\n\n # mark this package as the latest one\n self.registry[pkg.name, None] = pkg",
"def add_component(self, lib_component):\n comp_name = lib_component.name\n try:\n comp = self.__component_list[comp_name]\n except KeyError:\n self.__component_list[comp_name] = lib_component",
"def set_package(self, pkg): \n self.pkg = pkg",
"def add_package(\n self,\n package,\n node_paths=None, \n type_option=PackageInstallationTypeOption.PROD,\n version_option=None):\n args=self._get_add_package_args(\n package,\n type_option=type_option,\n version_option=version_option)\n return self.run_command(args=args, node_paths=node_paths)",
"def do_package(package):\n\tn_ucr = extFile(package, 'univention-config-registry')\n\tif not os.path.exists(n_ucr):\n\t\treturn\n\n\tf_ucr = open(n_ucr, 'r')\n\n\tfor item in univention.config_registry.parseRfc822(f_ucr.read()):\n\t\ttyp = item['Type'][0]\n\t\tif typ == 'file':\n\t\t\tf = item['File'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'subfile':\n\t\t\tf = item['Subfile'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\telif typ == 'multifile':\n\t\t\tf = item['Multifile'][0]\n\t\t\tif os.path.exists(f):\n\t\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'script':\n\t\t\tf = item['Script'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'scripts'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'scripts'))\n\t\t\tdoIt('chmod', 'a+x', destPath(f, package, 'scripts'))\n\t\telif typ == 'module':\n\t\t\tf = item['Module'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'modules'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'modules'))\n\t\telse:\n\t\t\tprint >>sys.stderr, 'Unknown type: %s' % typ\n\t\t\treturn\n\n\tf_ucr.close()\n\n\tdoIt('install', '-d', destDir('', package, 'info'))\n\tdoIt('install', '-m644', n_ucr, destPath(package+'.info', package, 'info'))\n\tmapping_file = extFile( package, 
'univention-config-registry-mapping')\n\tif os.path.exists(mapping_file):\n\t\tdoIt('install', '-d', destDir('', package, 'mapping'))\n\t\tdoIt('install', '-m644', mapping_file, destPath(package+'.univention-config-registry-mapping', package, 'mapping'))\n\n\tdata = {\n\t\t\t'pkg': quote(package),\n\t\t\t'info': quote(\"/etc/univention/templates/info/%s.info\" % package),\n\t\t\t'removed': quote(\"/etc/univention/templates/removed/%s.info\" % package),\n\t\t\t}\n\n\tf_preinst = open(extFile(package, 'preinst.debhelper'), 'a')\n\tf_preinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_preinst.write('if [ \"$1\" = \"install\" ] ; then\\n')\n\tf_preinst.write(' [ -e %(removed)s ] && [ ! -e %(info)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_preinst.write('fi\\n')\n\tf_preinst.write('# End automatically added section\\n')\n\tf_preinst.close()\n\n\tf_postinst = open(extFile(package, 'postinst.debhelper'), 'a')\n\tf_postinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_postinst.write('if [ \"$1\" = \"abort-remove\" ]; then\\n')\n\tf_postinst.write(' [ -e %(removed)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_postinst.write('fi\\n')\n\tf_postinst.write('[ -x /usr/sbin/univention-config-registry ] && univention-config-registry register %(pkg)s || true\\n' % data)\n\tf_postinst.write('# End automatically added section\\n')\n\tf_postinst.close()\n\n\tf_prerm = open(extFile(package, 'prerm.debhelper'), 'a')\n\tf_prerm.write('# Automatically added by univention-install-config-registry\\n')\n\tf_prerm.write('if [ \"$1\" = \"remove\" ] && [ -e %(info)s ] ; then\\n' % data)\n\tf_prerm.write(' [ -x /usr/sbin/univention-config-registry ] && univention-config-registry unregister %(pkg)s || true\\n' % data)\n\tf_prerm.write(' mv %(info)s %(removed)s || true\\n' % data)\n\tf_prerm.write('fi\\n')\n\tf_prerm.write('# End automatically added section\\n')\n\tf_prerm.close()\n\n\tdoIt('perl', '-e', 'use 
Debian::Debhelper::Dh_Lib;addsubstvar(\"%s\", \"misc:Depends\", \"univention-config (>= 7.0.25)\");' % package)",
"def add_package(self, package_metadata):\n if package_metadata.name not in self._name_to_packages:\n bisect.insort(self._names, package_metadata.name)\n\n self._name_to_packages.setdefault(\n package_metadata.name, self._default_factory()\n )\n self._name_to_packages[package_metadata.name].append(package_metadata)\n # Fixme: this should not be that costly as long as we don't have\n # many versions for a given package.\n self._name_to_packages[package_metadata.name].sort(\n key=operator.attrgetter(\"version\")\n )",
"def add_package(self, pkg_name, pkg):\n pkg_name = pkg_name.lower()\n if pkg_name not in self.zbnam_packages:\n if pkg_name == \"cbc\":\n pkg_name = \"bud\"\n else:\n raise KeyError(\n f\"{pkg_name} package is not valid for zonebudget\"\n )\n\n if isinstance(pkg, str):\n if os.path.exists(os.path.join(self._model_ws, pkg)):\n pkg = os.path.join(self._model_ws, pkg)\n\n func = self.zbnam_packages[pkg_name]\n if pkg_name in (\"bud\", \"grb\"):\n pkg = func(pkg, precision=\"double\")\n else:\n pkg = func.load(pkg, self)\n\n else:\n pass\n\n pkg_name = f\"_{pkg_name}\"\n self.__setattr__(pkg_name, pkg)\n if pkg is not None:\n self.package_dict[pkg_name[1:]] = pkg",
"def register_command(app: MaraApp, command: click.Command, package: str):\n if isinstance(command, click.MultiCommand):\n app.cli.add_command(command)\n else:\n command.name = package + '.' + command.name\n app.cli.add_command(command)",
"def add_debian_package(self, package_name: str) -> None:\n self.debian_packages.append(package_name)",
"def add_package_type(self, name, handler):\n if not interpret_string(name):\n raise RelengInvalidSetupException('invalid package name provided')\n name_key = name.lower()\n if not name_key.startswith(PREFIX_REQUIREMENT):\n raise RelengInvalidSetupException('extension-defined package types '\n 'must be prefixed with \"{}\"'.format(PREFIX_REQUIREMENT))\n if name_key in self.package_types:\n raise RelengInvalidSetupException('extension package type {} '\n 'is already defined by another extension'.format(name))\n if not inspect.isclass(handler):\n raise RelengInvalidSetupException('handler is not a class')\n package_type = handler()\n build_op = getattr(package_type, 'build', None)\n configure_op = getattr(package_type, 'configure', None)\n install_op = getattr(package_type, 'install', None)\n if (not callable(build_op) or not callable(configure_op) or\n not callable(install_op)):\n raise RelengInvalidSetupException('package type does not defined '\n 'required method(s)')\n self.package_types[name_key] = package_type",
"def modifypackage(folder, package):\n form = AddPackageForm()\n\n # if it's not \"add\", we should try to load an existing package\n if package != \"add\":\n INFO_PATH = DATA + folder + \"/packages/\" + package + \"/info.json\"\n\n # try to open the file\n try:\n with open(INFO_PATH, \"r\") as inp:\n d = json.load(inp)\n contents = defaultdict(str, d)\n\n # read in all properties\n form.name.data = package\n form.name.render_kw = {'disabled':''}\n form.title.data = contents[\"title\"]\n form.author.data = contents[\"author\"]\n form.category.data = contents[\"category\"]\n form.version.data = contents[\"version\"]\n form.short_desc.data = contents[\"description\"]\n form.details.data = contents[\"details\"].replace(\"\\\\n\", \"\\n\")\n form.url.data = contents[\"url\"]\n form.license.data = contents[\"license\"]\n except IOError:\n return render_template('404.html', resource=package), 404\n\n if form.validate_on_submit():\n\n # make the directory for this package\n PKG_PATH = DATA + folder + \"/\" + form.name.data + \"/\"\n os.mkdir(PKG_PATH)\n\n flash('Package created successfully')\n\n # redirect to the login page\n return redirect(url_for('home.dashboard'))\n\n # load registration template\n return render_template('home/add.html', form=form, title='Package', target=package)",
"def notify_add_package(self, pkg):\n ver_key = (pkg.category, pkg.package)\n s = set(self.versions.get(ver_key, ()))\n s.add(pkg.fullver)\n if pkg.category not in self.categories:\n self.categories.force_add(pkg.category)\n self.packages.force_regen(pkg.category)\n self.versions.force_regen(ver_key, tuple(s))",
"def add_packages(self, packages):\n for p in packages:\n self.add_package(p)",
"def add_package_import(self, package):\n self._package_imports.add(package)",
"def add_new_package(self, package, skip='', description=''):\n with self._conn.begin():\n return self._conn.execute(\n \"VALUES (add_new_package(%s, %s, %s))\", (package, skip,\n description)).scalar()",
"def package(self, package):\n\n self._package = package",
"def add(package, config):\n\n def parse_package(text):\n splitted = text.split('/', 2)\n if len(splitted) == 3:\n namespace, name, source = splitted\n else:\n namespace, name = splitted\n source = None\n return dict(namespace=namespace,\n name=name,\n source=source)\n\n rows = map(parse_package, package)\n\n _add_changelogs(config, rows)",
"def add_package(self, package_item):\r\n self.RequestedShipment.RequestedPackageLineItems.append(package_item)\r\n package_weight = package_item.Weight.Value\r\n self.RequestedShipment.TotalWeight.Value += package_weight\r\n self.RequestedShipment.PackageCount += 1",
"def add_package(self, package_item):\r\n self.RequestedShipment.RequestedPackageLineItems.append(package_item)\r\n package_weight = package_item.Weight.Value\r\n self.RequestedShipment.TotalWeight.Value += package_weight\r\n self.RequestedShipment.PackageCount += 1",
"def add_package_import(self, package):\n self._main_model.add_package_import(package)",
"def package():\n pass"
] | [
"0.74583834",
"0.69385165",
"0.6926181",
"0.6425457",
"0.64241457",
"0.637839",
"0.6228563",
"0.62157047",
"0.6180625",
"0.6137637",
"0.6119172",
"0.6106267",
"0.60927397",
"0.60533196",
"0.6002774",
"0.5973729",
"0.5969399",
"0.59233946",
"0.5900656",
"0.58961207",
"0.58565676",
"0.5853485",
"0.5849491",
"0.5843319",
"0.58431935",
"0.5819632",
"0.58129686",
"0.58129686",
"0.5806767",
"0.580212"
] | 0.85479677 | 0 |
Add multiple Packages to a component | def add_packages(self, component_name, packages):
for pkg in packages:
self.add_package(component_name, pkg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_packages(self, packages):\n for p in packages:\n self.add_package(p)",
"def update(self, iterable):\n for package in iterable:\n self.add_package(package)",
"def register_packages(self, module_name, extra_package):\n self.creator_manager.register_packages(module_name, extra_package)",
"def add_package(self, component_name, package):\n component = self.get_component(component_name)\n component.add_package(package)",
"def packages():",
"def expand_package(self, pkg):\n return [(pkg, c) for c in self.packages[pkg].components]",
"def install(self, *packages):\n raise NotImplementedError",
"def add_package(self, package):\n obj = package if isinstance(package, Package) else Package(\n component=self, **package)\n self.data['packages'].append(obj)",
"def do_package(package):\n\tn_ucr = extFile(package, 'univention-config-registry')\n\tif not os.path.exists(n_ucr):\n\t\treturn\n\n\tf_ucr = open(n_ucr, 'r')\n\n\tfor item in univention.config_registry.parseRfc822(f_ucr.read()):\n\t\ttyp = item['Type'][0]\n\t\tif typ == 'file':\n\t\t\tf = item['File'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'subfile':\n\t\t\tf = item['Subfile'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\telif typ == 'multifile':\n\t\t\tf = item['Multifile'][0]\n\t\t\tif os.path.exists(f):\n\t\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'script':\n\t\t\tf = item['Script'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'scripts'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'scripts'))\n\t\t\tdoIt('chmod', 'a+x', destPath(f, package, 'scripts'))\n\t\telif typ == 'module':\n\t\t\tf = item['Module'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'modules'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'modules'))\n\t\telse:\n\t\t\tprint >>sys.stderr, 'Unknown type: %s' % typ\n\t\t\treturn\n\n\tf_ucr.close()\n\n\tdoIt('install', '-d', destDir('', package, 'info'))\n\tdoIt('install', '-m644', n_ucr, destPath(package+'.info', package, 'info'))\n\tmapping_file = extFile( package, 
'univention-config-registry-mapping')\n\tif os.path.exists(mapping_file):\n\t\tdoIt('install', '-d', destDir('', package, 'mapping'))\n\t\tdoIt('install', '-m644', mapping_file, destPath(package+'.univention-config-registry-mapping', package, 'mapping'))\n\n\tdata = {\n\t\t\t'pkg': quote(package),\n\t\t\t'info': quote(\"/etc/univention/templates/info/%s.info\" % package),\n\t\t\t'removed': quote(\"/etc/univention/templates/removed/%s.info\" % package),\n\t\t\t}\n\n\tf_preinst = open(extFile(package, 'preinst.debhelper'), 'a')\n\tf_preinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_preinst.write('if [ \"$1\" = \"install\" ] ; then\\n')\n\tf_preinst.write(' [ -e %(removed)s ] && [ ! -e %(info)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_preinst.write('fi\\n')\n\tf_preinst.write('# End automatically added section\\n')\n\tf_preinst.close()\n\n\tf_postinst = open(extFile(package, 'postinst.debhelper'), 'a')\n\tf_postinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_postinst.write('if [ \"$1\" = \"abort-remove\" ]; then\\n')\n\tf_postinst.write(' [ -e %(removed)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_postinst.write('fi\\n')\n\tf_postinst.write('[ -x /usr/sbin/univention-config-registry ] && univention-config-registry register %(pkg)s || true\\n' % data)\n\tf_postinst.write('# End automatically added section\\n')\n\tf_postinst.close()\n\n\tf_prerm = open(extFile(package, 'prerm.debhelper'), 'a')\n\tf_prerm.write('# Automatically added by univention-install-config-registry\\n')\n\tf_prerm.write('if [ \"$1\" = \"remove\" ] && [ -e %(info)s ] ; then\\n' % data)\n\tf_prerm.write(' [ -x /usr/sbin/univention-config-registry ] && univention-config-registry unregister %(pkg)s || true\\n' % data)\n\tf_prerm.write(' mv %(info)s %(removed)s || true\\n' % data)\n\tf_prerm.write('fi\\n')\n\tf_prerm.write('# End automatically added section\\n')\n\tf_prerm.close()\n\n\tdoIt('perl', '-e', 'use 
Debian::Debhelper::Dh_Lib;addsubstvar(\"%s\", \"misc:Depends\", \"univention-config (>= 7.0.25)\");' % package)",
"def append_packages(self, packages: Sequence['Package']):\n for package in packages:\n if package.name in self.all_packages_dict:\n logging.error(\"Package {} already known\".format(package))\n raise InvalidInput(\"Package {} already known\".format(package))\n\n self.all_packages_dict[package.name] = package\n\n if package.type_of is PossibleTypes.REPO_PACKAGE:\n self.repo_packages_list.append(package)\n elif package.type_of is PossibleTypes.AUR_PACKAGE:\n self.aur_packages_list.append(package)\n elif package.type_of is PossibleTypes.DEVEL_PACKAGE:\n self.devel_packages_list.append(package)\n else:\n assert package.type_of is PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n self.not_repo_not_aur_packages_list.append(package)\n\n self.__append_to_x_dict(packages, 'provides')\n self.__append_to_x_dict(packages, 'conflicts')",
"def add_packages(project, env_spec_name, packages, channels, pip=False):\n return _update_env_spec(project, env_spec_name, packages, channels, create=False, pip=pip)",
"def group_all_together(self):\n package_parts = self.container.config.output.package.split(\".\")\n module = package_parts.pop()\n package = \".\".join(package_parts)\n\n self.assign(self.container, package, module)",
"def _install_packages(packages):\n for package in packages:\n cuisine.package_ensure(package)",
"def add(self, *components):\n for component in components:\n if component.container is not None:\n component.container.remove(component)\n component.container = self\n self._components.extend(components)",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def _get_add_package_args(self, package, type_option, version_option):\n raise NotImplementedError()",
"def addLevelsPackage(self, levelsPackage):\n for key in levelsPackage.keys():\n self.addLevel(levelsPackage[key])",
"def create_package_list(base):\n\n return [base] + [\"{}.{}\".format(base, pkg) for pkg in find_packages(base)]",
"def add_components(self, comps):\r\n if not isinstance(comps, list):\r\n comps = [comps]\r\n for comp in comps:\r\n self.comps.append(comp)",
"def addPackageNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespaces(self, *args)",
"def _add_package(self, pkg):\n for dep in pkg.dependency:\n for key in self.registry.keys():\n if key[0] == dep:\n break\n else:\n raise RuntimeError(\n 'Package %s has unresolved dependency issues: %s' %\n (pkg.name, dep))\n self.package_dependency.add_edge(pkg.name, dep)\n self.package_dependency.add_vertex(pkg.name)\n for key, task in pkg.tasks.iteritems():\n self.registry[key, pkg.version] = pkg\n self.registry[key, None] = pkg\n self.registry[pkg.name, pkg.version] = pkg\n\n # mark this package as the latest one\n self.registry[pkg.name, None] = pkg",
"def add_package ( self, package_info, addition_control, **pkg_add_kw ):\n return self._get_package_dir ( package_info ['name'] ).add_package (\n package_info, addition_control, **pkg_add_kw\n )",
"def add_pkg(pkgs, name, pkgver):\n try:\n pkgs.setdefault(name, []).append(pkgver)\n except AttributeError as exc:\n log.exception(exc)",
"def create_packages(self):\n if not self.rewrite:\n # The extra package structure is only required for vendored code used via import rewrites.\n return\n\n for index, _ in enumerate(self._subpath_components):\n relpath = _PACKAGE_COMPONENTS + self._subpath_components[: index + 1] + [\"__init__.py\"]\n touch(os.path.join(self.ROOT, *relpath))",
"def packages(self):\n return []",
"def addItems(*args):",
"def init_extensions(self, package, module):\n\n pass",
"def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)",
"def add_package(self, package_metadata):\n if package_metadata.name not in self._name_to_packages:\n bisect.insort(self._names, package_metadata.name)\n\n self._name_to_packages.setdefault(\n package_metadata.name, self._default_factory()\n )\n self._name_to_packages[package_metadata.name].append(package_metadata)\n # Fixme: this should not be that costly as long as we don't have\n # many versions for a given package.\n self._name_to_packages[package_metadata.name].sort(\n key=operator.attrgetter(\"version\")\n )",
"def __install(self):\n command = self.pipComboBox.currentText()\n if command == self.__default:\n command = \"\"\n \n packages = []\n for itm in self.resultList.selectedItems():\n packages.append(itm.text(0).strip())\n if packages:\n self.__pip.installPackages(packages, cmd=command)"
] | [
"0.73213696",
"0.7041433",
"0.6733623",
"0.66976285",
"0.64817804",
"0.6370093",
"0.63610965",
"0.6145575",
"0.60475343",
"0.59889007",
"0.59589255",
"0.5940141",
"0.5892097",
"0.5846863",
"0.57817626",
"0.5773263",
"0.57322484",
"0.56881267",
"0.56647044",
"0.5653178",
"0.5618085",
"0.56033665",
"0.55904067",
"0.55860925",
"0.5574845",
"0.5559474",
"0.5527266",
"0.5507922",
"0.5455156",
"0.5452599"
] | 0.7994629 | 0 |
Adds a package to this Component | def add_package(self, package):
obj = package if isinstance(package, Package) else Package(
component=self, **package)
self.data['packages'].append(obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_package(self, component_name, package):\n component = self.get_component(component_name)\n component.add_package(package)",
"def add_package ( self, package_info, addition_control, **pkg_add_kw ):\n return self._get_package_dir ( package_info ['name'] ).add_package (\n package_info, addition_control, **pkg_add_kw\n )",
"def add_arch_package(self, package_name: str) -> None:\n self.arch_packages.append(package_name)",
"def add_packages(self, component_name, packages):\n for pkg in packages:\n self.add_package(component_name, pkg)",
"def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")",
"def add_debian_package(self, package_name: str) -> None:\n self.debian_packages.append(package_name)",
"def add_package(\n self,\n package,\n node_paths=None, \n type_option=PackageInstallationTypeOption.PROD,\n version_option=None):\n args=self._get_add_package_args(\n package,\n type_option=type_option,\n version_option=version_option)\n return self.run_command(args=args, node_paths=node_paths)",
"def add_package(self, package_item):\r\n self.RequestedShipment.RequestedPackageLineItems.append(package_item)\r\n package_weight = package_item.Weight.Value\r\n self.RequestedShipment.TotalWeight.Value += package_weight\r\n self.RequestedShipment.PackageCount += 1",
"def add_package(self, package_item):\r\n self.RequestedShipment.RequestedPackageLineItems.append(package_item)\r\n package_weight = package_item.Weight.Value\r\n self.RequestedShipment.TotalWeight.Value += package_weight\r\n self.RequestedShipment.PackageCount += 1",
"def add_package(self, package_metadata):\n if package_metadata.name not in self._name_to_packages:\n bisect.insort(self._names, package_metadata.name)\n\n self._name_to_packages.setdefault(\n package_metadata.name, self._default_factory()\n )\n self._name_to_packages[package_metadata.name].append(package_metadata)\n # Fixme: this should not be that costly as long as we don't have\n # many versions for a given package.\n self._name_to_packages[package_metadata.name].sort(\n key=operator.attrgetter(\"version\")\n )",
"def _add_package(self, pkg):\n for dep in pkg.dependency:\n for key in self.registry.keys():\n if key[0] == dep:\n break\n else:\n raise RuntimeError(\n 'Package %s has unresolved dependency issues: %s' %\n (pkg.name, dep))\n self.package_dependency.add_edge(pkg.name, dep)\n self.package_dependency.add_vertex(pkg.name)\n for key, task in pkg.tasks.iteritems():\n self.registry[key, pkg.version] = pkg\n self.registry[key, None] = pkg\n self.registry[pkg.name, pkg.version] = pkg\n\n # mark this package as the latest one\n self.registry[pkg.name, None] = pkg",
"def test_add_package(self)-> None:\n my_deck = get_deck(deck_name='Test Deck Name')\n add_package(my_deck, 'output_name')\n pass",
"def add_package(self, pkg_name, pkg):\n pkg_name = pkg_name.lower()\n if pkg_name not in self.zbnam_packages:\n if pkg_name == \"cbc\":\n pkg_name = \"bud\"\n else:\n raise KeyError(\n f\"{pkg_name} package is not valid for zonebudget\"\n )\n\n if isinstance(pkg, str):\n if os.path.exists(os.path.join(self._model_ws, pkg)):\n pkg = os.path.join(self._model_ws, pkg)\n\n func = self.zbnam_packages[pkg_name]\n if pkg_name in (\"bud\", \"grb\"):\n pkg = func(pkg, precision=\"double\")\n else:\n pkg = func.load(pkg, self)\n\n else:\n pass\n\n pkg_name = f\"_{pkg_name}\"\n self.__setattr__(pkg_name, pkg)\n if pkg is not None:\n self.package_dict[pkg_name[1:]] = pkg",
"def add_new_package_to_list(self, package: Package) -> None:\n\n package_cache_entry = f\"{package.file_name}#{package.name}#{package.version}\"\n if package_cache_entry not in self._added_packages_cache:\n self.packages.append(package)\n self._added_packages_cache.add(package_cache_entry)",
"def add_new_package(self, package, skip='', description=''):\n with self._conn.begin():\n return self._conn.execute(\n \"VALUES (add_new_package(%s, %s, %s))\", (package, skip,\n description)).scalar()",
"def add_package_import(self, package):\n self._package_imports.add(package)",
"def add_package_import(self, package):\n self._main_model.add_package_import(package)",
"def package(self, package):\n\n self._package = package",
"def add_packages(self, packages):\n for p in packages:\n self.add_package(p)",
"def add_package_location(self, package_location):\n self._main_model.add_package_location(package_location)",
"def add_package_location(self, package_location):\n self._package_locations.add(package_location)",
"def register_packages(self, module_name, extra_package):\n self.creator_manager.register_packages(module_name, extra_package)",
"def add_package(self, package):\n if package in config[\"ignore_packages\"]:\n return\n\n # Worker for dependenant builds\n worker = False\n needed = self.resolve(package, self.dependencies, graph=[])\n\n # If we want to build against a dependency, and it's not inn the list; abort\n if self.getProperty(\"build_with_dependency\") and \\\n self.getProperty(\"build_with_dependency\") not in needed:\n return\n\n if needed:\n worker = random.choice(config[\"localworkers\"])\n\n # Run over all needed packages and find their deps\n for i in needed:\n _needed = self.resolve(i, self.dependencies, graph=[])\n self.add_build(i, _needed, worker=worker)\n self.add_build(package, needed, worker=worker)",
"def addPackageNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespace(self, *args)",
"def notify_add_package(self, pkg):\n ver_key = (pkg.category, pkg.package)\n s = set(self.versions.get(ver_key, ()))\n s.add(pkg.fullver)\n if pkg.category not in self.categories:\n self.categories.force_add(pkg.category)\n self.packages.force_regen(pkg.category)\n self.versions.force_regen(ver_key, tuple(s))",
"def set_package(self, pkg): \n self.pkg = pkg",
"def addPackage(self, name, folder, root, password, site, comment, paused):\n pid = self.db.addPackage(name, folder, root, password, site, comment,\n PackageStatus.Paused if paused else PackageStatus.Ok, OWNER)\n p = self.db.getPackageInfo(pid)\n\n self.evm.dispatchEvent(\"packageInserted\", pid, p.root, p.packageorder)\n return pid",
"def AddNewPackage(path, package_name, namespace):\n\n new_line = '<include package=\".%s\" />' % package_name\n fullpath=path + '/configure.zcml'\n\n try:\n fh=open(fullpath)\n lines=fh.readlines()\n for i in range(len(lines)):\n if len(lines[i]) == 0: continue\n if lines[i][-1] in ['\\n', '\\r']: lines[i] = lines[i][:-1]\n if len(lines[i]) == 0: continue\n if lines[i][-1] in ['\\n', '\\r']: lines[i] = lines[i][:-1]\n fh.close()\n except:\n lines=[\n '<configure',\n '\\txmlns=\"http://namespaces.zope.org/zope\"',\n '\\ti18n_domain=\"%s\">\\n' % namespace, \n '\\t<!-- END OF PACKAGES -->',\n '\\n</configure>']\n\n\n # See if our file is present\n for l in lines:\n if l.find(new_line) != -1:\n return # all done\n\n # Find the marker and insert at that point\n for i in range(len(lines)):\n l = lines[i]\n if l.find('<!-- END OF PACKAGES -->') != -1:\n lines = lines[:i] + ['\\t'+new_line] + lines[i:]\n break\n\n # rewrite the file \n if os.access(fullpath, os.F_OK):\n backup=fullpath+'~'\n if os.access(backup, os.F_OK):\n os.unlink(backup)\n os.rename(fullpath, backup)\n\n fh = open(fullpath, 'w')\n fh.write('\\n'.join([l.replace('\\t', ' ') for l in lines]))\n fh.close()",
"def add_package_type(self, name, handler):\n if not interpret_string(name):\n raise RelengInvalidSetupException('invalid package name provided')\n name_key = name.lower()\n if not name_key.startswith(PREFIX_REQUIREMENT):\n raise RelengInvalidSetupException('extension-defined package types '\n 'must be prefixed with \"{}\"'.format(PREFIX_REQUIREMENT))\n if name_key in self.package_types:\n raise RelengInvalidSetupException('extension package type {} '\n 'is already defined by another extension'.format(name))\n if not inspect.isclass(handler):\n raise RelengInvalidSetupException('handler is not a class')\n package_type = handler()\n build_op = getattr(package_type, 'build', None)\n configure_op = getattr(package_type, 'configure', None)\n install_op = getattr(package_type, 'install', None)\n if (not callable(build_op) or not callable(configure_op) or\n not callable(install_op)):\n raise RelengInvalidSetupException('package type does not defined '\n 'required method(s)')\n self.package_types[name_key] = package_type",
"def add(self, component) -> None:\n pass"
] | [
"0.8857589",
"0.74427915",
"0.70480454",
"0.7036367",
"0.69797033",
"0.69747174",
"0.6825071",
"0.6796833",
"0.6796833",
"0.6756401",
"0.67164165",
"0.6667246",
"0.6634431",
"0.66323304",
"0.6548047",
"0.65459543",
"0.6545156",
"0.6496167",
"0.6432102",
"0.6419851",
"0.6411089",
"0.6373124",
"0.6343776",
"0.63362813",
"0.63219875",
"0.6271499",
"0.62459844",
"0.6243805",
"0.62309253",
"0.62201685"
] | 0.83141875 | 1 |
Add a list of packages | def add_packages(self, packages):
for p in packages:
self.add_package(p) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_packages(self, component_name, packages):\n for pkg in packages:\n self.add_package(component_name, pkg)",
"def append_packages(self, packages: Sequence['Package']):\n for package in packages:\n if package.name in self.all_packages_dict:\n logging.error(\"Package {} already known\".format(package))\n raise InvalidInput(\"Package {} already known\".format(package))\n\n self.all_packages_dict[package.name] = package\n\n if package.type_of is PossibleTypes.REPO_PACKAGE:\n self.repo_packages_list.append(package)\n elif package.type_of is PossibleTypes.AUR_PACKAGE:\n self.aur_packages_list.append(package)\n elif package.type_of is PossibleTypes.DEVEL_PACKAGE:\n self.devel_packages_list.append(package)\n else:\n assert package.type_of is PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n self.not_repo_not_aur_packages_list.append(package)\n\n self.__append_to_x_dict(packages, 'provides')\n self.__append_to_x_dict(packages, 'conflicts')",
"def update(self, iterable):\n for package in iterable:\n self.add_package(package)",
"def create_package_list(base):\n\n return [base] + [\"{}.{}\".format(base, pkg) for pkg in find_packages(base)]",
"def _add_missing_packages(frozen_pkgs, ret, **kwargs):\n # NOTE: we can remove the `for` using `pkgs`. This will improve\n # performance, but I want to have a more detalied report of what\n # packages are installed or failed.\n pkgs = __salt__[\"pkg.list_pkgs\"](**kwargs)\n missing_pkgs = set(frozen_pkgs) - set(pkgs)\n for pkg in missing_pkgs:\n try:\n __salt__[\"pkg.install\"](name=pkg, **kwargs)\n ret[\"pkgs\"][\"add\"].append(pkg)\n log.info(\"Added missing package %s\", pkg)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error adding %s package: %s\"\n log.error(msg, pkg, e)\n ret[\"comment\"].append(msg % (pkg, e))",
"def install_packages(packages):\n\n if packages:\n log(\"Installing apt packages: {0}\".format(packages))\n run(sh.apt_get.install, packages.split(), y=True)",
"def install(self, *packages):\n raise NotImplementedError",
"def packages():",
"def add_packages(project, env_spec_name, packages, channels, pip=False):\n return _update_env_spec(project, env_spec_name, packages, channels, create=False, pip=pip)",
"def _install_packages(self, package_list):\n installed_count = 0\n for package in package_list:\n install = [\n self.mock,\n '-q',\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--install',\n '%s' % package\n ]\n \"\"\"\n Lots of useless debugging\n @TODO: Remove\n \"\"\"\n print \"Installing Package %s\" % package\n output, errors = self._run_command(install)\n print output, errors\n installed_count += 1\n \"\"\"\n Lots of useless debugging\n @TODO: Remove\n \"\"\"\n print output\n print errors",
"def _install_packages(packages):\n for package in packages:\n cuisine.package_ensure(package)",
"def add_pkg(pkgs, name, pkgver):\n try:\n pkgs.setdefault(name, []).append(pkgver)\n except AttributeError as exc:\n log.exception(exc)",
"def parsePackages(self, packages_list) -> None:\n\t\tif self.package_manager == \"apt\":\n\t\t\tfor package in packages_list:\n\t\t\t\tpackage = package.strip().split(\" \")\n\t\t\t\tname = package[0].split(\"/\")[0]\n\t\t\t\tversion = package[1]\n\t\t\t\tarchitecture = package[2]\n\t\t\t\tself.installed_packages.add(Package(name=name, version=version, architecture=architecture))\n\t\telse:\n\t\t\tlogger.error(\"Package manager parser not supported.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\t\tlogger.info(\"Packages parsed successfully\")",
"def add_packages ( self, add_method ):\n addstats = self.repo_stats.queue_time\n for repo in self.repos:\n addstats.begin ( repo.name )\n self._queue_packages_from_repo ( repo, add_method )\n addstats.end ( repo.name )",
"def pipinstall(packages):\n\n if isinstance(packages, str):\n if hasattr(pip, 'main'):\n pip.main(['install', packages])\n else:\n pip._internal.main(['install', packages])\n elif isinstance(packages, list):\n for i in enumerate(packages):\n if hasattr(pip, 'main'):\n pip.main(['install', i[1]])\n else:\n pip._internal.main(['install', i[1]])\n else:\n raise TypeError(\"Nor a string or a list was provided.\")",
"def install(self, packages):\n cmd = ['dnf', 'install', '-y']\n\n for pkg in packages:\n # if pkg not in self.installed_packages:\n cmd.append(pkg)\n\n if packages:\n subprocess.Popen(cmd).wait()",
"def append_packages_by_name(self, packages_names: Sequence[str]):\n\n packages_names = set([strip_versioning_from_name(name) for name in packages_names])\n packages_names_to_fetch = [name for name in packages_names if name not in self.all_packages_dict]\n aur_names = packages_from_other_sources()[0]\n for name in packages_names:\n if name in packages_names_to_fetch:\n continue\n\n if name not in aur_names:\n continue\n\n package = self.all_packages_dict[name]\n if package.type_of is not PossibleTypes.AUR_PACKAGE and package.type_of is not PossibleTypes.DEVEL_PACKAGE:\n packages_names_to_fetch.append(name)\n\n deleted_while_appending = False\n while packages_names_to_fetch:\n fetched_packages = Package.get_packages_from_aur(packages_names_to_fetch)\n\n deps_of_the_fetched_packages = []\n for package in fetched_packages:\n deps_of_the_fetched_packages.extend(package.relevant_deps())\n if package.name in self.all_packages_dict:\n del self.all_packages_dict[package.name]\n deleted_while_appending = True\n\n self.append_packages(fetched_packages)\n\n relevant_deps = list(set([strip_versioning_from_name(dep) for dep in deps_of_the_fetched_packages]))\n\n packages_names_to_fetch = [dep for dep in relevant_deps if dep not in self.all_packages_dict]\n\n if deleted_while_appending:\n self.recreate_dicts()",
"def import_packages(filename:str, path:str=None):\r\n logging.info(\"Importing Package List From '{}'...\".format(filename))\r\n if path is None:\r\n path = get_site_packages_path()\r\n\r\n i = 1\r\n packages = open(filename, 'r').readlines()\r\n for p in packages:\r\n package, version = p.split(\"=\")\r\n logging.info(\"Installing Package {} ({} of {})...\".format(package, i, len(packages)))\r\n install(package, version, path)\r\n i += 1\r\n logging.info(\"Finished Importing Package List From '{}'\".format(filename))",
"def AddPackagesInDir(path):\n\t\tret = []\n\t\tpkgdirs = os.listdir(path)\n\t\tfor d in pkgdirs:\n\t\t\tif d == 'CVS' or d.startswith('.'):\n\t\t\t\tcontinue\n\t\t\tp = os.path.join(path, d)\n\n\t\t\tif os.path.isdir(p):\n\t\t\t\tcat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])\n\t\t\t\tlogging.debug('adding %s to scanlist' % cat_pkg_dir)\n\t\t\t\tret.append(cat_pkg_dir)\n\t\treturn ret",
"def bulk_install(package_names:List[str]) -> str:\n\n success = []\n failure = []\n\n if not isinstance(package_names, list):\n raise TypeError(f'Expected {package_names} to be a list of strings')\n for package_name in package_names:\n try:\n package = Commands._install(name=package_name)\n except Exception:\n failure.append(package_name)\n break\n success.append(package_name)\n return f'Installed packages: {success}\\nNot installed packages: {failure}'",
"def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs",
"def packages(self):\n return []",
"def addpackage(sitedir, name, known_paths, syspath):\n fullname = os.path.join(sitedir, name)\n f = open(fullname, \"rU\")\n try:\n for line in f:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"import\"):\n exec(line)\n continue\n line = line.rstrip()\n dir, dircase = makepath(sitedir, line)\n if not dircase in known_paths and os.path.exists(dir):\n syspath.append(dir)\n known_paths.add(dircase)\n finally:\n f.close()\n return known_paths",
"def required_packages(cls) -> List[Text]:\n return []",
"def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)",
"def register_packages(self, module_name, extra_package):\n self.creator_manager.register_packages(module_name, extra_package)",
"def write_package_list(_, package_list, *args):\n logger.debug(\"Attempting to write package list\")\n try:\n with open(Base.get_package_list_filepath(), \"w\") as file:\n yaml.dump(package_list, file, sort_keys=True)\n except:\n logger.exception(\"Could not write package list\")\n exit(1)\n logger.debug(\"Packages written to file\")",
"def _install_packages(module, path, packages, allowed_dists, repo):\n cmd = \"./LuaDist/bin/luadist install \"\n\n # Add packages to command\n for package in packages:\n cmd += package + \" \"\n\n # Add types of dists allowed to command\n source_allowed = \"true\"\n binary_allowed = \"true\"\n if allowed_dists == \"binary\":\n source_allowed = \"false\"\n elif allowed_dists == \"source\":\n binary_allowed = \"false\"\n cmd += \" -source=\" + source_allowed + \" -binary=\" + binary_allowed\n\n # Add repository to command\n cmd += ' -repos=\"' + repo + '\"'\n\n ret_code, out, err = module.run_command(cmd, cwd=path)\n already_installed = \"No packages to install\" in out\n\n if ret_code != 0 and not already_installed:\n module.fail_json(\n rc=ret_code,\n stdout=out,\n stderr=err,\n msg=\"Cannot install one or more of the specified packages, \"\n + \"make sure all packages exist in the configured repository.\",\n )\n\n return cmd",
"def install(cls, directory: Path, packages: list[str]) -> None:\n\n if directory.is_dir() is False:\n raise Failure(f\"{directory} is not a valid directory\")\n\n logger.info(f\"Installing dependencies - {', '.join(packages)}\")\n\n os.chdir(directory)\n run([cls.command, \"add\"] + packages)",
"def install_packages(self):\n for package in self.packages:\n utils.exec_cmd('yum install -v -y {0}'.format(package))"
] | [
"0.7967399",
"0.71899575",
"0.6881099",
"0.67112195",
"0.66414106",
"0.661952",
"0.660106",
"0.65925723",
"0.6552438",
"0.6526353",
"0.646941",
"0.6427331",
"0.6338769",
"0.6301947",
"0.6284635",
"0.6241945",
"0.6221222",
"0.6174528",
"0.6141783",
"0.6097224",
"0.60807425",
"0.6058756",
"0.60549724",
"0.6037496",
"0.60130805",
"0.6006693",
"0.6000004",
"0.597404",
"0.593898",
"0.59169155"
] | 0.8461527 | 0 |
Update from a list of indexes | def update_from_indexes(self, data, **kw):
for i in data:
self.update_from_index(i, **kw) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_by_index(df, col, indexs, data):\n for indx in indexs:\n df.loc[indx, col] = data",
"def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor",
"def _update_in_db(db_values: list, sheet_cells: List[Cell], indices: Iterable[int]) -> List[int]:\n # print('Entering _update_in_db')\n mod_idx = []\n for i in indices:\n if db_values[i] != sheet_cells[i].value and (db_values[i] or sheet_cells[i].value):\n db_values[i] = sheet_cells[i].value\n mod_idx.append(i)\n # print('Leaving _update_in_db')\n return mod_idx",
"def update(i, v, xs):\n return [v if i == ind else x for ind, x in enumerate(xs)]",
"def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True",
"def update(self, values: List[int]) -> None:\n ...",
"def update(self, values: List[int]) -> None:\n ...",
"def set_indexes(self, indexes):\n if not isinstance(indexes, list) or not all(isinstance(i, int) for i in indexes):\n raise ValueError(\"The indexes should be a list and all its elements should be int\")\n self._indexes = indexes\n return self",
"def index_object(idxs=None):",
"def update_list(freq_list: typing.List[int], list_index: int) -> typing.List[int]:\n freq_list[list_index] += 1\n return freq_list",
"def updateRow(self, index: int) -> None:\n ...",
"def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:\n if indexes is None:\n raise ValueError(\"Argument `indexes` cannot be None\")\n\n indexes, preds, target = _check_retrieval_inputs(\n indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index\n )\n\n self.indexes.append(indexes)\n self.preds.append(preds)\n self.target.append(target)",
"def replaceItemOfIndex(itemsList, value, index):\n itemsList[index] = value",
"def _updateBoundLicencesIndexes(licence, events, indexes=[]):\n if indexes == []:\n indexes = [\n 'Title',\n 'sortable_title',\n 'applicantInfosIndex',\n 'address',\n 'StreetNumber',\n 'StreetsUID',\n 'parcelInfosIndex'\n ]\n\n annotations = IAnnotations(licence)\n ticket_uids = annotations.get('urban.bound_tickets') or set([])\n inspection_uids = annotations.get('urban.bound_inspections') or set([])\n uids = inspection_uids.union(ticket_uids)\n catalog = api.portal.get_tool('portal_catalog')\n bound_licences_brains = catalog(UID=list(uids))\n for bound_licences_brain in bound_licences_brains:\n bound_licence = bound_licences_brain.getObject()\n to_reindex = False\n if bound_licence.portal_type == 'Inspection' and bound_licence.getUse_bound_licence_infos():\n to_reindex = True\n if bound_licence.portal_type == 'Ticket' and bound_licence.getUse_bound_inspection_infos():\n to_reindex = True\n if to_reindex:\n bound_licence.updateTitle()\n bound_licence.reindexObject(idxs=indexes)\n # make sure to update the whole reference chain licence <- inspection <- ticket\n _updateBoundLicencesIndexes(bound_licence, events)",
"def _Dynamic_UpdateIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"UpdateIndex\", request_id)\n return",
"def exercise_indexes():\n print(exercise_indexes.__doc__)\n print(\"The indexes of 'data' are:\", data.index)\n print(data, \"\\n\")\n print(\"Changing the indexes of 'data'\")\n print(data.reindex([2, 0, 1]), \"\\n\")\n print(\"Changing the indexes of 'data' randomly\")\n print(data.reindex(np.random.permutation(data.index)))",
"def index(self, values=None):\n if values is None:\n values = self.proxy_get()\n for value in values:\n self.index_value(value)",
"def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )",
"def update_targets(self, indexes: List[int], new_targets: np.ndarray):\n if self.train:\n self.train_nat[indexes, :] = new_targets\n else:\n self.test_nat[indexes, :] = new_targets",
"def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)",
"def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )",
"def reindex(self):",
"def reindex(self):",
"def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)",
"def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)",
"def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)",
"def _update_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due \"\n \"to being in read-only mode.\")\n # tee out iterable for use in adding to index as well as hash code\n # generation.\n d_for_index, d_for_hashing = itertools.tee(descriptors, 2)\n\n self._log.debug(\"Updating descriptor index.\")\n self.descriptor_index.add_many_descriptors(d_for_index)\n\n self._log.debug(\"Generating hash codes for new descriptors\")\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque() # for updating hash_index\n for d in d_for_hashing:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n h_int = bit_vector_to_int_large(h_vec)\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Updating hash index structure.\")\n self.hash_index.update_index(hash_vectors)",
"def update(self, values):\n pass",
"def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]",
"def bulk_update(self, iterable):\n inserted, updated = [], []\n for d, h in iterable:\n if -d in self:\n updated.append((-d, h))\n else:\n inserted.append((-d, h))\n self._update_bulk(updated)\n self._insert_bulk(inserted)"
] | [
"0.6944268",
"0.68279725",
"0.674803",
"0.6747745",
"0.67436105",
"0.66381496",
"0.66381496",
"0.65306586",
"0.634002",
"0.6184385",
"0.61832535",
"0.61691165",
"0.6095535",
"0.60420585",
"0.60305125",
"0.6021017",
"0.59819454",
"0.59468365",
"0.5943627",
"0.59080184",
"0.5902367",
"0.5902017",
"0.5902017",
"0.59011555",
"0.59011555",
"0.5895974",
"0.58254844",
"0.58076775",
"0.58009243",
"0.57987565"
] | 0.80606335 | 0 |
Updates this metadata instance with packages found in the given JSON document. This can be called multiple times to merge multiple repository metadata JSON documents into this instance. | def update_from_json(self, json_string):
parsed = json.loads(json_string)
self.add_packages(parsed.pop('packages', []))
self.data(parsed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_package(self, **kwargs):\n logging.warning('Updating a package removes all existing data. '\n 'If you wish to keep the existing data, use `CachedCKAN.patch_package`.')\n results = self.api.action.package_update(**kwargs)\n self.get_ckan_metadata(True)\n return results",
"def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n context.package.attributes[x] = chef_json.get(x, None)",
"def update_from_index(self, data, **kw):\n packages = _iter_paragraphs_path(data, **kw)\n self.add_packages([{'deb822': p} for p in packages])",
"def update_pkg_metadata(self, pkg, version=None, **kwargs):\n pass",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def update(self, iterable):\n for package in iterable:\n self.add_package(package)",
"def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True",
"def update_packages(self, packages: Packages, source=\"conda\") -> None:\n self[source] = self.get(source, {})\n self._update_packages(self[source], packages)",
"def update_package(self, package):\n if package is not None:\n self._package_cache.add(package.id, package)",
"def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']",
"def patch_package(self, **kwargs):\n results = self.api.action.package_patch(**kwargs)\n self.get_ckan_metadata(True)\n return results",
"def update_package_data(distribution):\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()",
"def update_package_data(distribution):\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()",
"def add_packages(self, packages):\n for p in packages:\n self.add_package(p)",
"def update_project_documents(self, manifest_info):\n\n for proj_name, proj_info in manifest_info.projects.items():\n # See if project document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n key_name = f'project:{proj_name}'\n\n try:\n project_data = self.db.get_document(key_name)\n except cbdatabase_db.NotFoundError:\n project_data = dict(\n type='project', key_=key_name, name=proj_name\n )\n\n remote, repo_url = \\\n manifest_info.get_project_remote_info(proj_name)\n\n if 'remotes' in project_data:\n if remote in project_data['remotes']:\n if repo_url not in project_data['remotes'][remote]:\n project_data['remotes'][remote].append(repo_url)\n else:\n project_data['remotes'][remote] = [repo_url]\n else:\n project_data['remotes'] = {remote: [repo_url]}\n\n self.db.upsert_documents({key_name: project_data})",
"def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 
3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 
'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item 
in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": 
maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True",
"def update_package_data(distribution):\r\n build_py = distribution.get_command_obj(\"build_py\")\r\n build_py.finalize_options() # Updates package_data\r",
"def upload_packages(self, packages):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n\n filepaths = [os.path.join(os.path.dirname(path), pfile['filename'])\n for path in packages\n for pfile in deb_changes(path)['files']]\n filepaths.extend(packages)\n\n # get upload token\n resp = self._client.postjson(path=\"/users/%(username)s/\"\n \"repos/%(reponame)s/\"\n \"branches/%(name)s/get_upload_token\" %\n context)\n token = resp['utoken']\n for pfile in filepaths:\n self._client.upload(path=\"/upload/%s/send/%s\" %\n (token, os.path.basename(pfile)),\n filepath=pfile)\n self._client.post(path=\"/upload/%s/dput\" % token)",
"def _store_package_metadata(self):",
"def add_packages_to_json(filepath):\n with open(filepath, \"r\") as f:\n data = json.load(f)\n data[\"package_versions\"] = {}\n data[\"package_versions\"][\"scipy\"] = pkg_version(\"scipy\")\n data[\"package_versions\"][\"numpy\"] = pkg_version(\"numpy\")\n data[\"package_versions\"][\"cython\"] = pkg_version(\"cython\")\n data[\"package_versions\"][\"qutip\"] = pkg_version(\"qutip\")\n data[\"package_versions\"][\"pytest\"] = pkg_version(\"pytest\")\n with open(filepath, \"w\") as f:\n json.dump(data, f, indent=4, separators=(\",\", \": \"))\n return data",
"def install_api(self, packages):\n\n for pkg in packages:\n if pkg not in self.installed_packages:\n try:\n self.base.install(pkg)\n except:\n print(\"dnf error finding: \" + pkg)\n self.base.resolve()\n self.base.download_packages(self.base.transaction.install_set)\n self.base.do_transaction()\n self._get_dnf()",
"def parse_packages_and_load_mets(\n json_file_path,\n api_url,\n timestamp,\n package_list_no,\n storage_service_id,\n fetch_job_id,\n):\n OBJECTS = \"objects\"\n packages = []\n with open(json_file_path, \"r\") as packages_json:\n package_list = json.load(packages_json)\n\n try:\n os.remove(json_file_path)\n except OSError as err:\n logger.warning(\"Unable to delete package JSON file: {}\".format(err))\n\n for package_obj in package_list.get(OBJECTS, []):\n package = process_package_object(package_obj)\n packages.append(package)\n if not package.is_aip():\n continue\n start_mets_task(\n package.uuid,\n package.get_relative_path(),\n package.current_location,\n package.origin_pipeline,\n api_url,\n timestamp,\n package_list_no,\n storage_service_id,\n fetch_job_id,\n )\n return packages",
"def from_dict(self, json_data: Dict) -> None:\n self.package_name = json_data[\"name\"]\n # self.package_path = Path(json_data[\"path\"])\n self.description = json_data[\"description\"]\n self.mpy_version = json_data[\"mpy_version\"]\n self._publish = json_data[\"publish\"]\n self.hash = json_data[\"hash\"]\n self.stub_hash = json_data[\"stub_hash\"]\n # create folder\n if not self.package_path.exists():\n self.package_path.mkdir(parents=True, exist_ok=True)\n # create the pyproject.toml file\n self.create_update_pyproject_toml()\n # set pkg version after creating the toml file\n self.pkg_version = json_data[\"pkg_version\"]\n self.stub_sources = []\n for name, path in json_data[\"stub_sources\"]:\n if path.startswith(\"stubs/\"):\n path = path.replace(\"stubs/\", \"\")\n self.stub_sources.append((name, Path(path)))",
"def get_package_data(name, package=None):\n if not package:\n package = models.Package(name=name)\n releases = {}\n else:\n releases = package.get_all_releases()\n\n client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi', transport=Urllib2Transport())\n\n versions = client.package_releases(package.name, True)\n\n # package_releases() method is case-sensitive, if nothing found\n # then we search for it\n # XXX: Ask pypi to make it case-insensitive?\n if not versions:\n for item in client.search({'name': name}):\n if name.lower() == item['name'].lower():\n package.name = name = item['name']\n break\n else:\n logger.info(\"No packages found matching %r\", name)\n return\n\n # Retry retrieving the versions with the new/correct name\n versions = client.package_releases(package.name, True)\n\n # Save the package if it is new\n if not package.pk:\n package.save()\n\n for version in versions:\n release, files = releases.get(version, (None, {}))\n if not release:\n release = models.Release(package=package, version=version)\n release.save()\n\n data = client.release_data(package.name, release.version)\n\n release_form = forms.PypiReleaseDataForm(data, instance=release)\n if release_form.is_valid():\n release_form.save()\n\n release_files = client.package_urls(package.name, release.version)\n for info in release_files:\n release_file = files.get(info['filename'])\n if not release_file:\n release_file = models.ReleaseFile(\n release=release, filename=info['filename'])\n\n release_file.python_version = info['python_version']\n release_file.filetype = info['packagetype']\n release_file.url = info['url']\n release_file.size = info['size']\n release_file.md5_digest = info['md5_digest']\n release_file.save()\n\n package.update_timestamp = now()\n package.save()",
"def add_package(self, package_metadata):\n if package_metadata.name not in self._name_to_packages:\n bisect.insort(self._names, package_metadata.name)\n\n self._name_to_packages.setdefault(\n package_metadata.name, self._default_factory()\n )\n self._name_to_packages[package_metadata.name].append(package_metadata)\n # Fixme: this should not be that costly as long as we don't have\n # many versions for a given package.\n self._name_to_packages[package_metadata.name].sort(\n key=operator.attrgetter(\"version\")\n )",
"def write_packages_json(count, packages, packages_directory):\n json_download_file = os.path.join(\n packages_directory, \"packages{}.json\".format(count)\n )\n logger.info(\"Packages file is downloaded to '%s'\", json_download_file)\n try:\n with open(json_download_file, \"w\") as json_file:\n json.dump(packages, json_file, indent=4)\n except json.JSONDecodeError:\n logger.error(\"Cannot decode JSON from %s\", json_download_file)",
"def update(pkg_name):\n\n vendor_file = os.path.join('vendor', 'vendor.json')\n target = 'golang.org/x/{}'.format(pkg_name)\n\n with open(vendor_file) as content:\n deps = json.load(content)\n packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)]\n revision = '@{revision}'.format(revision=args.revision) if args.revision else ''\n packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages]\n cmd = ['govendor', 'fetch'] + packages\n if args.verbose:\n print(' '.join(cmd))\n subprocess.check_call(cmd)",
"def package(self, package):\n\n self._package = package",
"def mergeMetadata(self, obj, dom): \n self.update_semantics = 'merge'\n # create a metadata dict that has all the values from obj, overridden\n # by the current dom values.\n metadata = self.getModuleMetadata(obj, {})\n metadata.update(self.getMetadata(dom, METADATA_MAPPING))\n for oerdc_name, cnx_name in METADATA_MAPPING.items():\n if cnx_name in ['keywords',]:\n old_value = getattr(obj, cnx_name)\n if old_value:\n current_value = list(metadata.get(cnx_name, []))\n current_value.extend(old_value)\n metadata[cnx_name] = current_value\n if metadata:\n self.validate_metadata(metadata)\n metadata = self.fixEntities(metadata, ATTRIBUTES_TO_FIX)\n if ICollection.providedBy(obj):\n obj.collection_metadata(**metadata)\n elif IModule.providedBy(obj):\n obj.update_metadata(**metadata)\n self.updateRoles(obj, dom)\n obj.reindexObject(idxs=metadata.keys())",
"def write_package_json(self) -> None:\n # make sure folder exists\n if not self.package_path.exists():\n self.package_path.mkdir(parents=True, exist_ok=True)\n # write the json to a file\n with open(self.package_path / \"package.json\", \"w\") as f:\n json.dump(self.to_dict(), f, indent=4)"
] | [
"0.6071355",
"0.5914813",
"0.5828914",
"0.5709145",
"0.5708369",
"0.55982107",
"0.5593583",
"0.552553",
"0.5517733",
"0.5470216",
"0.5419332",
"0.53404135",
"0.53404135",
"0.53109956",
"0.52079356",
"0.5178757",
"0.5161923",
"0.5106",
"0.5101378",
"0.50850755",
"0.504053",
"0.5035775",
"0.50317496",
"0.5024649",
"0.5017731",
"0.5012957",
"0.49872646",
"0.49747118",
"0.49700204",
"0.49630946"
] | 0.6851507 | 0 |
Get a list of package resources | def get_package_resources(self):
resources = []
for pkg in self.packages:
resource_data = self.get_resource_data()
resources.extend(pkg.get_resources(resource_data))
return resources | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))",
"def get_packages_with_prefixes():\n return get_resources('packages')",
"def get_resources(self):\n return []",
"def get_resources():\n return Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')",
"def resources(self):\n return self.__resources",
"def get_resources(self, **extra_args):\n return [lrms for lrms in self.resources.itervalues()]",
"def getResources(self, folder):\n\n #-------------------- \n # Get the resource JSON\n #-------------------- \n folder += \"/resources\"\n resources = self.__getJson(folder)\n #print(\"%s %s\"%(, folder))\n #print(\" Got resources: '%s'\"%(str(resources)))\n\n\n\n #-------------------- \n # Filter the JSONs\n #-------------------- \n resourceNames = []\n for r in resources:\n if 'label' in r:\n resourceNames.append(r['label'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['label']))\n elif 'Name' in r:\n resourceNames.append(r['Name'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['Name']))\n\n return resourceNames",
"def resources(self):\n return self._resources",
"def resources(self):\n return self._resources",
"def resources(self):\n return self._resources",
"def get_resources(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_resources(body)",
"def resources(self):\r\n return self.page.object_list",
"def resources(self):",
"def collect_resources_list(namespace, output_dir, k8s_cli, mode):\n selector = \"\"\n if mode == MODE_RESTRICTED:\n selector = '--selector=\"{}\"'.format(OPERATOR_LABEL)\n collect_helper(output_dir,\n cmd=\"{} get all -o wide -n {} {}\".format(k8s_cli, namespace, selector),\n file_name=\"resources_list\",\n resource_name=\"resources list\",\n namespace=namespace)",
"def list(self):\n resources = self._os_resource_manager.list()\n resource_list = []\n for resource in resources:\n resource_list.append(self._resource_class(id=resource.id,\n name=resource.name))\n return resource_list",
"def list_resource_files(resource_name: str) -> List[str]:\n\n files = []\n dirs = []\n\n def inner_recursion(resource_name: str):\n\n if not pkg_resources.resource_exists(__name__, resource_name):\n raise RuntimeError(\n f\"The resource {resource_name}, does not seem to be available \"\n )\n\n # base case\n if not pkg_resources.resource_isdir(__name__, resource_name):\n files.append(resource_name)\n\n # recursive case\n else:\n if not resource_name.endswith(\"/\"):\n resource_name += \"/\"\n\n dirs.append(resource_name)\n children = pkg_resources.resource_listdir(__name__, resource_name)\n\n resource_names_extended = [f\"{resource_name}{c}\" for c in children]\n\n for child_resource in resource_names_extended:\n inner_recursion(child_resource)\n\n inner_recursion(resource_name)\n\n return files",
"def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")",
"def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")",
"def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")",
"def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")",
"def resources(self) -> Sequence['outputs.GetResourcesResourceResult']:\n return pulumi.get(self, \"resources\")",
"def resources(self):\n return [self]",
"def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)",
"def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources",
"def resources(self) -> \"Resources\":\n return self._resources",
"def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources",
"def GetResourceNames(self):\r\n return [x.name for x in self.resources]",
"def resources(self) -> [ListResourcesResponse]:\n resources = []\n\n layers = self.layers\n for layer_name in layers:\n layer_arns = self.layer_version_arns(layer_name)\n for arn in layer_arns:\n list_resources_response = ListResourcesResponse(\n service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,\n resource_type=self.resource_type, name=layer_name)\n resources.append(list_resources_response)\n return resources"
] | [
"0.7528305",
"0.7528305",
"0.74842477",
"0.7477079",
"0.7378839",
"0.73766977",
"0.73312473",
"0.7212838",
"0.7077163",
"0.70177805",
"0.70177805",
"0.70177805",
"0.68670994",
"0.68647087",
"0.6861097",
"0.6786635",
"0.6760258",
"0.6729682",
"0.6708055",
"0.6708055",
"0.6708055",
"0.6708055",
"0.6707574",
"0.66925883",
"0.66848904",
"0.6680019",
"0.66794664",
"0.6653978",
"0.6628294",
"0.65908265"
] | 0.82839036 | 0 |
Returns the unit key for this package that will uniquely identify it in Pulp. This is the unique key for the inventoried package in Pulp. | def unit_key(self):
data = self.to_dict()
return self.generate_unit_key(*[data[key] for key in UNIT_KEYS]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")"
] | [
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424"
] | 0.789188 | 0 |
Returns all nonunit key metadata that should be stored in Pulp for this package. This is how the package will be inventoried in Pulp. | def unit_metadata(self):
data = self.to_dict()
metadata = [(k, v) for k, v in data.items() if k not in UNIT_KEYS]
return metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def provideExpectedMetaKeys(self):\n return self.metadataKeys, self.metadataParams",
"def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']",
"def get_extra_metadata_keys() -> list[str]:\n keys = [\n \"srow_x\",\n \"srow_y\",\n \"srow_z\",\n \"quatern_b\",\n \"quatern_c\",\n \"quatern_d\",\n \"qoffset_x\",\n \"qoffset_y\",\n \"qoffset_z\",\n \"dim\",\n \"pixdim\",\n *[f\"dim[{i}]\" for i in range(8)],\n *[f\"pixdim[{i}]\" for i in range(8)],\n ]\n\n # TODO: it would be good to remove these, but they are currently being used in the\n # codebase.\n # keys += [\n # \"original_affine\",\n # \"spatial_shape\",\n # \"spacing\",\n # ]\n\n return keys",
"def mandatory_info(self):\n return [info.key for info in self.entry_info if not info.optional]",
"def get_metadata(self):\n return {}",
"def get_metadata(self):\n # currently there is no metadata to send\n return {}",
"def _get_non_metadata_keys(self, keys: list):\n # All keys required to create PSM object\n default_keys = [\n \"chargeState\",\n \"rank\",\n \"PeptideSequence\",\n \"experimentalMassToCharge\",\n \"PeptideEvidenceRef\",\n \"Modification\",\n ]\n # Get the score key and add to default keys\n self._score_key = self._infer_score_name(keys)\n default_keys.append(self._score_key)\n\n # Get the q-value key and add to default keys\n self._qvalue_key = self._infer_qvalue_name(keys)\n if self._qvalue_key:\n default_keys.append(self._qvalue_key)\n\n # Get the PEP key and add to default keys\n self._pep_key = self._infer_pep_name(keys)\n if self._pep_key:\n default_keys.append(self._pep_key)\n\n # Get retention time key\n for rt_key in [\"retention time\", \"scan start time\"]:\n if rt_key in keys:\n self._rt_key = rt_key\n default_keys.append(rt_key)\n break\n\n # Keys that are not necessary for metadata\n self._non_metadata_keys = [\"ContactRole\", \"passThreshold\"]\n self._non_metadata_keys.extend(default_keys)",
"def _extra_keys(self):\r\n return []",
"def keys(self):\n return self.itunesAttributes.keys()",
"def _metadata(self) -> Dict[str, Any]:\n return self.__metadata",
"def keys(self):\n return self.metadb.keys()",
"def get_metadata():\n\n module = __name__.split('.', 1)\n\n pkg = pkg_resources.get_distribution(module[0])\n meta = {\n 'Name': None,\n 'Version': None,\n 'Summary': None,\n 'Home-page': None,\n 'Author': None,\n 'Author-email': None,\n 'License': None,\n }\n\n for line in pkg.get_metadata_lines(\"PKG-INFO\"):\n for par in meta:\n if line.startswith(par + \":\"):\n _, value = line.split(\": \", 1)\n meta[par] = value\n\n return meta",
"def keys(self, installer_context):\n return self.spec.keys(self.data, installer_context)",
"def keys(self):\n query = \"\"\"SELECT column_name, data_type, character_maximum_length\n FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'ngc2236';\"\"\"\n result = self.wifsip.query(query)\n keys = [r[0] for r in result]\n return keys",
"def _store_package_metadata(self):",
"def metadata(self): # -> list[Unknown]:\n ...",
"def metadata(self): # -> list[Unknown]:\n ...",
"def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata",
"def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys",
"def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys",
"def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys",
"def keys(self):\n return",
"def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys",
"def get_all_keys(self):\n return self.psettings.allKeys()",
"def get_upload_key_metadata(self):\n key = self.get_upload_key()\n metadata = key.metadata.copy()\n\n # Some http header properties which are stored on the key need to be\n # copied to the metadata when updating\n headers = {\n # http header name, key attribute name\n 'Cache-Control': 'cache_control',\n 'Content-Type': 'content_type',\n 'Content-Disposition': 'content_disposition',\n 'Content-Encoding': 'content_encoding',\n }\n\n for header_name, attribute_name in headers.items():\n attribute_value = getattr(key, attribute_name, False)\n if attribute_value:\n metadata.update({b'{0}'.format(header_name):\n b'{0}'.format(attribute_value)})\n return metadata",
"def get_metadata(self):\n metadata = {}\n for k in self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata",
"def pypeit_file_keys(self):\n pypeit_keys = super().pypeit_file_keys()\n # TODO: Why are these added here? See\n # pypeit.metadata.PypeItMetaData.set_pypeit_cols\n pypeit_keys += ['calib', 'comb_id', 'bkg_id']\n return pypeit_keys",
"def test_list_no_metadata(self):\n key = Key(self.bucket)\n name, version = 'mypkg', '1.2'\n filename = '%s-%s.tar.gz' % (name, version)\n key.key = name + '/' + filename\n key.set_contents_from_string('foobar')\n package = list(self.storage.list(Package))[0]\n self.assertEquals(package.name, name)\n self.assertEquals(package.version, version)\n self.assertEquals(package.filename, filename)",
"def _get_missing_keys(self):\n REQUIRED_KEYS = [\n 'date_purchased', 'cost', 'supply_type_id'\n ]\n\n return [key for key in REQUIRED_KEYS if not key in self.request.data]",
"def metadata(self):\n return {\n \"wildtype\" : self.wildtype,\n \"genotypes\" : self.genotypes,\n \"phenotypes\" : self.Raw.phenotypes,\n \"stdeviations\" : self.stdeviations,\n \"n_replicates\" : self.n_replicates,\n \"mutations\" : self.mutations,\n \"log_transform\" : self.log_transform,\n \"order\" : self.order,\n \"epistasis\" : {\n \"keys\" : self.epistasis.keys,\n \"values\" : self.epistasis.values,\n }\n }"
] | [
"0.6587527",
"0.64940816",
"0.645989",
"0.6420536",
"0.63525313",
"0.62840426",
"0.628037",
"0.62474185",
"0.6222178",
"0.6196326",
"0.61875063",
"0.6166716",
"0.6148108",
"0.6105044",
"0.6097268",
"0.60649985",
"0.60649985",
"0.6063789",
"0.6055105",
"0.6055105",
"0.6055105",
"0.6021874",
"0.6017997",
"0.5996099",
"0.5996076",
"0.59838176",
"0.598034",
"0.5961586",
"0.5959656",
"0.59360766"
] | 0.68912286 | 0 |
Construct a relative path based on our own data. | def relative_path(self, data=None):
path_data = data.copy()
for i in ['prefix', 'source_name']:
if not i in path_data:
path_data[i] = getattr(self, i)
return constants.DEB_FILENAME % path_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_relative_path(full_path, prefix='/', split_on='/data/'):\n splits = full_path.split(split_on)\n return os.path.join(prefix, split_on, splits[-1])",
"def dataPath(relative):\n return os.path.join(_dataDir, relative)",
"def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset = len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)",
"def _make_path(self) -> str:\r\n path_ = Path(path.join(conf.instance.output_path, self.path_prefix, self.name))\r\n if self.is_identifier_in_paths:\r\n path_ = path_ / self.identifier\r\n return path_",
"def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath",
"def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path",
"def make_relative(self,basepath = None):\n __make_relative__(run_object=self,basepath=basepath)",
"def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)",
"def relative(self):\n rel = self.path\n if self.params:\n rel += ';' + self.params\n if self.query:\n rel += '?' + self.query\n if self.fragment:\n rel += '#' + self.fragment\n return rel",
"def build_path(self, *args):\n components = self.build_config + args\n return PATH.join(\n self.name,\n *components\n )",
"def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)",
"def path_creator(rel_path=''):\n if platform.system() != 'Windows':\n if rel_path == '':\n path_list=sys.argv[0].split('/')[:-1]\n return '/'.join(path_list)\n else:\n path_list = sys.argv[0].split('/')[:-1]\n return '/'.join(path_list) + '/' + rel_path\n else:\n if rel_path == '':\n path_list=sys.argv[0].split('\\\\')[:-1]\n path_res='\\\\'.join(path_list)\n return path_res\n else:\n path_list = sys.argv[0].split('\\\\')[:-1]\n rel_path=rel_path.split('/')\n path_res='\\\\'.join(path_list) + '\\\\' + '\\\\'.join(rel_path)\n return path_res",
"def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path",
"def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep",
"def path_helper(location, date, time, slc_dir='slc', data_dir='/media/bup/Data'):\n\n base_folder = data_dir + '/' + location + '/' + date + '/'\n name = date + '_' + time\n def_path = base_folder + slc_dir + '/' + name\n return def_path",
"def make_path(self, filename):\n return os.path.join(self.root_path, filename)",
"def base_path(self):\n return Path(self.path)",
"def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))",
"def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)",
"def __make_path(self, filename):\n return self.__path() + os.sep + filename",
"def data_path(self):\n raise NotImplementedError",
"def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path",
"def _path(self):\n if self.target[-1] != \"/\":\n self.target += \"/\"\n\n if \"/\" in self.source:\n self.path = self.target + self.source.split(\"/\")[-1]\n else:\n raise NotImplementedError(\"This software is not done for Windows\")\n if self.method == \"git\":\n self.path = self.path.replace(\".git\", \"\")",
"def get_data_path():\n return os.getcwd() + \"/data/\"",
"def build_relpath(self):\n return join_path(\"..\", self.build_dirname)",
"def datafilepath(*filename):\r\n import os\r\n return makepath(os.path.join(base_dir, *filename))",
"def path(cls, relpath=None):\r\n base = os.getcwd() if not ParseContext._active else cls.locate().current_buildfile.parent_path\r\n return os.path.abspath(os.path.join(base, relpath) if relpath else base)",
"def data_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\")",
"def id_to_relative_raw_path(self, id):\n return osp.join(self.id_to_base_id(id) + '.ply')",
"def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())"
] | [
"0.7208581",
"0.70153475",
"0.683594",
"0.6769534",
"0.6621445",
"0.6615474",
"0.6516488",
"0.65120804",
"0.6495915",
"0.64829516",
"0.64696205",
"0.64535296",
"0.6452572",
"0.64190155",
"0.6400882",
"0.63507867",
"0.6273181",
"0.6255719",
"0.62451476",
"0.6235075",
"0.6225686",
"0.6210604",
"0.62104017",
"0.6194029",
"0.61755985",
"0.6174046",
"0.6165383",
"0.61624646",
"0.613662",
"0.6126686"
] | 0.7555825 | 0 |
Replace invalid characters for an Excel sheet name within the ``sheet_name`` with the ``replacement_text``. | def sanitize_excel_sheet_name(sheet_name: str, replacement_text: str = "") -> str:
try:
unicode_sheet_name = _preprocess(sheet_name)
except AttributeError as e:
raise ValueError(e)
modify_sheet_name = __RE_INVALID_EXCEL_SHEET_NAME.sub(replacement_text, unicode_sheet_name)
return modify_sheet_name[:__MAX_SHEET_NAME_LEN] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sheet_name(self, name):\n if self.sheet:\n self._newline\n self._cell('')\n\n self._cell(name)\n self.sheet = name",
"def regional_sheet_name(df_column_name):\n\n sheet_name = df_column_name.title().replace('_', '-').replace('-Km2', '-km^2')\n\n # these regions are special cases; would probably be best to update\n # nasateam.regional_masks.py, but that would be a higher risk change\n if 'Centralarctic' in sheet_name:\n sheet_name = sheet_name.replace('Centralarctic', 'Central-Arctic')\n\n elif 'Eastsiberian' in sheet_name:\n sheet_name = sheet_name.replace('Eastsiberian', 'East-Siberian')\n\n elif 'Stlawrence' in sheet_name:\n sheet_name = sheet_name.replace('Stlawrence', 'St-Lawrence')\n\n # \"Canadian-Archipelago-Extent-km^2\" is 32 characters, too long to be the\n # name of an Excel sheet, so we drop the first dash here\n elif 'Canadianarchipelago' in sheet_name:\n sheet_name = sheet_name.replace('Canadianarchipelago', 'CanadianArchipelago')\n\n elif 'Bellingshausen Amundsen' in sheet_name:\n sheet_name = sheet_name.replace('Bellingshausen Amundsen', 'Bell-Amundsen')\n\n return sheet_name",
"def _update_sheet_name_with_unique_id(self, sheet_name):\n if sheet_name == MODULES_AND_FORMS_SHEET_NAME:\n return sheet_name\n module_sheet_name_match = HQ_MODULE_SHEET_NAME.match(sheet_name)\n if module_sheet_name_match:\n module_index = int(module_sheet_name_match.groups()[0]) - 1\n return self._generate_module_sheet_name(module_index)\n form_sheet_name_match = HQ_FORM_SHEET_NAME.match(sheet_name)\n if form_sheet_name_match:\n indexes = form_sheet_name_match.groups()\n module_index, form_index = int(indexes[0]) - 1, int(indexes[1]) - 1\n return self._generate_form_sheet_name(module_index, form_index)\n raise Exception(\"Got unexpected sheet name %s\" % sheet_name)",
"def hxlreplace():\n run_script(hxlreplace_main)",
"def fix(text):\n\n text = text.replace(\"\\\\\", \"\\\\\\\\\")\n text = text.replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\")\n text = _nonAsciiPattern.sub(_replace, text)\n return text",
"def sanitize_filename(filename, replacement_text=\"\"):\n\n return __RE_INVALID_FILENAME.sub(replacement_text, filename.strip())",
"def __FixClipName(self, text):\n substitutions = {\n '/': '',\n '\\\\': ''\n }\n cursorPos = self.clipNameEdit.cursorPosition()\n for char in substitutions:\n text = text.replace(char, substitutions[char])\n self.clipNameEdit.setText(text)\n self.clipNameEdit.setCursorPosition(cursorPos)",
"def fix_natural_language(name):\n\tfor ch in r\"\\`*{}[]()>#+-.!$\":\n\t\tif ch in name:\n\t\t\tname = name.replace(ch,\"_\")\n\treturn name",
"def name_to_goodreads(name):\n name = to_ascii(name.title())\n for char in CHARS:\n name = name.replace(*char)\n return name",
"def set_user_defined_sheet_name():\n global sheet\n probable_sheets = []\n\n machine_nr = input('Veuillez entrer un numéro de machine (Vide si recherche par C.) : ')\n invoice_nr = input('Veuillez entrer un numéro de C. : ')\n workbook = load_workbook(filename='./temp_excel.xlsm')\n sheets = workbook.sheetnames\n\n # Research amongs all sheets (There can be a lot)\n for ii in sheets:\n if machine_nr in ii and invoice_nr in ii:\n sheet = workbook[ii]\n break\n elif machine_nr in ii:\n probable_sheets.append(ii)\n elif invoice_nr in ii:\n probable_sheets.append(ii)\n\t \n\n # If no exact corresponding sheet is found\n if not sheet and probable_sheets != []:\n print('Aucune feuille ne correspond totalement à votre recherche, mais certaines s\\'en rapprochent :')\n i = 0\n for ii in probable_sheets:\n print(f'{i} : {ii}')\n i+=1\n print('99 pour quitter')\n choice = input('Faites un choix :')\n\n # Let the user exit the script\n if choice == 'q':\n sys.exit()\n else:\n sheet = workbook[probable_sheets[int(choice)]]",
"def replace_greek_spelled_out(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_uni, greek_spelled_out)\n return s",
"def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str",
"def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s",
"def get_sheet_name(filename):\n filename = os.path.basename(filename)\n filename = os.path.splitext(filename)[0]\n # there are 31 character limitation\n if len(filename) > 31:\n filename = filename[:31]\n return filename",
"def sanitize_name(name):\n # For now just change dashes to underscores. Fix this more in the future\n return name.replace(\"-\", \"_\")",
"def correct_filename(self, img_name, categ):\n path = self._path\n\n # Change wrong characters in filename\n wrong_char = [char for char in img_name if char in [\" \", \"(\", \")\", \"é\", \"©\"]]\n if len(wrong_char) > 0:\n\n new_img_name = img_name\n for char in [\" \", \"(\", \")\", \"©\"]:\n new_img_name = new_img_name.replace(char, \"\")\n new_img_name = new_img_name.replace(\"é\", \"e\")\n\n os.rename(join(path, categ, img_name), join(path, categ, new_img_name))\n img_name = new_img_name\n\n return img_name",
"def sanitize_name(name: str) -> str:\n return re.sub(r\"[^A-Za-z0-9_-]\", \"-\", name)[0:128]",
"def replace_greek_uni(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_spelled_out, greek_uni)\n return s",
"def clean_keeping_underscores(cls, value):\r\n return INVALID_CHARS.sub('_', value)",
"def check_sheet(path, sheet): \n xl = pd.ExcelFile(path)\n if sheet not in xl.sheet_names:\n raise ValueError(\"Invalid sheet name \\'\" + sheet +\"\\'\")",
"def _clean_workflow_name(name: str) -> str:\n return REGEX_CHARS_TO_REPLACE.sub(\"-\", name).strip(\"-\")",
"def adjust_name_for_printing(name):\n if name is not None:\n name2 = name\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_m_\")\n name = name.replace(\"+\", \"_p_\").replace(\"!\", \"_I_\")\n name = name.replace(\"**\", \"_xx_\").replace(\"*\", \"_x_\")\n name = name.replace(\"/\", \"_l_\").replace(\"@\", '_at_')\n name = name.replace(\"(\", \"_of_\").replace(\")\", \"\")\n if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:\n raise NameError(\"name {} converted to {} cannot be further converted to valid python variable name!\".format(name2, name))\n return name\n return ''",
"def set_amiSheetNames(self):\n\n self.pres_sheetname = None\n self.edit_sheetname = None\n self.notransfer_sheetname = None\n\n for sheet in self.wb.sheet_names():\n sheet_lower = sheet.lower()\n #Check if two sheets get identfied by regex below?\n if re.match(\"(original|preservation|file|full|archive)\",\n sheet_lower):\n self.pres_sheetname = sheet\n elif re.match(\"edit\", sheet_lower):\n self.edit_sheetname = sheet\n elif re.match(\"not transferred\", sheet_lower):\n self.notransfer_sheetname = sheet",
"def sanitize_name(name):\n name = name.strip()\n\n # clean up group\n name = name.replace('- IE', ' -IE')\n name = name.replace('- MA', ' -MA')\n for l in [1,2,3,4,5,6,7,8,9]:\n for g in \"AB\":\n name = name.replace(f'IE{l}-{g}', f'IE-{l}{g}')\n name = name.replace(f'IE{l}{g}', f'IE-{l}{g}')\n for l in [1,2,3,4]:\n for g in [2*l-1, 2*l]:\n name = name.replace(f'MA-{l}{g}', f'MA{l}-{g}')\n name = name.replace(f'MA{l}{g}', f'MA{l}-{g}')\n\n # clean up name\n try:\n parts = name.split(' ')\n firstname = parts[0].title()\n group = parts[-1]\n familynames = parts[1:-1]\n familyname = \" \".join(f.upper() for f in familynames)\n name = f\"{firstname} {familyname} {group}\"\n name = name.replace('-IE', '- IE')\n name = name.replace('-MA', '- MA')\n except:\n pass\n while \" \" in name:\n name = name.replace(' ', ' ')\n return name",
"def sanitize(name):\n return re.sub(\"\\\\W|^(?=\\\\d)\", \"_\", name)",
"def clean(value):\r\n return re.sub('_+', '_', INVALID_CHARS.sub('_', value))",
"def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name",
"def sanitize(name):\n ## Disallowed characters in filenames\n DISALLOWED_CHARS = \"\\\\/:<>?*|\"\n if name == None:\n name = \"Unknown\"\n for character in DISALLOWED_CHARS:\n name = name.replace(character,'_')\n # Replace \" with '\n name = name.replace('\"', \"'\")\n\n return name",
"def _process_replacements(input_text):\n original_input = list(input_text) # Immutable\n replaced = list(input_text)\n for index, char in enumerate(original_input):\n if char.isalpha() or char.isdigit():\n replacement = helpers.get_character(char)\n if replacement:\n replaced[index] = ' ' + replacement + ' '\n return helpers.remove_excess_spaces(''.join(replaced))",
"def _clean_contrast_name(contrast_name):\n new_name = contrast_name[:]\n\n # Some characters translate to words\n new_name = new_name.replace(\"-\", \" Minus \")\n new_name = new_name.replace(\"+\", \" Plus \")\n new_name = new_name.replace(\">\", \" Gt \")\n new_name = new_name.replace(\"<\", \" Lt \")\n\n # Others translate to spaces\n new_name = new_name.replace(\"_\", \" \")\n\n # Convert to camelCase\n new_name = new_name.split(\" \")\n new_name[0] = new_name[0].lower()\n new_name[1:] = [c.title() for c in new_name[1:]]\n new_name = \" \".join(new_name)\n\n # Remove non-alphanumeric characters\n new_name = \"\".join(ch for ch in new_name if ch.isalnum())\n\n # Let users know if the name was changed\n if new_name != contrast_name:\n warnings.warn(\n f'Contrast name \"{contrast_name}\" changed to \"{new_name}\"'\n )\n return new_name"
] | [
"0.62368625",
"0.5658766",
"0.55839145",
"0.5342836",
"0.52264005",
"0.5170571",
"0.5117008",
"0.49473116",
"0.4930441",
"0.4916277",
"0.49059415",
"0.48987168",
"0.4875026",
"0.4849753",
"0.4812612",
"0.4810664",
"0.47979686",
"0.47912505",
"0.47768623",
"0.4770285",
"0.47661123",
"0.4753105",
"0.4732713",
"0.4725495",
"0.47051752",
"0.46959808",
"0.46954927",
"0.46900332",
"0.46855152",
"0.46752217"
] | 0.85612595 | 0 |
Returns the entropy from the entire DataFrame to the given target. | def get_entropy_df(self, df=None):
if df is None:
df = self.df
target = self.target
entropy = 0
values = df[target].unique()
for value in values:
# Fraction of values of 'value' in target feature
fraction = df[target].value_counts()[value]/len(df[target])
entropy += -fraction*np.log2(fraction)
return entropy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy(target_col):\n elements,counts = np.unique(target_col,return_counts = True)\n entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n return entropy",
"def entropy(self, dataset, target_attr):\n freq = {} #A dictionary to counts how many samples for each target classification \n data_entropy = 0.0\n samplenumbers = len(dataset) #Total number of samplers in data set\n \n #Calculate the frequency of each of the values in the target attribute\n for record in dataset:\n if (record[target_attr] in freq):\n freq[record[target_attr]] += 1.0\n else:\n freq[record[target_attr]] = 1.0\n \n # Calculate the entropy of the data for the target attribute\n for freq in list(freq.values()):\n data_entropy += (-freq/samplenumbers) * math.log(freq/samplenumbers, 2) \n \n return data_entropy",
"def __entropy(self, data_set, target_feature):\n frequencies = self.__calculate_frequency(data_set, target_feature)\n feature_entropy = 0.0\n number_of_values = len(data_set)\n\n # Add entropy for each value in frequencies.\n for frequency in frequencies:\n probability = frequencies[frequency] / number_of_values\n feature_entropy += (probability * math.log(probability, 2))\n\n return feature_entropy * -1",
"def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()",
"def get_entropy(df: pd.DataFrame) -> int:\r\n\r\n # get unique values and their counts for the last column of df\r\n values, val_freq = np.unique(df[df.columns[-1]], return_counts=True)\r\n val_freq = val_freq / val_freq.sum()\r\n\r\n entropy = 0\r\n for freq in val_freq:\r\n entropy -= freq * np.log2(freq)\r\n return entropy",
"def get_entropy_feature(self, feature, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n target_variables = df[target].unique()\n variables = df[feature].unique()\n entropy = 0\n\n # Aggregate entropy for each unique value in 'feature' feature on each unique value in target feature\n for variable in variables:\n entropy_inner = 0\n for target_variable in target_variables:\n # Number of values of 'variable' in 'feature' feature that matches current target value\n num = len(df[feature][df[feature] == variable][df[target] == target_variable])\n # Number of values of 'variable' in 'feature' feature\n den = len(df[feature][df[feature] == variable])\n # Machine epsilon\n eps = np.finfo(np.float).eps\n fraction_inner = num/(den+eps)\n entropy_inner += -fraction_inner*np.log(fraction_inner+eps)\n fraction = den/len(df)\n entropy += -fraction*entropy_inner\n\n return abs(entropy)",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def c_entropy(state,target=None,log_base=2):\n if target!=None:\n state = state.ptrace(target)\n return entropy(com_measure(state),base=log_base)",
"def get_entropy(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return sum([-(float(label_count[label]) /\n total_count) * np.log2(float(label_count[label]) / total_count)\n for label in label_count.keys()])",
"def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy",
"def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]",
"def get_lowest_entropy_feature(self, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n entropies = []\n\n for feature in self.features:\n entropies.append(self.get_entropy_df(df=df) - self.get_entropy_feature(feature=feature, df=df))\n\n # Quit growing if no information gain is possible locally (works 100% in 99% of cases)\n if len(set(entropies)) is 1:\n return None\n return df.keys().drop(target)[np.argmax(entropies)]",
"def calculate_entropy(y):\n\tlog2 = lambda x: math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def compute_cross_entropy(probs, target):\n avg_probs_per_sample = probs.mean(\n -1)\n xe = torch.nn.CrossEntropyLoss(reduction='none')\n return xe(avg_probs_per_sample, target).detach().cpu().numpy()",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)",
"def calculate_entropy(dataset) :\n\n num_entries = len(dataset)\n label_counts = {}\n for vector in dataset :\n # the label is at the last index of the data set\n current_label = vector[-1]\n if current_label not in label_counts :\n label_counts[current_label] = 0\n label_counts[current_label] += 1\n # Calculate the entropy\n entropy = 0.0\n for label in label_counts :\n # Calculate probability of each label within the dataset\n prob_of_label = label_counts[label]/num_entries\n # Since the entropy is the negative of the sum of all probability,\n # simply subtract it\n entropy -= prob_of_label * log(prob_of_label, 2)\n return entropy",
"def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])",
"def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def calc_entropy(data_set):\n size = len(data_set)\n label_counts = {}\n for feat_vector in data_set:\n label = feat_vector[-1]\n label_counts.setdefault(label, 0)\n label_counts[label] += 1\n\n entropy = 0.0\n for key, count in label_counts.iteritems():\n prob = float(count) / size\n entropy -= prob * log(prob, 2)\n\n return entropy",
"def entropy(rows, resCol = None):\n from math import log\n if not resCol: #create the dictionary of counts for each class using pure python\n total = len(rows)\n counts = __uniqueCounts(rows)\n else: #Create the dictionary of counts for each class using pandas.\n assert 'index' in dir(rows)\n total = len(rows.index)\n counts = __uniqueCountsPandas(rows, resCol)\n log2 = lambda x:log(x)/log(2) #Create a function to take log-base 2 of a number\n ent = 0 #Initialize the entropy at zero\n #Implement the formula for entropy, using log-base2\n fracs = [float(x)/total for x in counts.values()]\n for x in fracs:\n ent += -x*log2(x)\n return ent",
"def single_entropy(df, var):\n\n entropy_ = df.groupBy(var).agg(count(\"*\").alias('num_entries')) \\\n .withColumn('all', lit('all')) \\\n .withColumn('total_num_entries', sql_sum('num_entries').over(Window.partitionBy('all'))) \\\n .withColumn('pcg', col('num_entries') / col('total_num_entries')) \\\n .select(var, 'pcg') \\\n .withColumn('entropy_term', -col('pcg') * log('pcg')) \\\n .select(sql_sum('entropy_term').alias('entropy')).first()['entropy']\n\n return entropy_",
"def find_entropy(less_than_threshold,more_than_threshold):\n\n ''' Storing total number of records '''\n total_records = len(less_than_threshold) + len(more_than_threshold)\n\n ''' Calculating the probability '''\n less_than_probability = len(less_than_threshold) / total_records\n more_than_probability = len(more_than_threshold) / total_records\n\n ''' Converting the dataframe to numpy arrays '''\n less_than_threshold_values = less_than_threshold.values\n more_than_threshold_values = more_than_threshold.values\n\n ''' Storing the target attribute values (Muffin or Cupcake) for threshold values '''\n target_for_less_than = less_than_threshold_values[:, -1]\n target_for_more_than = more_than_threshold_values[:, -1]\n\n ''' Finding the counts of muffin and cupcake for values lower than and greater than threshold value '''\n recipe_type, less_than_cupcake_muffin_count = np.unique(target_for_less_than, return_counts=True)\n recipe_type, more_than_cupcake_muffin_count = np.unique(target_for_more_than, return_counts=True)\n\n # print(recipe_type, more_than_cupcake_muffin_count, len(more_than_cupcake_muffin_count))\n ''' To ensure there are at least 5 records in each node '''\n if less_than_cupcake_muffin_count.sum() < 5 or more_than_cupcake_muffin_count.sum() < 5:\n ''' Return horrible badness '''\n return math.inf\n else:\n ''' Find the entropies for less than threshold values and more than threshold values '''\n less_than_entropy = sum((less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()) * - np.log2(\n less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()))\n more_than_entropy = sum((more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()) * - np.log2(\n more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()))\n\n ''' Calculate the total weighted entropy '''\n total_weighted_entropy = less_than_probability * less_than_entropy + more_than_probability * more_than_entropy\n\n return total_weighted_entropy",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def calc_entropy(column):\n # Compute the counts of each unique value in the column\n counts = numpy.bincount(column)\n # Divide by the total column length to get a probability\n probabilities = counts / len(column)\n \n # Initialize the entropy to 0\n entropy = 0\n # Loop through the probabilities, and add each one to the total entropy\n for prob in probabilities:\n if prob > 0:\n entropy += prob * math.log(prob, 2)\n \n return -entropy",
"def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )",
"def entropy(self):\n raise NotImplementedError",
"def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_"
] | [
"0.77557856",
"0.7240878",
"0.716849",
"0.7106271",
"0.7048393",
"0.69316816",
"0.65471375",
"0.6519684",
"0.64955693",
"0.6444423",
"0.64319384",
"0.6407732",
"0.63241047",
"0.630956",
"0.62471724",
"0.623521",
"0.6202024",
"0.6142288",
"0.6140276",
"0.6101606",
"0.6101134",
"0.6097924",
"0.6078207",
"0.60750544",
"0.606695",
"0.6062954",
"0.6062928",
"0.6042571",
"0.60194206",
"0.6009267"
] | 0.8257584 | 0 |
Returns the entropy from the given feature to the given target. | def get_entropy_feature(self, feature, df=None):
if df is None:
df = self.df
target = self.target
target_variables = df[target].unique()
variables = df[feature].unique()
entropy = 0
# Aggregate entropy for each unique value in 'feature' feature on each unique value in target feature
for variable in variables:
entropy_inner = 0
for target_variable in target_variables:
# Number of values of 'variable' in 'feature' feature that matches current target value
num = len(df[feature][df[feature] == variable][df[target] == target_variable])
# Number of values of 'variable' in 'feature' feature
den = len(df[feature][df[feature] == variable])
# Machine epsilon
eps = np.finfo(np.float).eps
fraction_inner = num/(den+eps)
entropy_inner += -fraction_inner*np.log(fraction_inner+eps)
fraction = den/len(df)
entropy += -fraction*entropy_inner
return abs(entropy) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __entropy(self, data_set, target_feature):\n frequencies = self.__calculate_frequency(data_set, target_feature)\n feature_entropy = 0.0\n number_of_values = len(data_set)\n\n # Add entropy for each value in frequencies.\n for frequency in frequencies:\n probability = frequencies[frequency] / number_of_values\n feature_entropy += (probability * math.log(probability, 2))\n\n return feature_entropy * -1",
"def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy",
"def entropy(self, dataset, target_attr):\n freq = {} #A dictionary to counts how many samples for each target classification \n data_entropy = 0.0\n samplenumbers = len(dataset) #Total number of samplers in data set\n \n #Calculate the frequency of each of the values in the target attribute\n for record in dataset:\n if (record[target_attr] in freq):\n freq[record[target_attr]] += 1.0\n else:\n freq[record[target_attr]] = 1.0\n \n # Calculate the entropy of the data for the target attribute\n for freq in list(freq.values()):\n data_entropy += (-freq/samplenumbers) * math.log(freq/samplenumbers, 2) \n \n return data_entropy",
"def entropy(target_col):\n elements,counts = np.unique(target_col,return_counts = True)\n entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n return entropy",
"def get_entropy_df(self, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n entropy = 0\n values = df[target].unique()\n\n for value in values:\n # Fraction of values of 'value' in target feature\n fraction = df[target].value_counts()[value]/len(df[target])\n entropy += -fraction*np.log2(fraction)\n\n return entropy",
"def c_entropy(state,target=None,log_base=2):\n if target!=None:\n state = state.ptrace(target)\n return entropy(com_measure(state),base=log_base)",
"def __gain(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n data_entropy = 0.0\n\n # Calculate the entropy of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n data_subset = data_set[data_set[split_feature] == value]\n data_entropy += probability * self.__entropy(data_subset, target_feature)\n\n return self.__entropy(data_set, target_feature) - data_entropy",
"def entropy_difference(feature, answers, num_lemma):\n f_max = np.max(feature)\n f_min = np.min(feature)\n # check is it unsound feature\n if f_max == f_min:\n # print('lemma 0: ', num_lemma)\n return 10000\n step = (f_max - f_min) / 1000\n p = [[0, 0] for _ in range(1000)]\n sum_p = len(feature)\n for j in range(len(feature)):\n index = math.trunc((feature[j] - f_min)/step)\n if index == 1000:\n index = 999\n p[index][answers[j]] += 1\n # difference between entropy feature+answers and just feature\n result = 0\n for i in range(1000):\n if (p[i][0] != 0) & (p[i][1] != 0):\n result += math.log2((p[i][0] + p[i][1]) / sum_p) * (p[i][0] + p[i][1]) / sum_p - \\\n math.log2(p[i][0] / sum_p) * (p[i][0]) / sum_p - \\\n math.log2(p[i][1] / sum_p) * (p[i][1]) / sum_p\n # entropy answers\n all_answers = len(answers)\n positive_answers = sum(answers) / all_answers\n negative_answers = 1 - positive_answers\n if (positive_answers == 0) or negative_answers == 0:\n entropy_answers = 0\n else:\n entropy_answers = - positive_answers * math.log2(positive_answers) - \\\n negative_answers * math.log2(negative_answers)\n\n # difference between (feature entropy + answers entropy) and (feature + answers) entropy\n if entropy_answers - result < 0:\n print('negative information', num_lemma, entropy_answers - result)\n return - (entropy_answers - result)",
"def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):\n raise NotImplementedError()",
"def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()",
"def entropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n out = torch.distributions.Categorical(logits=logits).entropy()\n if out.ndim > 1:\n out = out.squeeze(-1)\n return out",
"def get_lowest_entropy_feature(self, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n entropies = []\n\n for feature in self.features:\n entropies.append(self.get_entropy_df(df=df) - self.get_entropy_feature(feature=feature, df=df))\n\n # Quit growing if no information gain is possible locally (works 100% in 99% of cases)\n if len(set(entropies)) is 1:\n return None\n return df.keys().drop(target)[np.argmax(entropies)]",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy",
"def get_entropy(self, state):\n entropy = self.entropy_model.predict(state)\n return entropy[0]",
"def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()",
"def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))",
"def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])",
"def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy",
"def entropy(self):\n return self._entropy_func",
"def forward(self, input, target):\n target = target.squeeze_()\n return self.ratio * F.cross_entropy(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)",
"def entropy(out, dim=1, reduce='mean'):\n log_prob = F.log_softmax(out, dim=dim)\n h = -torch.sum(log_prob.exp() * log_prob, dim=dim)\n if reduce == 'none':\n return h\n if reduce == 'mean':\n return h.mean()\n if reduce == 'sum':\n return h.sum()",
"def entropy(self):\n raise NotImplementedError",
"def calculate_entropy(y):\n\tlog2 = lambda x: math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy",
"def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n 
terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self, **kwargs) -> TensorType:"
] | [
"0.80103743",
"0.72125024",
"0.6683741",
"0.6581703",
"0.64769596",
"0.6355471",
"0.6313559",
"0.63128346",
"0.6207924",
"0.61398417",
"0.6134875",
"0.6108048",
"0.6105479",
"0.6069138",
"0.60679644",
"0.60459185",
"0.6013914",
"0.60018563",
"0.59844184",
"0.59781724",
"0.5948515",
"0.59034395",
"0.5902789",
"0.5888784",
"0.58873934",
"0.5884112",
"0.58483624",
"0.58473134",
"0.58343667",
"0.58343667"
] | 0.7893348 | 1 |
Returns the feature with the lowest entropy given a target variable. | def get_lowest_entropy_feature(self, df=None):
if df is None:
df = self.df
target = self.target
entropies = []
for feature in self.features:
entropies.append(self.get_entropy_df(df=df) - self.get_entropy_feature(feature=feature, df=df))
# Quit growing if no information gain is possible locally (works 100% in 99% of cases)
if len(set(entropies)) is 1:
return None
return df.keys().drop(target)[np.argmax(entropies)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_entropy_feature(self, feature, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n target_variables = df[target].unique()\n variables = df[feature].unique()\n entropy = 0\n\n # Aggregate entropy for each unique value in 'feature' feature on each unique value in target feature\n for variable in variables:\n entropy_inner = 0\n for target_variable in target_variables:\n # Number of values of 'variable' in 'feature' feature that matches current target value\n num = len(df[feature][df[feature] == variable][df[target] == target_variable])\n # Number of values of 'variable' in 'feature' feature\n den = len(df[feature][df[feature] == variable])\n # Machine epsilon\n eps = np.finfo(np.float).eps\n fraction_inner = num/(den+eps)\n entropy_inner += -fraction_inner*np.log(fraction_inner+eps)\n fraction = den/len(df)\n entropy += -fraction*entropy_inner\n\n return abs(entropy)",
"def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature",
"def get_fmin(self):\n return self.model.predict(self.model.X)[0].min()",
"def __entropy(self, data_set, target_feature):\n frequencies = self.__calculate_frequency(data_set, target_feature)\n feature_entropy = 0.0\n number_of_values = len(data_set)\n\n # Add entropy for each value in frequencies.\n for frequency in frequencies:\n probability = frequencies[frequency] / number_of_values\n feature_entropy += (probability * math.log(probability, 2))\n\n return feature_entropy * -1",
"def target_temperature_low(self) -> float | None:\n return self._state.target_temperature_low",
"def target_temperature_low(self):\n return self._target_temperature_low",
"def target_temperature_low(self) -> float | None:\n return self._target_temperature_low",
"def get_entropy(self, state):\n entropy = self.entropy_model.predict(state)\n return entropy[0]",
"def target_temperature_low(self) -> int:\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"C\":\r\n return self.target_temperature_low_c\r\n elif self.temperature_scale == \"F\":\r\n return self.target_temperature_low_f\r\n else:\r\n return self._target_temperature_low\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low\"))",
"def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).",
"def target_temperature_low(self) -> float | None:\n target = self._node.aux_properties.get(PROP_SETPOINT_HEAT)\n if not target:\n return None\n return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)",
"def get_f_minimum(self):\n return np.min(self._Y)",
"def choose_feature(self, features, examples, tags):\n features_gains_dict = {feature : self.get_gain(examples, tags, feature) for feature in features}\n max_gain = 0\n max_feature = features[0]\n for feature in features:\n if features_gains_dict[feature] > max_gain:\n max_gain = features_gains_dict[feature]\n max_feature = feature\n\n # return the feature with the highest gain\n return max_feature",
"def target_temperature_low(self):\n if self._client.mode == self._client.MODE_AUTO:\n return self._client.heattemp\n return None",
"def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val",
"def value_head(features):\n with tf.variable_scope('critic', reuse=tf.AUTO_REUSE):\n features = tf.layers.dense(features, units=1, activation=None, name='output')\n return tf.squeeze(features, axis=-1)",
"def get_best_pred_finger(self,f):\n return min(self.best_finger_pred[f],\\\n key=lambda kn:dist_ident(kn.ident,self.get_finger_pred_loc(f)))",
"def _find_lowest_cost_node(self) -> str:\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in self.costs:\n cost = self.costs[node]\n if cost < lowest_cost and node not in self.closed_nodes:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node",
"def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id",
"def target_temperature_low(self):\n return self._device.setpoint_heat",
"def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r",
"def get_best_vector(results, f_x, target):\n index_min = -1\n cur_min = 10**10\n\n with Session() as sess:\n for index, individual in enumerate(results):\n f_x_res = sess.run(f_x, feed_dict={\n target: individual\n })\n if f_x_res < cur_min:\n cur_min = f_x_res\n index_min = index\n\n best = results[index_min]\n\n return best",
"def target_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_low_c)",
"def minimum_value(self):\n return self._fitness[self._minidx]",
"def get_smallest_h_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n return min(node_list, key=lambda x: x.h_cost)",
"def minimum_temperature(self):\n return self._minimum_temperature",
"def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def min(self):\r\n\t\treturn min(self.sample)"
] | [
"0.63987845",
"0.62906563",
"0.62745535",
"0.62361795",
"0.5970269",
"0.58792543",
"0.5835181",
"0.5813218",
"0.57950956",
"0.5747696",
"0.5695478",
"0.5677797",
"0.56756794",
"0.5600471",
"0.5550534",
"0.5540838",
"0.554071",
"0.5539472",
"0.55303925",
"0.5516388",
"0.54986537",
"0.547502",
"0.54568684",
"0.54394543",
"0.5417047",
"0.54096806",
"0.53909904",
"0.53876",
"0.53876",
"0.5383824"
] | 0.74907 | 0 |
Returns a recursively built tree using entropy for determining splits. | def build_tree(self, df=None, tree=None, depth=0):
if df is None:
df = self.df
target = self.target
node = self.get_lowest_entropy_feature(df)
if not node:
print("Pure solution not possible in current branch...")
return tree
variables = df[node].unique()
if tree is None:
tree = {}
tree[node] = {}
for value in variables:
subtable = df[df[node] == value].reset_index(drop=True)
inner_variables, counts = np.unique(subtable[target], return_counts=True)
if len(counts) == 1:
tree[node][value] = inner_variables[0]
elif depth >= self.max_depth:
return tree
else:
depth += 1
tree[node][value] = self.build_tree(df=subtable, depth=depth)
return tree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gen_merkle_tree__(self):\n tree_stage = []\n tree_stage_num = int(log2(self.l))\n current_tree_stage = self.keys[1:]\n\n for i in range(0, tree_stage_num):\n tree_stage.insert(i, self.__gen_parent_level_tree__(current_tree_stage))\n current_tree_stage = tree_stage[i]\n\n assert len(current_tree_stage) == 1\n\n self.hash_tree_root = current_tree_stage[0]",
"def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = 
maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))",
"def make_tree(self, X_subset, y_subset, depth):\n \n # YOUR CODE HERE\n #self.depth += 1\n if depth < self.max_depth and X_subset.shape[0] >= self.min_samples_split:\n \n best_feature, best_threshold = self.choose_best_split(X_subset, y_subset)\n print('depth = {}, size parent node = {}'.format(depth, len(X_subset)))\n print('best_feature = {}, best_threshold = {}'.format(best_feature, best_threshold))\n new_node = Node(best_feature, best_threshold)\n \n left_child, right_child = self.make_split(best_feature, best_threshold, X_subset, y_subset)\n new_node.left_child = self.make_tree(*left_child, depth+1)\n new_node.right_child = self.make_tree(*right_child, depth+1)\n \n else: # we have a leaf\n new_node = Node(-1, -1) # We flag leaf nodes by setting feature_index and threshold to -1\n new_node.value = self.predicted_values(y_subset)\n \n if self.classification:\n new_node.proba = np.mean(y_subset, axis=0)\n \n # We reduce the depth to compensate for the two calls to self.depth += 1 we make on\n # the same level for left_child and right_child.\n #self.depth -= 1\n \n return new_node",
"def build_tree(self):\n stack = []\n self._handle_solo_node_case()\n while self.root_hash == None:\n if len(stack) >= 2 and stack[-1].height == stack[-2].height:\n mom = stack.pop()\n dad = stack.pop()\n child_hash = self.sha256Sum(mom.hash + dad.hash)\n child = self.Node(mom, dad, child_hash)\n self.node_table[child_hash] = child\n mom.child = child\n dad.child = child\n\n if child.height == self.max_height:\n self.root_hash = child.hash\n\n stack.append(child)\n elif len(self.leaves) > 0:\n leaf = self.leaves.pop()\n self.node_table[leaf.hash] = leaf\n stack.append(leaf)\n # Handle case where last 2 nodes do not match in height by \"graduating\"\n # last node\n else:\n stack[-1].height += 1\n self.is_built = True",
"def build_tree(self, rows, attribute_list, depth=1, parent_rows=None):\n if len(rows) == 0:\n if parent_rows is not None:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n else:\n raise ValueError(\"Reached a decision node which had zero rows but was not\"\n \"provided with a parent node\")\n if self.max_depth is not None and depth == self.max_depth:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n try:\n splitting_func = {\"entropy\": self.get_entropy,\n \"gini\": self.get_gini}.get(self.splitting_criteria)\n except KeyError:\n print(\"Program only supports entropy and gini as splitting criteria. Provided criteria was \" +\n self.splitting_criteria)\n raise ValueError(\"Incorrect parameter value passed for splitting criteria\")\n\n value_before_split = splitting_func(rows)\n\n if len(attribute_list) == 0 or value_before_split == 0:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n if len(attribute_list) == 1 and attribute_list[0] == self.target_attribute:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n best_gain = -np.inf\n best_criteria = None\n best_attribute_partitions = None\n\n # Find the attribute having the best split \"\n\n best_attribute_partitions, best_criteria = self.get_best_attribute_for_split(attribute_list,\n best_attribute_partitions,\n best_criteria, best_gain,\n rows, splitting_func,\n value_before_split)\n branches = {}\n for domain_value in self.attribute_domains[best_criteria]:\n branch_attr_list = 
list(attribute_list)\n branch_attr_list.remove(best_criteria)\n if domain_value in best_attribute_partitions.keys():\n partition_dataset = best_attribute_partitions[domain_value]\n branches[domain_value] = self.build_tree(rows=partition_dataset,\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n else:\n branches[domain_value] = self.build_tree(rows=[],\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n return DecisionTree.DecisionNode(attribute_name=best_criteria, branches=branches)",
"def decision_tree_clf():\n\tclf_entropy = DecisionTreeClassifier(\n\t\tcriterion = \"entropy\", random_state = seed,\n\t\tmax_depth = 3, min_samples_leaf = 5\n\t\t)\n\treturn clf_entropy",
"def build_tree(rows: list) -> DecisionNode or Leaf:\n info_gain, question = get_best_split(rows)\n\n # If no info is gained just return a leaf node with remaining rows\n if info_gain == 0:\n return Leaf(rows)\n\n true_rows, false_rows = partition(rows, question)\n false_branch = build_tree(false_rows)\n true_branch = build_tree(true_rows)\n return DecisionNode(question, rows, true_branch, false_branch)",
"def __build_tree(self, x, y, depth):\n\n node = Node(x, y, depth)\n # update max depth\n if depth > self.maximum_depth:\n self.maximum_depth = depth\n\n classes = np.unique(y)\n class_counts = np.unique(y, return_counts=True)[1]\n\n # accounting for data inconsistency (such as identical feature\n # distribution but different assigned class)\n predicted_class = classes[np.argmax(class_counts)]\n feature, split = self.__find_best_split(x, y)\n\n # only assign a predicted class to leaf nodes\n if feature is None or split is None:\n node.is_leaf = True\n node.predicted_class = predicted_class\n return node\n\n node.feature_index_split = feature\n node.integer_splitting_rule = split\n node.entropy = self.__entropy(y)\n\n row_indices_left_child = x[:, feature] < split\n left_child_features, left_child_labels = x[row_indices_left_child], y[row_indices_left_child]\n right_child_features, right_child_labels = x[~row_indices_left_child], y[~row_indices_left_child]\n\n # recursively call build tree of child nodes\n node.left_child = self.__build_tree(left_child_features,\n left_child_labels, depth + 1)\n node.right_child = self.__build_tree(right_child_features,\n right_child_labels, depth + 1)\n\n return node",
"def build():\n # root = TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5",
"def huffman_tree(left, right):\n return tree(root(left) + root(right), [left, right])",
"def create_tree(self):\n feature_indices = []\n for i in self.estimator.tree_.feature:\n n_features = self.n_features\n if self.n_features > 1 or (self.n_features == 1 and i >= 0):\n feature_indices.append([str(j) for j in range(n_features)][i])\n indentation = 1 if self.target_language in ['java', 'js',\n 'php', 'ruby'] else 0\n return self.create_branches(\n self.estimator.tree_.children_left,\n self.estimator.tree_.children_right,\n self.estimator.tree_.threshold,\n self.estimator.tree_.value,\n feature_indices, 0, indentation)",
"def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))",
"def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)",
"def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root",
"def create_density_tree(dataset, dimensions, clusters, parentnode=None, side_label=None):\n\n # split\n\n dim_max, val_dim_max, _, _ = get_best_split(dataset, labelled=False)\n left, right, e_left, e_right = split(dataset, dim_max, val_dim_max,\n get_entropy=True) # split along best dimension\n\n treenode = DensityNode() # initial node\n\n # save tree node\n treenode.split_dimension = dim_max\n treenode.split_value = val_dim_max\n\n treenode.dataset = dataset\n treenode.left_dataset = left\n treenode.right_dataset = right\n\n treenode.dataset_len = len(dataset)\n treenode.left_dataset_len = len(left)\n treenode.right_dataset_len = len(right)\n treenode.entropy = entropy_gaussian(dataset)\n treenode.cov = np.cov(dataset.T)\n treenode.mean = np.mean(dataset, axis=0)\n treenode.left_cov = np.cov(left.T)\n treenode.left_mean = np.mean(left, axis=0)\n treenode.right_cov = np.cov(right.T)\n treenode.right_mean = np.mean(right, axis=0)\n treenode.left_entropy = e_left\n treenode.right_entropy = e_right\n\n # link parent node to new node.\n if parentnode is not None:\n treenode.parent = parentnode\n if side_label == 'left':\n treenode.parent.left = treenode\n elif side_label == 'right':\n treenode.parent.right = treenode\n\n clusters_left = clusters - 1\n if clusters_left > 1:\n # recursively continue splitting\n # continue splitting always splitting on worst side (highest entropy)\n # find node where left or right entropy is highest and left or right node is not split yet\n node_e, e, side = treenode.get_root().highest_entropy(dataset, 0, 'None')\n\n if side == 'left':\n dataset = node_e.left_dataset\n side_label = 'left'\n elif side == 'right':\n dataset = node_e.right_dataset\n side_label = 'right'\n\n create_density_tree(dataset, dimensions, clusters=clusters_left,\n parentnode=node_e, side_label=side_label) # iterate\n\n return treenode",
"def compute_tree(self,\n verbose=True):\n\n # Tree structure in format {leaf_id: node()}\n self.tree = {}\n # A path is list of integers in (-1, 0, 1) indicating the set of\n # decisions to take through the tree (lower, null, higher)\n # based on the specified labels and cutoff of the nodes.\n paths = [[]]\n path_idx = 0\n start_time = time()\n\n # Each path will point to a leaf that is not yet in the tree.\n while path_idx < len(paths):\n if verbose:\n string = f'{path_idx}/{len(paths)} ({time()-start_time:.0f} s)'\n sys.stdout.write('\\r'+string[:40]+' '*(40-len(string)))\n sys.stdout.flush()\n path = paths[path_idx]\n self.compute_path(path)\n leaf = node(path_idx)\n if self.sub_y_data.size == 0:\n raise NameError('No data on the leaf error')\n if len(path) < self.max_tree_depth or self.max_tree_depth <= 0:\n cutoffs = []\n for split_label in self.labels:\n cutoff, value = self.best_cutoff(split_label)\n cutoffs.append([split_label, cutoff, value])\n cutoffs = sorted(cutoffs, key=lambda x: -x[2])\n split_label, cutoff, value = cutoffs[0]\n leaf.value = value\n if value > self.value_threshold:\n leaf.label = split_label\n leaf.cutoff = cutoff\n leaf.id_lower = len(paths)\n paths.append(path+[-1])\n leaf.id_higher = len(paths)\n paths.append(path+[1])\n if np.isnan(self.sub_split_data[split_label]).any():\n leaf.id_null = len(paths)\n paths.append(path+[0])\n else:\n leaf.is_leaf = True\n ys_with = self.sub_y_data[self.sub_bin_data]\n ys_without = self.sub_y_data[self.sub_bin_data]\n leaf.n_data_with = len(ys_with)\n leaf.n_data_without = len(ys_without)\n if ys_with.size == 0 or ys_without.size == 0:\n leaf.effect = 0\n else:\n leaf.effect = ys_with.mean() - ys_without.mean()\n self.tree[leaf.id] = leaf\n path_idx += 1\n\n if verbose:\n string = f'{path_idx}/{len(paths)} ({time()-start_time:.0f} s)'\n sys.stdout.write('\\r'+string[:40]+' '*(40-len(string)))\n sys.stdout.flush()\n print()",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n # Tree indices work as follows:\n # 0 is the root\n # 2n+1 is the left child of n\n # 2n+2 is the right child of n\n # So we now rearrange `values` into that format...\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[:2 * len_ragged_row:2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def _compute_merkle_tree(self):\n # Tree gets built bottom up\n level_nodes = [MerkleNode(t.hash, None, None) for t in self.txns]\n while True:\n if len(level_nodes) == 1:\n self.merkle_tree = level_nodes[0] # This is the root\n return\n if len(level_nodes) % 2 != 0:\n # Make sure there are an even number of nodes\n level_nodes.append(level_nodes[-1])\n new_level = []\n for i in range(0, len(level_nodes), 2):\n left = level_nodes[i]\n right = level_nodes[i+1]\n n = MerkleNode(Hash.dhash(bytes(left.hash) + bytes(right.hash)), left, right)\n new_level.append(n)\n level_nodes = new_level",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[: 2 * len_ragged_row : 2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1 : 2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def _build_tree_recursive(self, tree, cur_node, X, y, depth):\r\n n_samples, n_features = X.shape\r\n leaf_reached = False\r\n\r\n # Evaluates if all instances belong to the same class\r\n if utils.all_instances_same_class(y):\r\n leaf_reached = True\r\n\r\n # Evaluates the min_samples_split stopping criteria\r\n if n_samples < self._min_samples_split:\r\n leaf_reached = True\r\n\r\n # Evaluates the depth stopping criteria\r\n if self._max_depth is not None and depth >= self._max_depth:\r\n leaf_reached = True\r\n\r\n best_split = None\r\n if not leaf_reached:\r\n best_split = self._find_split(X, y, n_features)\r\n if best_split is None or best_split.gain < self._min_gain_split:\r\n leaf_reached = True\r\n\r\n if leaf_reached:\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n result = np.argmax(samples)\r\n new_leaf = DecisionLeaf(samples=samples, depth=depth, result=result)\r\n tree.nodes.append(new_leaf)\r\n\r\n else:\r\n is_categorical = utils.categorical_data(X[:, best_split.feature_id])\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n\r\n if is_categorical:\r\n new_fork = DecisionForkCategorical(samples=samples, depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_categorical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n else:\r\n new_fork = DecisionForkNumerical(samples=samples, depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_numerical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n tree.nodes.append(new_fork)\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = self._build_tree_recursive(tree, node_to_split, X_left, y_left, depth=depth+1)\r\n tree.nodes[cur_node].left_branch = new_branch\r\n\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = 
self._build_tree_recursive(tree, node_to_split, X_right, y_right, depth=depth+1)\r\n tree.nodes[cur_node].right_branch = new_branch\r\n\r\n return cur_node",
"def _build_tree(self, X, y, label, feature_names, depth, sample_weights=None):\n mytree = dict()\n # YOUR CODE HERE\n # TODO: Use `_choose_best_feature` to find the best feature to split the X. Then use `_split_dataset` to\n # get subtrees.\n # Hint: You may find `np.unique` is useful.\n # begin answer\n #1. no feature 2. all lables are the same 3. depth exceed 4. X is too small\n if len(feature_names)==0 or len(np.unique(y))==1 or depth >= self.max_depth or len(X) <= self.min_samples_leaf: \n return self._leaf_calculation(y, label, sample_weights)\n best_feature_idx, best_feature_val=self._choose_best_feature(X, y, label, sample_weights)\n best_feature_name = feature_names[best_feature_idx]\n feature_names=feature_names[:]\n feature_names.remove(best_feature_name)\n mytree={best_feature_name:{}}\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights = self._split_dataset(X, y, label, best_feature_idx, best_feature_val, sample_weights)\n mytree[best_feature_name][(best_feature_val, True)]=self._build_tree(sub1_X, sub1_y, label1, feature_names, depth+1, sub1_sample_weights)\n mytree[best_feature_name][(best_feature_val, False)]=self._build_tree(sub2_X, sub2_y, label2, feature_names, depth+1, sub2_sample_weights)\n # end answer\n return mytree",
"def build_tree(n, d, name=defaultname):\n return build_tree_helper(1, n, 1, d, name)",
"def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree",
"def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree",
"def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root",
"def _createTree(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tif type(dataSet).__name__ != 'ndarray':\n\t\t\traise TypeError('input must be a numpy array.')\n\n\t\ttreenode = TreeNode()\n\t\tfeat_ind, val = DecisionTree._bestFeat2split(dataSet, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\tif feat_ind is None:\n\t\t\ttreenode.value = val\n\t\t\treturn treenode\n\t\ttreenode.cut_off = cut_off(feat_ind, val)\n\t\t\n\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *treenode.cut_off)\n\n\t\ttreenode.left = DecisionTree._createTree(D1, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\ttreenode.right = DecisionTree._createTree(D2, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\treturn treenode",
"def growDecisionTreeFrom(rows, evaluationFunction=entropy):\n\n if len(rows) == 0: return DecisionTree()\n currentScore = evaluationFunction(rows)\n\n bestGain = 0.0\n bestAttribute = None\n bestSets = None\n\n columnCount = len(rows[0]) - 1 # last column is the result/target column\n for col in range(0, columnCount):\n columnValues = [row[col] for row in rows]\n\n for value in columnValues:\n (set1, set2) = divideSet(rows, col, value)\n\n # Gain -- Entropy or Gini\n p = float(len(set1)) / len(rows)\n gain = currentScore - p*evaluationFunction(set1) - (1-p)*evaluationFunction(set2)\n if gain>bestGain and len(set1)>0 and len(set2)>0:\n bestGain = gain\n bestAttribute = (col, value)\n bestSets = (set1, set2)\n\n if bestGain > 0:\n trueBranch = growDecisionTreeFrom(bestSets[0])\n falseBranch = growDecisionTreeFrom(bestSets[1])\n return DecisionTree(col=bestAttribute[0], value=bestAttribute[1], trueBranch=trueBranch, falseBranch=falseBranch)\n else:\n return DecisionTree(results=uniqueCounts(rows))",
"def create_binary_tree(self):\n heap = filter(lambda x: not self.index2word[x.index][0].isupper(), itervalues(self.vocab))\n treelen = len(heap)\n logger.info(\"constructing a huffman tree from %i words\" % treelen)\n\n # build the huffman tree\n \n heapq.heapify(heap)\n for i in xrange(treelen - 1):\n min1, min2 = heapq.heappop(heap), heapq.heappop(heap)\n heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))\n\n # recurse over the tree, assigning a binary code to each vocabulary word\n if heap:\n max_depth, stack = 0, [(heap[0], [], [])]\n while stack:\n node, codes, points = stack.pop()\n if node.index < len(self.vocab):\n # leaf node => store its path from the root\n node.code, node.point = codes, points\n max_depth = max(len(codes), max_depth)\n else:\n # inner node => continue recursion\n points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)\n stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))\n stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))\n \n logger.info(\"built huffman tree with maximum node depth %i\" % max_depth)",
"def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree",
"def _initialize_trees(self):"
] | [
"0.70202446",
"0.6813201",
"0.66544646",
"0.66278887",
"0.651889",
"0.6421867",
"0.6334987",
"0.6331442",
"0.62777907",
"0.6258492",
"0.62338763",
"0.6215458",
"0.6194862",
"0.61937433",
"0.6149138",
"0.6122082",
"0.6111612",
"0.60880184",
"0.60734576",
"0.6070742",
"0.60533327",
"0.60363364",
"0.6019354",
"0.60147715",
"0.60117227",
"0.60029626",
"0.59912086",
"0.59799725",
"0.59767294",
"0.5925008"
] | 0.6884574 | 1 |
Calculates frequency distribution of univerasal POS Tags | def pos_tag_counts(doc):
tags = []
for token in doc:
tags.append(token.pos_)
frequency = dict(Counter(tags).most_common())
return frequency | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pos_frequencies(corpus):\n return frequencies(corpus, -2)",
"def freq():",
"def get_num_POS_tags(data, pos_tag):\n pos_count = []\n for tweet in data:\n tokens = nltk.word_tokenize(tweet)\n tags = nltk.pos_tag(tokens)\n counts = Counter([j for i, j in tags])\n total = sum(counts.values())\n # normalized_counts = dict((word, float(count) / total) for word, count in counts.items())\n normalized_verb_count = sum(count for pos, count in counts.iteritems() if pos.startswith(pos_tag))\n # verb_counts = sum(1 for word, pos in normalized_counts if word.startswith('VB'))\n pos_count.append(normalized_verb_count / total)\n\n return np.array(pos_count).reshape(-1, 1)",
"def freq(self) -> int:",
"def word_frequencies(corpus):\n return frequencies(corpus, 1, to_lower=True)",
"def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords 
= nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features",
"def entropy(doc_or_tokens: types.DocOrTokens) -> float:\n words = utils.get_words(doc_or_tokens)\n word_counts = itertoolz.frequencies(word.text for word in words)\n n_words = sum(word_counts.values())\n probs = (count / n_words for count in word_counts.values())\n return -sum(prob * math.log2(prob) for prob in probs)",
"def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features",
"def get_e_probs(dataset):\n\n # Number of times that the state s is seen paired with observation x in the corpus\n e_word_tag_counts = {}\n\n for sentence in dataset:\n\n for word_to_tag in sentence:\n # Foreach (word, tag) tuple we are calculating number of incstances\n if word_to_tag in e_word_tag_counts:\n e_word_tag_counts[word_to_tag] += 1\n else:\n e_word_tag_counts[word_to_tag] = 1\n\n return e_word_tag_counts",
"def create_tag_frequencies(dataframe):\n from pyspark.sql.functions import desc\n from pyspark.sql.functions import col\n df_tags = dataframe.selectExpr(\"tag1 AS tag\").union(dataframe.selectExpr(\"tag2 AS tag\")).union(dataframe.selectExpr(\"tag3 AS tag\")) \\\n .union(dataframe.selectExpr(\"tag4 AS tag\")).union(dataframe.selectExpr(\"tag5 AS tag\"))\n df_tags = df_tags.na.drop(subset=[\"tag\"])\n tags_total_count = df_tags.count()\n print(\"Total number of tags used, including duplicates:\",tags_total_count)\n df_tag_freq = df_tags.groupBy(\"tag\").count().orderBy(desc(\"count\"))\n df_tag_freq = df_tag_freq.withColumn(\"frequency\", col(\"count\")/tags_total_count)\n df_tag_freq.orderBy(desc(\"frequency\")).show(20)",
"def tf(word, document):\n return freq(word,document) / wordCount(document)",
"def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency",
"def get_ngramlogprobs(freqdict):\n return",
"def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()",
"def print_pos_tag_statistics(unique_tags, counts):\n print('\\nPOS tag distribution')\n count_sort_ind = np.argsort(-counts)\n print('Identified pairs (word, tag): {}'.format(np.sum(counts)))\n for tag, count in zip(unique_tags[count_sort_ind], counts[count_sort_ind]):\n print('{} \\t- \\t{} \\t- \\t{:.3f}'.format(tag, count,\n count/sum(counts)))\n print()",
"def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-self.omega) * p1 + self.omega * p2",
"def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf",
"def raw_freq(*marginals):\n return float(marginals[NGRAM]) / marginals[TOTAL]",
"def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict",
"def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])",
"def compute_class_freqs(gen):\r\n labels = gen.labels\r\n N = labels.shape[0]\r\n positive_frequencies = np.sum(labels, axis=0) / N\r\n negative_frequencies = np.sum(1 - labels, axis=0) / N\r\n return positive_frequencies, negative_frequencies",
"def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)",
"def compute_frequencies(num_words, documents):\n res = [0 for i in range(num_words)]\n sum = 0\n for word in documents:\n sum += 1\n tmp = set(word)\n for number in tmp:\n res[number] += 1\n \n res = [i / sum for i in res]\n return res",
"def freq_dist(corpus):\n fd = nltk.FreqDist(corpus)\n return fd",
"def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score",
"def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf",
"def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n omega = self.alpha / (doc_length + self.alpha)\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-omega) * p1 + omega * p2",
"def get_frequencies(filename):\n freq_dict = {}\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = os.path.join(PARSED, name + \".fix.xml\")\n #soup = bs(open(f, 'r'))\n soup = bs(codecs.open(f, 'r', encoding='utf-8'))\n for sent in soup.findAll('sentence'):\n for token in sent.findAll('token'):\n try:\n w = token.word.string\n if w in freq_dict:\n freq_dict[w] += 1\n else:\n freq_dict[w] = 1\n except AttributeError:\n pass\n return freq_dict",
"def vectorize_pos_tags(self, tokens):\n\n pos_counts = defaultdict(float)\n for token in tokens:\n pos_counts[token.pos] += 1.0\n return pos_counts",
"def freq(word, document):\n return document.split(None).count(word)"
] | [
"0.6999544",
"0.6887276",
"0.6551115",
"0.6530391",
"0.6523099",
"0.6506458",
"0.6481359",
"0.64570904",
"0.6456174",
"0.6447249",
"0.63904",
"0.63714606",
"0.6351611",
"0.62936366",
"0.62892985",
"0.6284025",
"0.62587374",
"0.6250596",
"0.62461895",
"0.62247854",
"0.6215164",
"0.6200011",
"0.61920226",
"0.61593443",
"0.61560374",
"0.6154562",
"0.61520404",
"0.61443746",
"0.61292094",
"0.60835654"
] | 0.6933762 | 1 |
In this callback we check if the lidar sensor is online the bottom clearance data is set to nan if we are using barometric pressure | def altitude_callback(self, data):
self.altitude = data
self.altitude_bottom_clearance = np.float64(data.bottom_clearance)
if np.isnan(self.altitude_bottom_clearance):
# message is often enough that it should alert the user but shouldn't swamp the console
rospy.logwarn_throttle(0.5, 'bottom clearance is invalid type: {}'.format(self.altitude_bottom_clearance)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n if self.temperature != None and self.humidity != None:\n self.sensor.set_environmental_data(self.humidity, self.temperature)\n# Trim away error values.\n new_eco2 = self.sensor.eco2\n if new_eco2 < 65535:\n self.eco2 = new_eco2\n self.tvoc = self.sensor.tvoc",
"def _distanceCheck(self):\n\n # Catches the occasional polling error that occurs with the ultrasonic distance sensor\n try:\n # 3 point averager to smooth out distance data\n dist = self.u.distance\n sleep(0.05)\n dist += self.u.distance\n sleep(0.05)\n dist += self.u.distance\n dist = dist/3\n\n #print(\"Distance check reading: {0:1.3f}\".format(dist))\n\n if( dist <= self.detectDist ):\n if( self.birdHere == 0 ):\n self.statusWrite(\"in\")\n self.birdHere = 1\n\n else:\n if( self.birdHere == 1 ):\n self.statusWrite(\"out\")\n self.birdHere = 0\n\n except RuntimeError:\n pass",
"def _process_ping_response(self, message):\n self.set_available(True, True)\n if self.in_RSSI != message.in_RSSI.value:\n self.in_RSSI = message.in_RSSI.value\n self.do_callback(SENSOR_RSSI_IN[\"id\"])\n if self.out_RSSI != message.out_RSSI.value:\n self.out_RSSI = message.out_RSSI.value\n self.do_callback(SENSOR_RSSI_OUT[\"id\"])\n if self.ping_ms != message.ping_ms.value:\n self.ping_ms = message.ping_ms.value\n self.do_callback(SENSOR_PING[\"id\"])",
"def on_discon_occurred(self):\n self.connected = False\n self.enable_text.setText(\"Enable: \")\n self.enable_box.setCheckState(Qt.Unchecked)\n\n # Trim potentially useless data\n num_samples = len(self.data_collected.timestamps)\n if self.num_trim_samples >= num_samples:\n self.data_collected.clear()\n else:\n self.data_collected.trim(self.num_trim_samples)\n\n self.warning = QErrorMessage()\n self.warning.showMessage(\"Myo armband device disconnected unexpectedly.\")\n self.warning.show()\n\n self.disconnect_notify(self.myo_device[\"sender_address\"].hex())\n self.enable_box.setEnabled(True)",
"def check_heartbeat(self):\n return True",
"def lux_above_threshold(self) -> bool:\n if self.lux_sensor:\n value = self.hass.get_state(self.lux_sensor)\n if value not in [\"unavailable\", \"unknown\"]:\n return float(value) > self.lux_threshold\n\n return False",
"def has_warning(self):\n \n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n else:\n fraction = 0.0\n \n if self['skipped_subchannel'] > 0:\n return True\n elif fraction > 1.0e-4:\n return True\n else:\n return False",
"def data_ready(self) -> bool:\n data_ready = ctypes.c_uint8()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetMeasurementDataReady(self.dev, byref(data_ready)))\n return data_ready.value != 0",
"def _battery_cb(self, msg):\n # self.battery_voltages[msg.header.seq %\n # len(self.battery_voltages)] = msg.voltage\n self.battery_voltages[msg.header.seq % len(\n self.battery_voltages)] = msg.percentage * 100.\n # delta = self.INIT_VOLTAGE - self.MINIMUM_VOLTAGE\n # self.low_battery = (np.mean(self.battery_voltages) <=\n # (self.MINIMUM_VOLTAGE +\n # (0.1 * delta))) and (self._current_wp != 0)\n self.low_battery = (np.mean(self.battery_voltages) <=\n self.MINIMUM_VOLTAGE * 1.5) and (self._current_wp\n != 0)",
"def poll_device(self):\n #self.logger.info(\"poll_device: Checking online status\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online', None) is not None:\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self.set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")",
"def state(self):\n if self.ticker.values is not None:\n return round(\n float(\n self.ticker.values.get(\"last\")\n ),\n self.display_currency_decimals,\n )\n else:\n _LOGGER.warning(\"sensor state was requested but values is not set yet\")",
"def callback_check(data):\n global BallDetected, BallCheck, currentRadius\n BallDetected = data.BallDetected\n currentRadius = data.currentRadius\n\n if (BallDetected == True) and (BallCheck == False):\n BallCheck = True\n rospy.loginfo(\"Ball Detected! Start tracking \")\n client.cancel_all_goals()",
"def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer",
"def ultrasonicChecker() -> None:\n ...",
"def min_humidity(self):\n return 0",
"def test_none_to_on_transition_no_reading():\n m = monitor.Monitor()\n t = datetime(2010, 1, 1, 0, 0)\n m.set_outside_temperature(10, t)\n assert m.temperature_update(20, t) == None\n m.boiler_on(t)\n assert m.temperature_update(22, t + timedelta(seconds=10)) == None",
"def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return",
"def test_verify_state_of_a_device_when_disconnected_from_the_device():",
"def check_config(self):\n assert 'AUTO' in self.config\n assert 'LAT' in self.config\n assert 'LON' in self.config\n assert 'ALL_CHANNELS' in self.config\n for key in self.extract_servo_channels():\n assert 'STATUS' in self.config[key]\n self.config[key]['STATUS'] = float(self.config[key]['STATUS'])\n assert 0.0 <= self.config[key]['STATUS'] <= 1.0\n\n if not 'SUNRISE_BUFFER' in self.config[key]:\n self.config[key]['SUNRISE_BUFFER'] = 0\n if not 'SUNSET_BUFFER' in self.config[key]:\n self.config[key]['SUNSET_BUFFER'] = 0",
"def check_lighting_state_room1():\n if timer_lights_on_off_room1() == room1_lux():\n pass\n else:\n light_room1(timer_lights_on_off_room1())",
"def ready_to_measure(self):\n\n constant_tank_ready = self.tanks[CONSTANT_TANK].is_filled\n measuring_tank_ready = (\n self.valves[MEASURING_DRAIN_VALVE].is_closed\n and not self.tanks[MEASURING_TANK].is_filled\n )\n constant_pump_running = self.pumps[PUMP_CONSTANT_TANK].is_running\n\n return constant_tank_ready and measuring_tank_ready and constant_pump_running",
"def am_i_offline(self):\n # -num_of_last_check_rounds_consider won't raise IndexError when len(self.data) is smaller\n logger.debug(\"called am_i_offline and data is: %s\" % self.data)\n if not self.data:\n return False\n for dict_check_results in self.data[-self.num_of_last_check_rounds_consider:]:\n for res in list(dict_check_results.values()):\n if res == 0:\n return False\n else:\n return True",
"def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NC}'.format(online_topic, **colors))\n try:\n self.mqtt.connect(self.mqtt_host)\n except Exception:\n print('MQTT broker not online')\n return False\n\n self.mqtt.message_callback_add(online_topic, lambda *args: \\\n setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n startTime = dt.datetime.now()\n while not self.online and not too_old(startTime, wait_time):\n self.mqtt.loop(timeout=loop_time)\n time_waited = (dt.datetime.now() - startTime).total_seconds()\n # If we did not see device publish INFO2, sometimes platformio causes\n # a delay by checking for updates and we miss seeing this message.\n # To check for that case, query the device for its build timestamp and\n # check if it was built in the last couple minutes.\n if not self.online:\n self.query_tas_status()\n if 'build_time' in self.reported:\n build_time = dt.datetime.strptime(self.reported['build_time'],\n '%Y-%m-%dT%H:%M:%S')\n if dt.datetime.now() - build_time < dt.timedelta(minutes=2):\n self.online = True\n\n if not self.online:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NC}'.format(f_name=self.f_name,\n wait_time=str(wait_time),\n **colors))\n elif self.online:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NC}'.format(f_name=self.f_name,\n time_waited=time_waited,\n **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()\n return self.online",
"def check_sensor(self):\n return True if (self._read_register_1ubyte(self.BME680_ID) == 0x61) else False",
"def _is_connection_stale(self):\n\n if time.time() - self.last_ping > HEART_BEAT_PING_TIME:\n self._ping()\n\n return (time.time() - self.last_pong) > HEART_BEAT_PING_TIME + HEART_BEAT_PONG_TIME",
"def check_status(self):\n if self.voltage>self.criticalValue and not self.statusHigh:#just went high\n self.statusHigh = True\n self.channelMessage = self.channelMessageHigh\n if ss is not None:\n if self.highSoundFile is not None:#specific high soundfile\n ss.playFile(os.path.join(\"sounds\",self.highSoundFile),1, 60.0)\n elif self.highIsGood:\n winsound.MessageBeep(winsound.MB_ICONASTERISK)#high is good and we just went high so nice sound\n else:\n winsound.MessageBeep(winsound.MB_ICONHAND)#high is bad and we just went high so bad sound\n \n elif self.voltage<self.criticalValue and self.statusHigh:#just went low\n self.statusHigh = False\n self.channelMessage = self.channelMessageLow\n if ss is not None:\n if self.lowSoundFile is not None:#specific high soundfile\n ss.playFile(os.path.join(\"sounds\",self.lowSoundFile),1, 60.0)\n if not self.highIsGood:\n winsound.MessageBeep(winsound.MB_ICONASTERISK)#high is bad and we just went low so good sound\n else:\n winsound.MessageBeep(winsound.MB_ICONHAND)#high is good and we just went low so bad sound",
"def is_on(self):\n return (\n self._device.batterylevel != SHCBatteryDevice.BatteryLevelService.State.OK\n )",
"def test_light_no_data(self):\n light = Light({})\n\n assert light.warning is None\n assert light.off is None",
"def is_connected(self) -> bool:\n return self.arduino is not None",
"def checkGridSensorData(self):\r\n\r\n\r\n self.gridSensorDataX, self.gridSensorDataY, self.gridSensorDataZ, self.gridSensorDataRotationX, self.gridSensorDataRotationY, self.gridSensorDataRotationZ, res = self.receiver.getGridSensorData()\r\n return res"
] | [
"0.5760847",
"0.56696296",
"0.5600618",
"0.5469788",
"0.54373187",
"0.54115516",
"0.53911066",
"0.5358881",
"0.5356354",
"0.5347453",
"0.5337583",
"0.5324799",
"0.5323337",
"0.532301",
"0.52751553",
"0.5252866",
"0.52426714",
"0.5241194",
"0.521606",
"0.5205771",
"0.5201203",
"0.51944786",
"0.5185882",
"0.51781875",
"0.51780593",
"0.51764256",
"0.51647335",
"0.51516205",
"0.5142961",
"0.51181215"
] | 0.6732945 | 0 |
Extended Function to overide HTML Parser Collect Word | def handle_data(self, data):
# Extended from HTML Parser
# Overied this function for collecting word from webpage and store in to a list
words = data.split()
for word in words:
if word.isalpha() == True: # This is for filtering only alphabet string.
self.word_lst.append(word.lower()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, word):\n raise NotImplementedError",
"def parser(self, value):\n value = self.lowercase(value)\n value = self.punctuation(value)\n value = self.tokenization(value)\n value = self.remove_stopwords(value)\n value.append(\"wiki\")\n searched_words = \" \"\n return searched_words.join(value)",
"def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n #print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)",
"def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc",
"def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext",
"def words(self, word):\n pass",
"def __call__(self, doc):\n matches = self.matcher(doc)\n spans = [] # keep spans here to merge them later\n for match_id, start, end in matches:\n span = doc[start : end]\n for token in span:\n token._.set(self._is_url, True)\n token.lemma_ = get_domain(token.text)\n spans.append(span)\n\n return doc",
"def extract_all_text(self, url, html_doc):\n self.title_text = self.get_title_words(html_doc)\n self.meta_text = self.get_meta_words(html_doc)\n self.url_text = self.get_url_words(url)\n self.heading_text = self.get_heading_words(html_doc)\n self.body_text = self.get_body_words(html_doc)",
"def _words(self):\n regex = r'\\b\\w+\\b'\n for word in re.findall(regex, self.text):\n yield word",
"def _add_text(self, elem):\n words = WORD_SEPARATORS.split(elem.string.lower())\n for word in words:\n word = word.strip()\n if word in self._ignored_words:\n continue\n self._curr_words.append((self.crawler.word_id(word), self._font_size))",
"def post_process(text):\n # XXX update to spit out HTML - no need for requests GDocs can take html\n verbose = False\n request_list = []\n chars = iter(text)\n normal_text = []\n knownsigils = {\"end\":('',\"NONE\"),\n \"^\": (\"0123456789+-\",\"SUPERSCRIPT\"),\n \"_\": (\"0123456789\",\"SUBSCRIPT\")\n }\n c = next(chars, \"end\")\n while (True):\n if (c in knownsigils.keys()):\n if len(normal_text): request_list.append((''.join(normal_text), \"NORMAL\"))\n normal_text.clear()\n (c,token) = _gettoken(c,chars,knownsigils)\n if (token is not None): request_list.append(token)\n if (c==\"end\"):\n break\n else:\n continue\n else:\n normal_text.append(c)\n c = next(chars, \"end\")\n return request_list",
"def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces",
"def words(self):\n return self.title + self.content",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def morpho_doc(doc):\n doc_text = doc.stripped\n mystem_analyzer.start()\n # new_morpho = mystem_analyzer.analyze(doc_text)\n new_morpho = mystem_analyzer.analyze(doc_text.replace('\\n',''))\n\n morpho_list = []\n\n for element in new_morpho: # разрезаем\n\n if is_sentence_end(element):\n morpho_list.append(element)\n else:\n\n line = element.get('text', '')\n\n space_len = 0\n\n word_start = -1\n word_len = 0\n\n symbol_number = -1\n for symbol in line:\n\n symbol_number+=1\n\n if symbol == \"'\" or symbol == '\"' or symbol == '»' or symbol == '«':\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n # добавим кавычку\n new_element = {'text': symbol}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n elif symbol == \" \":\n\n if word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n space_len += 1\n\n else:\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n if word_start == -1:\n word_start = symbol_number\n word_len = 1\n else:\n word_len += 1\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': 
cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n\n morpho_list.append(new_element)\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n for i in range(len(morpho_list) - 1): # переставляем\n if i > 0:\n if morpho_list[i - 1]['text'] == ' ' and morpho_list[i]['text'] == '\"' and morpho_list[i + 1]['text'] == '\\\\s':\n morpho_list[i], morpho_list[i + 1] = morpho_list[i + 1], morpho_list[i]\n\n sentence_index = 0\n word_index = 0\n start_offset = 0\n\n for element in morpho_list: # нумеруем\n if is_sentence_end(element):\n if word_index != 0:\n sentence_index += 1\n word_index = 0\n else:\n line = element.get('text', '')\n line_len = len(line)\n\n if(line[0]!=' '):\n element['start_offset'] = start_offset\n element['end_offset'] = start_offset + line_len - 1\n element['word_index'] = word_index\n element['sentence_index'] = sentence_index\n\n word_index += 1\n start_offset += line_len\n\n doc.morpho = morpho_list\n mystem_analyzer.close()",
"def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type",
"def get_meta_words(self, html_doc):\n name_attr = ['description', 'keywords', 'title']\n meta_words = ''\n for attr in name_attr:\n data = html_doc.find('meta', {\"name\" : attr})\n if data != None: \n meta_words += data.get('content') + ' '\n \n return meta_words",
"def tokens(doc):\n return (tok.lower() for tok in re.findall(r\"\\w+\", doc))",
"def get_words(self, cleaner):\n return cleaner.clean(self.get_text())",
"def parseword(intext): # type: (str) -> str\n\n wordbinarydata = base64.b64decode(intext.strip())\n wordFileObj = io.BytesIO()\n wordFileObj.write(wordbinarydata)\n theword = docx.Document(wordFileObj)\n extractedText = ''\n for para in theword.paragraphs:\n extractedText = extractedText + para.text + '\\n'\n\n return extractedText",
"def process_input(self, word):\n return",
"def wikiword(self):\r\n prefix = \"https://fr.wikipedia.org/w/api.php?action=query&prop=extracts&exintro&format=json&pageids=\"\r\n requestpageid = prefix + str(self.Pageid)\r\n\r\n request = requests.get(requestpageid)# to get json from wikipedia\r\n return_json_API = request.json() # get json file from the request\r\n\r\n try:\r\n # regular expression to remove HTML + linebreaks tags we get back from wiki\r\n wiki_content = re.sub('<[^>]+>|\\n','',return_json_API[\"query\"][\"pages\"][str(self.Pageid)][\"extract\"])\r\n except:\r\n wiki_content = 'Je n\\'ai rien trouve sur Wikipedia, enfin je veux dire ce ne me dit rien du tout :-(' \\\r\n 'Au fait n\\'oublie pas de mettre des majuscules aux noms propres !'\r\n\r\n return wiki_content",
"def word_runner(self):\n with open(self.filename) as doc:\n text = doc.readlines()\n for line in text:\n for word in line.split():\n yield word",
"def get_words(self, article: BeautifulSoup):\n return len(re.findall(r'\\w+', self.get_article_text(article)))",
"def apply(self, text):",
"def word_of_the_day():\n r = requests.get(\"http://www.urbandictionary.com\") # link is always homepage\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\" # header is the word we are defining\n # def_header = def_header[0:len(def_header) - 10] # header always ends in \"unknown\" this removes it\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n # formatting TODO move to controller\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n for x in [\"v.\", \"n.\"]:\n meaning = meaning.replace(x, x.upper()[:-1])\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n output = def_header + \": \" + \"```\" + meaning + \"\\nEx: \" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example",
"def extract_words(self):\n str = self.text.lower()\n words = re.sub(r'[?|—|:|\"|,|\\.\\n|\\.|\\s|\\n|\\t|\\v|\\f|\\r]+', \"*\", str)\n self.word_list = words.split(\"*\")",
"def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def taggerWord(self,word):\n if(\"tagger\" in self._classes):\n return self._tagger.taggerWord(word)"
] | [
"0.62091404",
"0.61985904",
"0.603075",
"0.6020231",
"0.59993106",
"0.5990653",
"0.5796557",
"0.5792185",
"0.57910925",
"0.5778996",
"0.57431465",
"0.57394904",
"0.5726501",
"0.56900585",
"0.56900585",
"0.5642334",
"0.5611355",
"0.56076735",
"0.5604131",
"0.5571104",
"0.5564887",
"0.55541533",
"0.5551021",
"0.5536094",
"0.55333084",
"0.55263865",
"0.5525567",
"0.54960734",
"0.54816115",
"0.5476701"
] | 0.62500596 | 0 |
Sort hypotheses according to their log probability. | def sort_hyps(self, hyps):
return sorted(hyps, key=lambda h: h.avg_log_prob, reverse=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp",
"def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])",
"def log_probability(self, samples):\n pass",
"def add(self, hyp, sum_logprobs):\n score = sum_logprobs / len(hyp) ** self.length_penalty\n if len(self) < self.n_hyp or score > self.worst_score:\n self.hyp.append((score, hyp))\n if len(self) > self.n_hyp:\n sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])\n del self.hyp[sorted_scores[0][1]]\n self.worst_score = sorted_scores[1][0]\n else:\n self.worst_score = min(score, self.worst_score)",
"def gen_pr_thres_list(thres_count):\n zero_to_one = np.linspace(1.001, 1.5, num=int(thres_count/2))\n one_to_five = np.linspace(1.501, 8.0, num=int(thres_count/2))\n\n thres_list = sorted(list(zero_to_one) + list(one_to_five))\n thres_list = np.log(thres_list)\n return thres_list",
"def normaliseandsort(slu_hyps):\n result = []\n sorted_hyps = slu_hyps.items()\n sorted_hyps.sort(key=lambda x: -x[1])\n total_score = sum(slu_hyps.values())\n for hyp, score in sorted_hyps:\n if total_score == 0:\n result.append({\"score\": 0, \"slu-hyp\": json.loads(hyp)})\n else:\n result.append({\"score\": min(1.0, score/total_score), \"slu-hyp\": json.loads(hyp)})\n return result",
"def log_prob(self):",
"def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)",
"def hyp_score(log_probs, sequence_lengths, penalty_factor):\n\n # Calculate the length penality\n length_penality_ = length_penalty(\n sequence_lengths=sequence_lengths,\n penalty_factor=penalty_factor)\n\n score = log_probs / length_penality_\n \n return score, length_penality_",
"def cox_ph_loss_sorted(log_h: Tensor, events: Tensor, eps: float = 1e-7) -> Tensor:\n if events.dtype is torch.bool:\n events = events.float()\n events = events.view(-1)\n log_h = log_h.view(-1)\n gamma = log_h.max()\n log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma)\n return - log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum())",
"def log_prob(theta):\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior",
"def sentence_logprob(self, sentence):\n grams = get_ngrams(sentence, 3)\n p = 1\n\n for gram in grams:\n p *= np.longfloat(self.smoothed_trigram_probability(gram))\n\n return np.log2(p)",
"def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p",
"def log_prob(self, th):\n\n\t\tmask = self.__low <= th <= self.__high\n\t\tif len(th.shape) == 1:\n\t\t\tlength = 1\n\t\telif len(th.shape) == 2:\n\t\t\tlength = th.shape[0]\n\t\telse:\n\t\t\traise RuntimeError(\"th must be either (d,) or (n_samples, d)\")\n\t\tlogprobs = np.empty(length)\n\t\tlogprobs[mask] = 0.\n\t\tlogprobs[~mask] = -float('inf')\n\t\treturn logprobs",
"def order_ideal(self, gens):",
"def sortNgrams(hashtable):\n\tsorted = map(lambda (x, y): (y,x), hashtable.items())\n\tsorted.sort() # sort on basis of frequency\n\tsorted.reverse() # revert order: most frequent first\n\treturn map(lambda (y, x): (x, y), sorted)",
"def trim_hypotheses(self, minprob=1.0e-6, maxhypot=20):\n\n # Skip pruning if less hypotheses exist than the maximum allowed.\n if len(self.__hypotheses) <= maxhypot:\n return\n\n # Sort the hypotheses in decreasing log probability order.\n self.__hypotheses.sort(key=lambda dct: -dct['log_probability'])\n\n # Store the indices of likely hypotheses.\n minprob = np.log(minprob)\n index = [i for i, hypot in enumerate(self.__hypotheses)\n if hypot['log_probability'] > minprob]\n\n # Trim the hypotheses.\n index = index[:maxhypot] if len(index) >= maxhypot else index\n self.__hypotheses = [self.__hypotheses[i] for i in index]\n\n # NOTE: This final ordering can preserve the original order of the\n # hypotheses. Interestingly, the algorithm specified in update\n # does not require that the hypotheses be ordered! This sort can\n # safely be ignored.\n # self.__hypotheses.sort(key=lambda dct: dct['index'])\n\n # Normalize the hypotheses so that their probabilities sum to one.\n logsum = -np.inf\n for hypot in self.__hypotheses:\n logsum = self.__soft_max(logsum, hypot['log_probability'])\n for hypot in self.__hypotheses:\n hypot['log_probability'] -= logsum",
"def sort(self):\r\n\t\treturn sorted(self.sample)",
"def add(self, hyp, sum_logprobs):\n score = sum_logprobs / len(hyp) ** self.length_penalty\n if len(self) < self.num_beams or score > self.worst_score:\n self.beams.append((score, hyp))\n if len(self) > self.num_beams:\n sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])\n del self.beams[sorted_scores[0][1]]\n self.worst_score = sorted_scores[1][0]\n else:\n self.worst_score = min(score, self.worst_score)",
"def log_prob(self, x, y):\n p = self.tag_log_prob(y)\n for i in range(len(y)):\n if self.out_prob(x[i], y[i]) == 0:\n return -math.inf\n\n p += math.log2(self.out_prob(x[i], y[i]))\n\n return p",
"def tag_log_prob(self, y, add_end_token=True):\n p = 0\n n = self._n\n y = (START_TAG,) * (n - 1) + tuple(y) + (END_TAG,) * add_end_token\n for i in range(len(y) - self._n + 1):\n tag = y[i + n - 1]\n prev_tags = y[i:i + n - 1]\n if self.trans_prob(tag, prev_tags) == 0:\n return -math.inf\n\n p += math.log2(self.trans_prob(tag, prev_tags))\n\n return p",
"def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))",
"def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]",
"def sort(self):\r\n\t\t\r\n\t\t# get variables, add i\r\n\t\tv = self.scan(p=False)\r\n\t\tv.append('i')\r\n\t\t\r\n\t\t# reverse so least weighted variables come first\r\n\t\tv.reverse()\r\n\t\t\r\n\t\t# assign a weight to each variable, based on position in list\r\n\t\tw = {}\r\n\t\tfor n,i in enumerate(v):\r\n\t\t\tw[i] = 1000 ** (n + 1)\r\n\t\t\t\r\n\t\t# assign score based on weights and exponents\r\n\t\ts = {}\r\n\t\tfor i in self:\r\n\t\t\t\r\n\t\t\t# sum weights\r\n\t\t\tc = 0\r\n\t\t\tfor k,j in i.items():\r\n\t\t\t\t\r\n\t\t\t\t# adjust weights based on exponent\r\n\t\t\t\tif k != 'i':\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j)\r\n\t\t\t\t\t\r\n\t\t\t\t# i is adjusted based on even or odd exponents\r\n\t\t\t\telse:\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j % 2)\r\n\t\t\t\t\t\r\n\t\t\t# use score as key\r\n\t\t\ts[c] = i\r\n\t\t\t\t\r\n\t\t# sort keys largest to smallest\r\n\t\ty = s.keys()\r\n\t\ty.sort()\r\n\t\ty.reverse()\r\n\t\t\r\n\t\t# new term list\r\n\t\tn = [s[k] for k in y]\r\n\t\t\r\n\t\treturn Li(n,c=False)",
"def ysort(L):\r\n return sorted(L, key=lambda x: x.freq)",
"def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res",
"def log_prob(self, samples):\n return -0.5 * sum_except_batch(\n np.log(2 * np.pi) + self.logstd + \\\n tf.exp(-2 * self.logstd) * tf.square(samples - self.mean))",
"def test_sort_distributions_alphabetical(self):\r\n exp = [([2, 1, 1], [1, 2, 3], [0, 0, 0, 1], [1]),\r\n ('baz', 'foo', 'foo', 'zab'), ('r', 'w', 'b', 'b')]\r\n obs = _sort_distributions(\r\n [[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],\r\n ['foo', 'baz', 'foo', 'zab'], ['w', 'r', 'b', 'b'], 'alphabetical')\r\n self.assertEqual(obs, exp)",
"def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))",
"def _logprob(self, sample):\n return 0, 0"
] | [
"0.5838612",
"0.5797235",
"0.5788106",
"0.57859063",
"0.5765085",
"0.56167305",
"0.56062204",
"0.5595072",
"0.5560781",
"0.5496417",
"0.54868",
"0.5470694",
"0.5460611",
"0.54308903",
"0.542925",
"0.5401365",
"0.5391079",
"0.5382952",
"0.5363151",
"0.5355698",
"0.53490305",
"0.53403145",
"0.5339368",
"0.5325012",
"0.532175",
"0.53129715",
"0.53025377",
"0.528172",
"0.5280761",
"0.527521"
] | 0.76663876 | 1 |
Generates attribute filter function for the given attributes value The attributes value can take one of several shapes. This returns a filter function appropriate to the attributes value. One nice thing about this is that there's less if/then shenanigans in the ``allow_token`` method. | def attribute_filter_factory(attributes):
if callable(attributes):
return attributes
if isinstance(attributes, dict):
def _attr_filter(tag, attr, value):
if tag in attributes:
attr_val = attributes[tag]
if callable(attr_val):
return attr_val(tag, attr, value)
if attr in attr_val:
return True
if '*' in attributes:
attr_val = attributes['*']
if callable(attr_val):
return attr_val(tag, attr, value)
return attr in attr_val
return False
return _attr_filter
if isinstance(attributes, list):
def _attr_filter(tag, attr, value):
return attr in attributes
return _attr_filter
raise ValueError('attributes needs to be a callable, a list or a dict') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_attributes_choices(self): \n filter_attributes = [\n 'no filters',\n 'user_id',\n 'device_id',\n 'device_first_seen_ts',\n 'device_first_view_ts', \n 'platform',\n 'platform_type',\n 'country',\n 'region',\n 'city',\n 'dma',\n 'os',\n 'os_version',\n 'manufacturer',\n 'app_mode',\n 'app_version',\n 'device_language',\n 'content_id',\n 'program_id',\n 'content_type',\n 'tvt_sec' # note: here tvt_sec is treated as an attribute rather than a cumulative metric\n ]\n return filter_attributes",
"def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter",
"def _get_attribute_functions(self, attributes):\n subqueries = []\n columns = []\n for attr in attributes:\n function = attributes[attr]\n if function == 'sum':\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n cast(self.db_value.value, Float).label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n columns.append(func.sum(sq.c.v))\n elif function == 'count' or function == 'count distinct':\n if attr == 'Activity' or attr == 'Stakeholder':\n columns.append(func.count())\n else:\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(self.db_value). \\\n join(self.db_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n if (function == 'count distinct'):\n columns.append(func.count(distinct(sq.c.v)))\n else:\n columns.append(func.count(sq.c.v))\n return subqueries, columns",
"def query_filter_builder(cls, user_attribute: str, value: Any) -> List[Q]:\n attributes = re.compile(r\"Or|And|OR|AND\").split(user_attribute)\n query_builder = []\n for attr in attributes:\n attr = attr.strip().lower()\n cond = {f\"{attr}__icontains\": value}\n if user_attribute.split(attr)[0].lower().endswith(\"or\"):\n last_query = query_builder.pop()\n query_builder.append(Q(last_query, Q(**cond), join_type=\"OR\"))\n elif attr != \"\":\n query_builder = [*query_builder, Q(**cond)]\n return query_builder",
"def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query",
"def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))",
"def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,\n strip_disallowed_elements=False, strip_html_comments=True,\n **kwargs):\n self.attr_filter = attribute_filter_factory(attributes)\n\n self.strip_disallowed_elements = strip_disallowed_elements\n self.strip_html_comments = strip_html_comments\n\n return super(BleachSanitizerFilter, self).__init__(source, **kwargs)",
"def dgfilter(*args, attribute: AnyStr=\"\", list: bool=True, logicalAnd: List[AnyStr,\n AnyStr]=None, logicalNot: AnyStr=\"\", logicalOr: List[AnyStr, AnyStr]=None, name:\n AnyStr=\"\", node: AnyStr=\"\", nodeType: AnyStr=\"\", plug: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass",
"def get_filter(feature, value):\r\n return {\r\n 'gender': {'user__profile__gender': value},\r\n 'level_of_education': {'user__profile__level_of_education': value},\r\n }[feature]",
"def _filterfunc(self,*args,**kwargs):\n self._filterfunc = self.f\n return self.f(*args,**kwargs)",
"def call_filter(\n self,\n name: str,\n value: t.Any,\n args: t.Optional[t.Sequence[t.Any]] = None,\n kwargs: t.Optional[t.Mapping[str, t.Any]] = None,\n context: t.Optional[Context] = None,\n eval_ctx: t.Optional[EvalContext] = None,\n ) -> t.Any:\n return self._filter_test_common(\n name, value, args, kwargs, context, eval_ctx, True\n )",
"def apply_filter(atom, isofilters):\n if 'None' in isofilters[0][0]:\n return True\n\n functionfilters = [isofilter for isofilter in isofilters if not isofilter[-1] == 'None']\n functionfilters = ['{}(atom.{}){}={}'.format(f[3], f[0], f[2], f[1]).replace('True', '=').replace('False', '!') for\n f in functionfilters]\n\n if all(getattr(atom, isofilter[0]) == isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'True' and isofilter[-1] == 'None'):\n if all(getattr(atom, isofilter[0]) != isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'False' and isofilter[-1] == 'None'):\n for functionfilter in functionfilters:\n if not eval(functionfilter):\n return False\n return True\n else:\n return False",
"def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value",
"def create_filter(args: dict) -> dict | None:\n if 'ip' in args:\n args['networkInterfaces.ipv4'] = args.pop('ip')\n expression_list = []\n for arg in args:\n value = args.get(arg)\n if arg == 'riskScore':\n restriction = \"GREATER_THAN_OR_EQUAL_TO\"\n values_list = [arg_to_number(value)]\n else:\n restriction = \"IN\"\n values_list = argToList(value)\n\n values_res = [{\"value\": val} for val in values_list]\n expression = {\n \"propertyName\": arg,\n \"restrictionType\": restriction,\n \"propertyValues\": values_res\n }\n expression_list.append(expression)\n if expression_list:\n return {\"criteria\": {\"criteriaList\": [{\"expressionList\": expression_list}], \"predicateType\": \"AND\"}}\n else:\n return None",
"def conforms_to_template_filter(self, template_filter):\n\n if not isinstance(template_filter, self.__class__):\n raise TypeError(\"AttributeFilter can only check conformance against \\\n another template filter, %s provided\" % (template_filter.__class__.__name__))\n\n #:\n #: Keys from the template\n #:\n template_filter_keys = template_filter.keys()\n # Keys from the object itself\n this_filter_keys = self.keys()\n\n #:\n #: 1. Check to see if the client has provided unwanted keys\n #:\n unwanted_keys = set(this_filter_keys) - set(template_filter_keys)\n if len(unwanted_keys) > 0:\n raise prestans.exception.AttributeFilterDiffers(list(unwanted_keys))\n\n #:\n #: 2. Make a attribute_filter that we send back\n #:\n evaluated_attribute_filter = AttributeFilter()\n\n #:\n #: 3. Evaluate the differences between the two, with template_filter as the standard\n #:\n for template_key in template_filter_keys:\n\n if template_key in this_filter_keys:\n\n value = getattr(self, template_key)\n\n #:\n #: If sub filter and boolean provided with of true, create default filter\n #: with value of true\n #:\n if isinstance(value, bool) and \\\n value is True and \\\n isinstance(getattr(template_filter, template_key), AttributeFilter):\n setattr(evaluated_attribute_filter, template_key, \\\n getattr(template_filter, template_key))\n elif isinstance(value, bool):\n setattr(evaluated_attribute_filter, template_key, value)\n elif isinstance(value, self.__class__):\n # Attribute lists sort themselves out, to produce sub Attribute Filters\n template_sub_list = getattr(template_filter, template_key)\n this_sub_list = getattr(self, template_key)\n setattr(evaluated_attribute_filter, template_key, \\\n this_sub_list.conforms_to_template_filter(template_sub_list))\n else:\n setattr(evaluated_attribute_filter, template_key, \\\n getattr(template_filter, template_key))\n\n return evaluated_attribute_filter",
"def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)",
"def generate_filter(filter_text):\n if ':' in filter_text:\n file_path_filter, _, contract_filter = filter_text.partition(':')\n else:\n file_path_filter = contract_filter = filter_text\n\n return functools.partial(check_if_matches_filter, file_path_filter, contract_filter)",
"def filter(self, value, model=None, context=None):\n\n # string filter: skip non-strings\n if type(value) is not str:\n return value\n\n linker = Linker(**self.linkify_params)\n return linker.linkify(value)",
"def access_filter(f):\n return AccessFilter(f)",
"def dict_to_filter(dct):\n _filter = None\n for attr, values in dct.items():\n attr = ''.join(escape_some_special_chars(attr))\n if not isinstance(values, (list, tuple)):\n values = [values]\n attrfilter = None\n for value in values:\n negate_value = False\n if isinstance(value, basestring):\n if value[0] == \"!\":\n negate_value = True\n value = value[1:]\n value = ''.join(escape_some_special_chars(value))\n valuefilter = '(%s=%s)' % (attr, value)\n if negate_value:\n valuefilter = '(!%s)' % (valuefilter,)\n if attrfilter is None:\n attrfilter = LDAPFilter(valuefilter)\n continue\n attrfilter &= valuefilter\n if _filter is None:\n _filter = attrfilter\n continue\n _filter &= attrfilter\n if _filter is None:\n _filter = LDAPFilter()\n return _filter",
"def filter_detect(self, x):\n b, a = self.c_detect\n return filtfilt(b, a, x)",
"def filters(self, name, tensor, **kwargs):\r\n assert 'data_format' not in kwargs\r\n with tf.name_scope('viz_filters'):\r\n # Find tensor holding trainable kernel weights\r\n name_stem = '/'.join(tensor.name.split('/')[:-1]) + '/kernel'\r\n matching_tensors = [t for t in tf.trainable_variables() if t.name.startswith(name_stem)]\r\n assert len(matching_tensors) == 1\r\n filters = matching_tensors[0]\r\n\r\n # H x W x C x N\r\n h, w, c, n = filters.shape.as_list()\r\n filters = tf.transpose(filters, perm=(3, 2, 0, 1))\r\n # N x C x H x W\r\n filters = tf.reshape(filters, (n*c, 1, h, w))\r\n # NC x 1 x H x W\r\n filters = tf.transpose(filters, perm=(2, 3, 1, 0))\r\n # H x W x 1 x NC\r\n\r\n self._4d_tensor(name, filters, **kwargs)",
"def closure(attributes: Set[A],\n fds: List[FunctionalDependency]) -> Set[A]:\n for i, (left, right) in enumerate(fds):\n if left.issubset(attributes):\n return closure(attributes.union(right), fds[:i] + fds[i + 1:])\n return attributes",
"def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data",
"def apply(self, *args):\n return _ida_hexrays.microcode_filter_t_apply(self, *args)",
"def evaluate(self, attributes):\n return self.predicate(attributes[self.name])",
"def ifilter_c(func):\n return functools.partial(ifilter, func)",
"def evaluate_filter(self, x):\n raise NotImplementedError",
"def apply_filters(ds, config):\n logger.info(\"Applying filters on every element in the dataset, keeping only elements which match the given config:\\n %s\", _dict_to_logstring(config))\n\n filters = []\n\n if \"equal\" in config:\n key = config[\"equal\"][\"key\"]\n value = config[\"equal\"][\"value\"]\n fn = (lambda x, k=key, v=value:\n k not in x or tf.math.reduce_all(x[k] == v))\n filters.append((fn, key))\n\n if \"min_signal_length_ms\" in config:\n key = \"signal\"\n min_signal_length_sec = tf.constant(1e-3 * config[\"min_signal_length_ms\"], tf.float32)\n tf.debugging.assert_scalar(min_signal_length_sec, message=\"min_signal_length_ms must be a scalar\")\n fn = (lambda x, k=key, v=min_signal_length_sec:\n k not in x or tf.size(x[k]) >= tf.cast(tf.cast(x[\"sample_rate\"], tf.float32) * v, tf.int32))\n filters.append((fn, \"min_signal_length_sec\"))\n\n if \"min_shape\" in config:\n key = config[\"min_shape\"][\"key\"]\n min_shape = tf.constant(config[\"min_shape\"][\"shape\"])\n fn = (lambda x, k=key, v=min_shape:\n k not in x or tf.math.reduce_all(tf.shape(x[k]) >= v))\n filters.append((fn, key))\n\n if filters:\n logger.info(\"Using %d different filters:\\n %s\", len(filters), \"\\n \".join(name for fn, name in filters))\n else:\n logger.warning(\"No filters defined, skipping filtering\")\n return ds\n\n def all_ok(x):\n # This will be traced by tf autograph and converted into a graph, we cannot use python's builtin 'all' at the moment\n ok = True\n for fn, _ in filters:\n ok = ok and fn(x)\n return ok\n\n return ds.filter(all_ok)",
"def itemFilterAttr(*args, byName: Union[AnyStr, bool]=\"\", byNameString: Union[AnyStr,\n List[AnyStr], bool]=\"\", byScript: Union[AnyStr, bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", dynamic: bool=True, exists: bool=True, hasCurve:\n bool=True, hasDrivenKey: bool=True, hasExpression: bool=True, hidden:\n bool=True, intersect: Union[List[AnyStr, AnyStr], bool]=None, keyable:\n bool=True, listBuiltInFilters: bool=True, listOtherFilters: bool=True,\n listUserFilters: bool=True, negate: bool=True, parent: AnyStr=\"\", published:\n bool=True, readable: bool=True, scaleRotateTranslate: bool=True,\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, writable: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass"
] | [
"0.5660973",
"0.5379473",
"0.534261",
"0.5250468",
"0.5212326",
"0.5194705",
"0.517152",
"0.51187414",
"0.50908965",
"0.5085648",
"0.49764767",
"0.48972845",
"0.48662314",
"0.4835603",
"0.48243764",
"0.48073313",
"0.4796485",
"0.47842774",
"0.47827226",
"0.4765363",
"0.4760341",
"0.4740688",
"0.472495",
"0.4714329",
"0.46974376",
"0.46816686",
"0.46774328",
"0.4663414",
"0.4632838",
"0.46208325"
] | 0.7323271 | 0 |
Creates a BleachSanitizerFilter instance | def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,
strip_disallowed_elements=False, strip_html_comments=True,
**kwargs):
self.attr_filter = attribute_filter_factory(attributes)
self.strip_disallowed_elements = strip_disallowed_elements
self.strip_html_comments = strip_html_comments
return super(BleachSanitizerFilter, self).__init__(source, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, text):\n if not isinstance(text, six.string_types):\n raise TypeError('argument must of text type')\n\n if not text:\n return u''\n\n text = force_unicode(text)\n\n dom = self.parser.parseFragment(text)\n filtered = BleachSanitizerFilter(\n source=self.walker(dom),\n\n # Bleach-sanitizer-specific things\n attributes=self.attributes,\n strip_disallowed_elements=self.strip,\n strip_html_comments=self.strip_comments,\n\n # html5lib-sanitizer things\n allowed_elements=self.tags,\n allowed_css_properties=self.styles,\n allowed_protocols=self.protocols,\n allowed_svg_properties=[],\n )\n\n # Apply any filters after the BleachSanitizerFilter\n for filter_class in self.filters:\n filtered = filter_class(source=filtered)\n\n return self.serializer.render(filtered)",
"def test_can_filter_tags(self):\n text = '<b><i>Example</i></b><!-- comment -->'\n filter = Bleach(tags=['b'], strip=True)\n filtered = filter.filter(text)\n expected = '<b>Example</b>'\n self.assertEquals(expected, filtered)",
"def test_create(self):\n filter = Bleach()\n self.assertIsInstance(filter, Bleach)",
"def __init__(self, filter_expression, nowrap = False, asvar=None, message_context=None):\n self.asvar = asvar\n self.message_context = message_context\n self.filter_expression = filter_expression\n if isinstance(self.filter_expression.var, six.string_types):\n self.filter_expression.var = Variable(\"'%s'\" %\n self.filter_expression.var)\n self.nowrap = nowrap",
"def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def ext_filter(app):\n return UrlRewriteFilter(app, conf)\n return ext_filter",
"def __init__(self):\n if self.__class__ == ElementFilter:\n _self = None\n else:\n _self = self\n this = _libsbml.new_ElementFilter(_self, )\n try: self.this.append(this)\n except: self.this = this",
"def set_scanning_filter(self, **kwargs):\n warn(\n \"This method will be removed in a future version of Bleak. Use BleakScanner constructor args instead.\",\n FutureWarning,\n stacklevel=2,\n )\n self._backend.set_scanning_filter(**kwargs)",
"def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs",
"def ButterworthFilter(\n self,\n name: str,\n cutoffFrequency: float,\n order: int = 2,\n operation: SymbolicConstant = NONE,\n halt: Boolean = OFF,\n limit: float = None,\n invariant: SymbolicConstant = NONE,\n ) -> ButterworthFilter:\n self.filters[name] = butterworthFilter = ButterworthFilter(\n name, cutoffFrequency, order, operation, halt, limit, invariant\n )\n return butterworthFilter",
"def sanitize(cls):",
"def filter(self, filters):",
"def make_filter(name, schema):\n return HSMFilter(name, schema)",
"def apply_filter(self, inplace=True):\n\n if self.filter is None:\n if not inplace:\n return copy.deepcopy(self)\n else:\n return None\n\n x = copy.copy(self.__dict__)\n x['data'] = self.get_data()\n x['locs'] = self.get_locs()\n\n if self.filter == 'kurtosis':\n x['kurtosis'] = x['kurtosis'][x['kurtosis'] <= x['kurtosis_threshold']]\n\n for key in ['n_subs', 'n_elecs', 'n_sessions', 'dur', 'filter_inds', 'nifti_shape']:\n if key in x.keys():\n x.pop(key)\n\n boc = Brain(**x)\n boc.filter = None\n boc.update_info()\n if inplace:\n self.__init__(boc)\n else:\n return boc",
"def filter(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n if _name in self._filters:\n raise Error(\"Filter already defined: {0}\".format(_name))\n\n self._filters[_name] = fn\n return fn\n return wrapper",
"def new(name):\r\n\r\n g_filter(name)",
"def allow_filtering(self):\r\n clone = copy.deepcopy(self)\r\n clone._allow_filtering = True\r\n return clone",
"def testUsingFilterTool(self):\n pass",
"def __init__(self) -> None:\r\n self.filters: list[Filter] = []",
"def __init__(self, filter: Filter, matcher: Matcher, storage_manager: StorageManager):\n super().__init__()\n self.filter = filter\n self.matcher = matcher\n self.storage_manager = storage_manager",
"def template_filter(name: Optional[str] = None) -> Callable:\n\n def decorator(func):\n name_ = name if name else func.__name__\n FILTERS[name_] = func\n return func\n\n return decorator",
"def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def auth_filter(app):\n return DevAuth(app, conf)\n return auth_filter",
"def filter_banned(text,replace_handler=banned_word_handler):\n banned=CensoredWord.objects.get_banned_wordlist()\n return word_filter(text,banned,replace_handler)",
"def test_can_filter_attributes(self):\n text = '<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)",
"def broadbandfilter(self):\n _, = self.broadbandfilters\n return _",
"def filterRansac():\n pass",
"def _get_filter(self, args):\n\n # Create the filters list\n filter_list = []\n \n # If we want to record all requests, add the file logger filter\n if args.record:\n filter_list.append(filters.StoreLoggerFilter(args.url))\n\n # Add the whitelist filter\n wl_filter = filters.WhitelistedSiteFilter(args.url)\n filter_list.append(wl_filter)\n\n # Create the ACL filter that filters all requests from devices\n acl_filter = filters.DeviceACLFilter(filter_list, args.url)\n\n return acl_filter",
"def copy(self):\n new_filter = BloomFilter(self.capacity, self.error_rate)\n new_filter.filter = self.filter.copy()\n return new_filter",
"def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text",
"def get_filter_stringlist(self):\n return text_filter",
"def _pre_filter_decode(self):\n\n pass"
] | [
"0.5844354",
"0.5506664",
"0.5395552",
"0.53924406",
"0.53181165",
"0.52993613",
"0.528205",
"0.52735686",
"0.525211",
"0.52488923",
"0.5222469",
"0.5220284",
"0.5183616",
"0.51724327",
"0.5108",
"0.5105979",
"0.5087034",
"0.5083709",
"0.5063567",
"0.50324994",
"0.5003623",
"0.5002278",
"0.49703377",
"0.49627742",
"0.49385092",
"0.49289337",
"0.49129292",
"0.4906467",
"0.48981738",
"0.48755473"
] | 0.716723 | 0 |
Sanitize a token either by HTMLencoding or dropping. | def sanitize_token(self, token):
token_type = token['type']
if token_type in ['StartTag', 'EndTag', 'EmptyTag']:
if token['name'] in self.allowed_elements:
return self.allow_token(token)
elif self.strip_disallowed_elements:
return None
else:
if 'data' in token:
# Alphabetize the attributes before calling .disallowed_token()
# so that the resulting string is stable
token['data'] = alphabetize_attributes(token['data'])
return self.disallowed_token(token)
elif token_type == 'Comment':
if not self.strip_html_comments:
return token
else:
return None
elif token_type == 'Characters':
return self.sanitize_characters(token)
else:
return token | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_token(token):\n cleaned_token = token\n if cleaned_token in NORMALIZE_DICT:\n cleaned_token = NORMALIZE_DICT[cleaned_token]\n\n if cleaned_token not in REMOVED_CHAR:\n for char in REMOVED_CHAR:\n cleaned_token = cleaned_token.replace(char, u'')\n\n if len(cleaned_token) == 0:\n cleaned_token = \",\"\n return cleaned_token",
"def clean_for_html(cls, value):\r\n return cls._clean(value, INVALID_HTML_CHARS)",
"def sanitize(text):\n try:\n from airy.core import sanitizer\n return smart_unicode(sanitizer.clean_html(text))\n except ImportError:\n logging.error(\"You need html5lib in order to use sanitize\")\n return \"ERROR: You need html5lib in order to use sanitize\"",
"def sanitize_characters(self, token):\n data = token.get('data', '')\n\n if not data:\n return token\n\n data = INVISIBLE_CHARACTERS_RE.sub(INVISIBLE_REPLACEMENT_CHAR, data)\n token['data'] = data\n\n # If there isn't a & in the data, we can return now\n if '&' not in data:\n return token\n\n new_tokens = []\n\n # For each possible entity that starts with a \"&\", we try to extract an\n # actual entity and re-tokenize accordingly\n for part in next_possible_entity(data):\n if not part:\n continue\n\n if part.startswith('&'):\n entity = match_entity(part)\n if entity is not None:\n new_tokens.append({'type': 'Entity', 'name': entity})\n # Length of the entity plus 2--one for & at the beginning\n # and and one for ; at the end\n part = part[len(entity) + 2:]\n if part:\n new_tokens.append({'type': 'Characters', 'data': part})\n continue\n\n new_tokens.append({'type': 'Characters', 'data': part})\n\n return new_tokens",
"def assert_clean(data):\n def _ensure_clean(value):\n if value != bleach.clean(value):\n raise ValueError\n\n return escape_html(data)",
"def clean_tag(data):\n # TODO: make this a method of Tag?\n return escape_html(data).replace('\"', '"').replace(\"'\", ''')",
"def clean_token(token):\n return token.strip().lower().replace(' ', '_')",
"def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text",
"def xss_strip_unsafe_tags(s):\n return htmlsanitizer._sanitizeHTML(s, 'utf-8', None)",
"def filter_html(self, text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n print \"Value Error\"\n pass\n else:\n # named entity\n try:\n if text[1:-1] in (\"amp\",\"gt\",\"lt\"):\n return text\n else:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n print \"keyerror\"\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def strip_unsafe_tokens(string, parser=ANSI_PARSER):\n return parser.strip_unsafe_tokens(string)",
"def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))",
"def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")",
"def clean(string):\r\n if string is None or not string: return ''\r\n string = html.unescape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n string = unescape(string)\r\n string = html.escape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n return string",
"def filter_token(token: Text) -> Text:\n def strip_enum(token: Text) -> Text:\n \"\"\"\n Remove any enumerations from the given token\n\n Parameters\n ----------\n token: Text :\n The token that we want to remove any enumerations from\n Returns\n -------\n A filtered version of the token that does not have any\n enumerations.\n \"\"\"\n if not token:\n return ''\n if token[0] == '(' and token[len(token) - 1] != ')':\n return ''\n if token[0] != '(' or (token[0] == '(' and token[len(token) -\n 1] == ')'):\n return ''.join(enum_filter.split(token))\n return ''\n\n if email_filter.match(token) or (\n stop_words and token in stop_words\n ):\n return ''\n # Strip enumeration from token\n token = strip_enum(token)\n # Strip punctuation from token\n token = ''.join(punc_filter.split(token))\n # Strip numbers from token\n token = ''.join(num_filter.split(token))\n # Remove non-printable characters\n token = ''.join(c for c in token if c in printable_chars)\n\n return '' if len(token) < 3 else token",
"def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result",
"def tweet_sanitize(tweet: str) -> str:\n pipeline = [strip_links, strip_mentions, strip_hashtags, strip_all_entities,\n remove_special_characters]\n for fun in pipeline:\n tweet = fun(tweet)\n return tweet",
"def strip_unsafe_tokens(self, string):\n return self.unsafe_tokens.sub(\"\", string)",
"def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet",
"def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text",
"def _strip_tags(value):\r\n return re.sub(r'<[^>]*?>', ' ', force_unicode(value))",
"def clean(self, text):\n if not isinstance(text, six.string_types):\n raise TypeError('argument must of text type')\n\n if not text:\n return u''\n\n text = force_unicode(text)\n\n dom = self.parser.parseFragment(text)\n filtered = BleachSanitizerFilter(\n source=self.walker(dom),\n\n # Bleach-sanitizer-specific things\n attributes=self.attributes,\n strip_disallowed_elements=self.strip,\n strip_html_comments=self.strip_comments,\n\n # html5lib-sanitizer things\n allowed_elements=self.tags,\n allowed_css_properties=self.styles,\n allowed_protocols=self.protocols,\n allowed_svg_properties=[],\n )\n\n # Apply any filters after the BleachSanitizerFilter\n for filter_class in self.filters:\n filtered = filter_class(source=filtered)\n\n return self.serializer.render(filtered)",
"def sanitize(cls):",
"def sanitize(cls, value):\n return value",
"def clean_twitter_tokens(text):\n preprocessor.set_options(preprocessor.OPT.URL, preprocessor.OPT.RESERVED, preprocessor.OPT.MENTION,\n preprocessor.OPT.NUMBER)\n return preprocessor.clean(text)",
"def unhtmlify(html):\n return unescape(re.sub(r'<.*?>', '', html))",
"def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet",
"def _sanitize(label):\n return re.sub(r'(\\W+| )', '', label)",
"def cleaningHTML(text):\n # HTML-Entities decodieren\n h = html.parser.HTMLParser(convert_charrefs=True)\n text = h.unescape(text)\n \n # Geschützte Leerzeichen löschen\n text = re.sub('\\u00A0', \" \", text)\n text = re.sub(r'&', r'&', text)\n text = re.sub(r'<a .*?>', r'', text)\n text = re.sub(r'</a>', r'', text)\n return text",
"def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text"
] | [
"0.6213719",
"0.61599076",
"0.6135648",
"0.61082906",
"0.6071557",
"0.6028807",
"0.60134274",
"0.6010262",
"0.6005891",
"0.5977613",
"0.5893458",
"0.58391213",
"0.5737908",
"0.57283384",
"0.5718727",
"0.5710873",
"0.56932616",
"0.5682348",
"0.56683916",
"0.5661966",
"0.5647215",
"0.5639497",
"0.5614055",
"0.55750763",
"0.5574519",
"0.55305666",
"0.552876",
"0.55002064",
"0.54978156",
"0.549496"
] | 0.72170484 | 0 |
Handles Characters tokens Our overridden tokenizer doesn't do anything with entities. However, that means that the serializer will convert all ``&`` in Characters tokens to ``&``. Since we don't want that, we extract entities here and convert them to Entity tokens so the serializer will let them be. | def sanitize_characters(self, token):
data = token.get('data', '')
if not data:
return token
data = INVISIBLE_CHARACTERS_RE.sub(INVISIBLE_REPLACEMENT_CHAR, data)
token['data'] = data
# If there isn't a & in the data, we can return now
if '&' not in data:
return token
new_tokens = []
# For each possible entity that starts with a "&", we try to extract an
# actual entity and re-tokenize accordingly
for part in next_possible_entity(data):
if not part:
continue
if part.startswith('&'):
entity = match_entity(part)
if entity is not None:
new_tokens.append({'type': 'Entity', 'name': entity})
# Length of the entity plus 2--one for & at the beginning
# and and one for ; at the end
part = part[len(entity) + 2:]
if part:
new_tokens.append({'type': 'Characters', 'data': part})
continue
new_tokens.append({'type': 'Characters', 'data': part})
return new_tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def special_tokens(self, ):\n\n if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:\n special_tokens = self.tokenizer.build_inputs_with_special_tokens([])\n special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)\n self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids\n\n special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token\n return special_tokens",
"def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized",
"def parse(self, tokenizer):\n pass",
"def handle_entityref(self, ref):\r\n data = None\r\n if self.convertHTMLEntities:\r\n try:\r\n data = unichr(name2codepoint[ref])\r\n except KeyError:\r\n pass\r\n\r\n if not data and self.convertXMLEntities:\r\n data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)\r\n\r\n if not data and self.convertHTMLEntities and \\\r\n not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):\r\n # TODO: We've got a problem here. We're told this is\r\n # an entity reference, but it's not an XML entity\r\n # reference or an HTML entity reference. Nonetheless,\r\n # the logical thing to do is to pass it through as an\r\n # unrecognized entity reference.\r\n #\r\n # Except: when the input is \"&carol;\" this function\r\n # will be called with input \"carol\". When the input is\r\n # \"AT&T\", this function will be called with input\r\n # \"T\". We have no way of knowing whether a semicolon\r\n # was present originally, so we don't know whether\r\n # this is an unknown entity or just a misplaced\r\n # ampersand.\r\n #\r\n # The more common case is a misplaced ampersand, so I\r\n # escape the ampersand and omit the trailing semicolon.\r\n data = \"&%s\" % ref\r\n if not data:\r\n # This case is different from the one above, because we\r\n # haven't already gone through a supposedly comprehensive\r\n # mapping of entities to Unicode characters. We might not\r\n # have gone through any mapping at all. So the chances are\r\n # very high that this is a real entity, and not a\r\n # misplaced ampersand.\r\n data = \"&%s;\" % ref\r\n self.handle_data(data)",
"def _convertEntities(self, match):\r\n x = match.group(1)\r\n if self.convertHTMLEntities and x in name2codepoint:\r\n return unichr(name2codepoint[x])\r\n elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:\r\n if self.convertXMLEntities:\r\n return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]\r\n else:\r\n return u'&%s;' % x\r\n elif len(x) > 0 and x[0] == '#':\r\n # Handle numeric entities\r\n if len(x) > 1 and x[1] == 'x':\r\n return unichr(int(x[2:], 16))\r\n else:\r\n return unichr(int(x[1:]))\r\n\r\n elif self.escapeUnrecognizedEntities:\r\n return u'&%s;' % x\r\n else:\r\n return u'&%s;' % x",
"def override_special_tokens(self, opt: Opt):\n # now override\n self.start_token = self.hf_tokenizer.cls_token\n self.end_token = self.hf_tokenizer.sep_token\n self.null_token = self.hf_tokenizer.pad_token\n self.unk_token = self.hf_tokenizer.unk_token\n\n self._unk_token_idx = self.hf_tokenizer.unk_token_id\n\n self.start_idx = self[self.start_token]\n self.end_idx = self[self.end_token]\n self.null_idx = self[self.null_token]",
"def parse_entity(self, term):\n pass",
"def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())",
"def tokenize_chars(line):\n return",
"def __encode_token(self, text: list) -> dict:\n encoded_input = self.tokenizer(\n [str(string) for string in text],\n truncation=True,\n padding=True,\n return_tensors=\"pt\",\n )\n\n return encoded_input",
"def handle_entityref(self, name):\n text = six.unichr(name2codepoint[name])\n self.result.append(text)\n return text",
"def set_tokens(self, char: str) -> None:\n if self.is_start_group(char):\n self.set_start_group(char)\n return\n\n if self.is_end_group(char):\n self.set_end_group(char)\n return\n\n if self.is_group_part(char):\n self.set_group_part(char)\n return\n\n if self.is_split_char(char):\n self.set_split_char(char)\n return\n else:\n self.set_generic_char(char)\n return\n\n raise ValueError(\n 'Edge case encountered: token ({}) '\n 'not parsed during Lexer state: ({})'.format(char, self.__dict__))",
"def test_bug2785373(self):\n input = \"So, one dey when I wes 17, I left.\"\n for _ in tokenize_en(input):\n pass\n input = raw_unicode(\"So, one dey when I wes 17, I left.\")\n for _ in tokenize_en(input):\n pass",
"def _to_text(self, token):\n return replace_entities(unicode(token))",
"def handle_entityref(self,name):\r\n self.handle_data(unichr(name2codepoint[name]))\r\n #self.handle_data(\"&\"+name+\";\")\r\n #print \"handle_entityref\", name\r\n #raise NotImplemented\r",
"def process_token_sentence(text):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n return sentences",
"def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n 
else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag",
"def tokenize(self, inputs):\n if hasattr(self.tokenizer, \"batch_encode\"):\n return self.tokenizer.batch_encode(inputs)\n else:\n return [self.tokenizer.encode(x) for x in inputs]",
"def tokenize_wordchars(lines):\n return",
"def entity2text(entitydef):\n if entitydef.startswith(\"&#x\"):\n cp = int(entitydef[3:-1], 16)\n elif entitydef.startswith(\"&#\"):\n cp = int(entitydef[2:-1])\n elif entitydef.startswith(\"&\"):\n cp = name2codepoint[entitydef[1:-1]]\n else:\n logger.debug(entitydef)\n cp = None\n\n return chr(cp) if cp else entitydef",
"def tokenize(self, start_pos=0, text=None):\n pass",
"def encode_tokens(self, sentence_tokens: str) -> str:\n return self.bpe.process_line(sentence_tokens)",
"def _tokenize(self, raw_text):\n\n doc = self.nlp(raw_text.strip())\n\n # Loop through tokens and find known entities aren't already marked\n for token in doc:\n # Is this word in our known_entities, but is not recognized by the spaCy parser?\n if token.text.lower() in self.known_entities and token.ent_type not in self.entities:\n # We need to set the new entity to doc.ents directly (I believe the getter for doc.ents does\n # some important massaging. However, counter to the online docs, setting doc.ents wipes out\n # all of the previously recognized ents, so we stash the value, then we combine and reset.\n stash = doc.ents\n doc.ents = [(token.text.title(), doc.vocab.strings['PERSON'], token.i, token.i + 1)]\n doc.ents = doc.ents + stash\n\n # Find proper noun n-grams: (a) find a known entity, (b) is the next word also a known entity?,\n # (c) merge, (d) repeat\n # TODO: Joining multi-word named entities sometimes causes us trouble.\n doc_len = len(doc) # Helps us know when to exit the 'for loop' (since we change the # of items via merge)\n for token in doc:\n # if we're not at the end of the loop, and we recognize this as a proper noun and it's not a stop word\n # and the token isn't a space...\n if token.i + 1 < doc_len and token.ent_type in self.entities and \\\n token.text.lower() not in self.stop_words and token.text not in ' ':\n next_token = doc[token.i + 1]\n # keep looping while we're not at the end of the loop and this token has the same entity type as\n # the previous token and it's not a stop word or a space.\n while token.i + 1 < doc_len and next_token.ent_type == token.ent_type and \\\n next_token.text.lower() not in self.stop_words and next_token.text not in ' ':\n n_gram = doc[token.i:token.i + 2]\n n_gram.merge()\n doc_len -= 1 # the merge changes the list length, so we just shrunk the list!\n # print(x)\n if token.i + 1 >= doc_len:\n break\n\n return doc",
"def text2entity(text):\n return ESCAPE.get(text, text)",
"def _tokens(self):\n # get my renderer\n renderer = self.renderer\n # sign on\n yield \"\"\n yield renderer.commentLine(\"tokens\")\n # simple tokens\n yield from renderer.set(name=\"empty\")\n yield from renderer.set(name=\"comma\", value=\",\")\n yield from renderer.set(name=\"space\", value=\"$(empty) $(empty)\")\n\n # characters that don't render easily and make the makefile less readable\n yield from renderer.set(name=\"esc\", value='\"\\x1b\"')\n\n # all done\n return",
"def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')",
"def helper_decode(self, tokens: List[str]) -> str:\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n # We iterate over \"char\", which is supposed to be a single\n # character, because the TorchScripted version of the code\n # correctly splits a string into single characters in\n # self.utf8_chars() but the non-TorchScripted version doesn't\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)",
"def __iter__(self):\n normed_sent = preprocess(self.strings)\n for sent in split_iter(normed_sent, self.eos_placement):\n sent = ''.join(sent)\n if sent:\n yield list(tokenize(sent))",
"def tokenize (self, text):\n if self.type == \"gpt2\":\n return self._tokenizer.tokenize(text)\n if self.type == \"bpe\":\n return self._tokenizer.EncodeAsPieces(text)",
"def tokenize(self, text):\n # text = convert_to_unicode(text)\n\n output_tokens = []\n for token in split_by_whitespace(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens"
] | [
"0.57361805",
"0.5715559",
"0.5693421",
"0.5683378",
"0.5586623",
"0.5558785",
"0.55293304",
"0.5496346",
"0.54673356",
"0.5376477",
"0.5312388",
"0.5287163",
"0.52551013",
"0.5237161",
"0.52218133",
"0.5212501",
"0.5194779",
"0.515802",
"0.5128146",
"0.5125173",
"0.5115463",
"0.5081616",
"0.5078631",
"0.5077814",
"0.5077739",
"0.5067912",
"0.50590706",
"0.5056676",
"0.50525063",
"0.5043529"
] | 0.7078704 | 0 |
Sanitizes css in style tags | def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
# Validate the css in the style tag and if it's not valid, then drop
# the whole thing.
parts = style.split(';')
gauntlet = re.compile(
r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$"""
)
for part in parts:
if not gauntlet.match(part):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall('([-\w]+)\s*:\s*([^:;]*)', style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def condense_style(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Style CSS tags.\")\n return html.replace('<style type=\"text/css\">', '<style>').replace(\n \"<style type='text/css'>\", '<style>').replace(\n \"<style type=text/css>\", '<style>')",
"def test_valid_css():\n css_sanitizer = CSSSanitizer(allowed_css_properties=[\"color\", \"float\"])\n assert (\n clean('<p style=\"float: left; color: \">foo</p>', css_sanitizer=css_sanitizer)\n == '<p style=\"float: left; color: ;\">foo</p>'\n )\n assert (\n clean('<p style=\"color: float: left;\">foo</p>', css_sanitizer=css_sanitizer)\n == '<p style=\"color: float: left;\">foo</p>'\n )",
"def apply_styles(source, styles):\n soup = BeautifulSoup(source)\n\n for style in styles:\n for markup in soup.findAll(style.markup):\n markup['style'] = style.style.strip()\n\n return soup.prettify()",
"def process_clevercss(source, filepath):\n return clevercss.convert(source)",
"def test_style_maintained(self):\n test_string = \"<p><font style='color: red'></p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"style='color: red;'\", cleaned)\n\n test_string = \"<p><table border=\\\"1\\\"></table></p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"border='1'\", cleaned)",
"def minify_css(css: str) -> str:\n css = re.sub(r\"[ ]{4}|\\n|(?<=[:{}]) | (?=[{}])\", \"\", css)\n css = re.sub(\n r\"/\\*.+?\\*/\", lambda m: m.group(0) if m.group(0).startswith(\"/*!\") else \"\", css\n )\n return Markup(css.replace(\"<style\", \"\\n<style\"))",
"def test_css_parsing_with_entities(data, styles, expected):\n css_sanitizer = CSSSanitizer(allowed_css_properties=styles)\n assert (\n clean(\n data, tags={\"p\"}, attributes={\"p\": [\"style\"]}, css_sanitizer=css_sanitizer\n )\n == expected\n )",
"def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)",
"def restore_needed_space(css):\n return css.replace(\"!important\", \" !important\").replace( # !important\n \"@media(\", \"@media (\").replace( # media queries # jpeg > jpg\n \"data:image/jpeg;base64,\", \"data:image/jpg;base64,\").rstrip(\"\\n;\")",
"def scan_system_css():\r\n pass",
"def generateInlineCSS():",
"def clean_text_from_html_tags(message):\n regex_style_tag = re.compile('<style.*?>[\\\\s\\\\S]*?</style>')\n message = re.sub(regex_style_tag, \" \", message)\n regex_script_tag = re.compile('<script.*?>[\\\\s\\\\S]*?</script>')\n message = re.sub(regex_script_tag, \" \", message)\n regex_html_tags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n message = re.sub(regex_html_tags, \" \", message)\n return message",
"def inline_css(html_src, path=None):\n css_re = re.compile(\"\\<link rel\\=\\\"stylesheet\\\" media\\=\\\"(screen|print)\\\" href\\=\\\"([0-9a-zA-Z.\\-_/]+)\\\"\\>\")\n\n def fetch_jssource(in_match):\n #media_type = in_match.group(1)\n rel_path = in_match.group(2)\n csspath = os.path.join(path, rel_path)\n return \"<style>\\n{0}\\n</style>\".format(open(csspath, 'r').read())\n #return \"<style media=\\\"{0}\\\">\\n{1}\\n</style>\".format(media_type, open(csspath, 'r').read())\n\n return css_re.sub(fetch_jssource, html_src)",
"def _html_style(self, style):\n ee = None\n try: return style.html_style()\n except Exception, e: ee = e; pass\n try: return style.xml_style()\n except Exception, e: print \"HtmlDocument/style\", ee, e; pass\n try: return str(style)\n except Exception, e: print \"HtmlDocument/style\", e; return \"\"",
"def fancy( _inStr):\r\n return '<head><style type=\"text/css\">td.special{ background-color:aqua;font-size: 100%;margin-left: 20px;font-family: times, sans-serif, arial}</style></head><table><tr><td class=\"special\">' + _inStr + '</td></tr></table>'",
"def test_overwrite(self):\n html = '<html><head><style>h1 {color: #000;}</style></head><body><h1 style=\"color: #fff\">Foo</h1></body></html>'\n desired_output = '<html><head></head><body><h1 style=\"color: #000; color: #fff\">Foo</h1></body></html>'\n output = Pynliner().from_string(html).run()\n self.assertEqual(output, desired_output)",
"def munge(self, stylesheet: str) -> str:\n s = ''.join([s.lstrip().replace(' ', ' ').replace(' \\n', '\\n')\n for s in g.splitLines(stylesheet)])\n return s.rstrip() # Don't care about ending newline.",
"def remove_unnecessary_whitespace(css):\n log.debug(\"Removing all unnecessary white spaces.\")\n\n def pseudoclasscolon(css):\n \"\"\"Prevent 'p :link' from becoming 'p:link'.\n\n Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'.\n This is translated back again later.\n \"\"\"\n regex = re.compile(r\"(^|\\})(([^\\{\\:])+\\:)+([^\\{]*\\{)\")\n match = regex.search(css)\n while match:\n css = ''.join([\n css[:match.start()],\n match.group().replace(\":\", \"___PSEUDOCLASSCOLON___\"),\n css[match.end():]])\n match = regex.search(css)\n return css\n\n css = pseudoclasscolon(css)\n # Remove spaces from before things.\n css = re.sub(r\"\\s+([!{};:>\\(\\)\\],])\", r\"\\1\", css)\n # If there is a `@charset`, then only allow one, and move to beginning.\n css = re.sub(r\"^(.*)(@charset \\\"[^\\\"]*\\\";)\", r\"\\2\\1\", css)\n css = re.sub(r\"^(\\s*@charset [^;]+;\\s*)+\", r\"\\1\", css)\n # Put the space back in for a few cases, such as `@media screen` and\n # `(-webkit-min-device-pixel-ratio:0)`.\n css = re.sub(r\"\\band\\(\", \"and (\", css)\n # Put the colons back.\n css = css.replace('___PSEUDOCLASSCOLON___', ':')\n # Remove spaces from after things.\n css = re.sub(r\"([!{}:;>\\(\\[,])\\s+\", r\"\\1\", css)\n return css",
"def test_invalid_rule_not_stripped(self):\n html = '<div class=\"pink\">test</div>'\n css = '.pink { opacity: 0.8; }'\n expected = '<div class=\"pink\" style=\"opacity: 0.8;\">test</div>'\n result = inline_css(html, css, strip_unsupported_css=False, pretty_print=False)\n self.assertEqual(expected, result)",
"def fixup_css(text):\n return text.replace('\\x00', '\\\\0')",
"def condense_whitespace(css):\n log.debug(\"Condensing all unnecessary white spaces.\")\n return re.sub(r\"\\s+\", \" \", css)",
"def stylecrunch(stystr):\n return dict(pair.split(\":\") for pair in semicolons.findall(stystr))",
"def parse_style(style):\n # ignore empty lines\n if not style.strip():\n raise EmptyLineError()\n\n css, title = style.split(\"|\")\n css.strip()\n title.strip()\n\n # Check for empty title part\n if not title:\n raise Exception()\n\n # Check for empty CSS class part\n if not css:\n raise NotAValidCssClassError()\n\n # Check for CSS class validity; a style can have multiple CSS classes\n # so we first need to split them\n for cls in css.split():\n if not _is_valid_css_class(cls):\n raise NotAValidCssClassError()\n\n return css, title",
"def getstyle(self, tag):\n try:\n styledict = tag.style.__dict__\n except AttributeError:\n return []\n else:\n stylelist = [x + \": \" + y for x, y in styledict.items()]\n return [u(' style=\"%s\"') % u(\"; \").join(stylelist)]",
"def _read_stylesheet(self, style):\n tree = ET.parse(style)\n for marker in tree.findall('style'):\n if marker.get('publishable') == 'true':\n self.publishable.add(marker.get('id'))",
"def load_style_sheet() -> str:\n return _preprocess_style(_read_text('style.css.template'))",
"def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'",
"def create_css():\r\n background_color, font, paragraph_color, head_color = prompt_style()\r\n style = \"\"\r\n file = open(TEMPLATE_FILE)\r\n for line in file:\r\n search = True\r\n while search is True:\r\n if \"@BACKCOLOR\" in line:\r\n line = line.split(\"@BACKCOLOR\")\r\n line = line[0] + background_color + line[1]\r\n search = True\r\n elif \"@HEADCOLOR\" in line:\r\n line = line.split(\"@HEADCOLOR\")\r\n line = line[0] + head_color + line[1]\r\n search = True\r\n elif \"@FONTSTYLE\" in line:\r\n line = line.split(\"@FONTSTYLE\")\r\n line = line[0] + font + line[1]\r\n search = True\r\n elif \"@FONTCOLOR\" in line:\r\n line = line.split(\"@FONTCOLOR\")\r\n line = line[0] + paragraph_color + line[1]\r\n search = True\r\n else:\r\n style += line\r\n search = False\r\n style += '\\n'\r\n file.close()\r\n return style",
"def add_style(self, strstyle, content=\"\"):\n if content: # str is name of css file to use\n src = self.add_style_str(content, strstyle)\n else: # str is filename of actual css file\n src = self.add_style_file(strstyle)\n\n self.opf.add_manifest(sluggify(src), src, \"text/css\")",
"def block_html(self, html):\n if self.options.get('skip_style') and \\\n html.lower().startswith('<style'):\n return ['']\n if self.options.get('escape'):\n return [escape(html)]\n return [html]"
] | [
"0.75775725",
"0.7063994",
"0.6838947",
"0.67956835",
"0.6540166",
"0.65271175",
"0.61195105",
"0.6116861",
"0.60196775",
"0.6014334",
"0.5951415",
"0.5801298",
"0.57960355",
"0.579041",
"0.5779055",
"0.57478005",
"0.5692764",
"0.56792575",
"0.56675595",
"0.5645531",
"0.564218",
"0.56276345",
"0.5622646",
"0.56115645",
"0.561075",
"0.5602371",
"0.5554114",
"0.55487967",
"0.5461188",
"0.5450654"
] | 0.7883569 | 0 |
something like pythonfoo is pythonpythonfoo as spec requirement | def test_starting_with_python(self):
self.assertEqual({"python-python-xyz": None},
pr.sanitize_requirements(["python-xyz"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def test_2x_only_python_version_deploy():\n pass",
"def test_valid_python():\n from decisionengine.framework.util import reaper # noqa: F401\n\n pass",
"def require(name):",
"def test_3x_only_python_versions_deploy():\n pass",
"def py_versiontest(c):\n pass",
"def test_skip_python3_requires(self):\n self.assertEqual(\n {\"python-ovs\": \"2.5.0\"},\n pr.sanitize_requirements(\n [\"ovs>=2.5.0;python_version=='2.7' # Apache-2.0\",\n \"ovs>=2.6.0.dev1;python_version>='3.4' # Apache-2.0\"])\n )",
"def require_python(version_spec, warn=False, extra_msg=None, prereleases=None):\n valid_specifiers = ('===', '==', '<=', '>=', '!=', '~=', '<', '>')\n for spec in valid_specifiers:\n if version_spec.startswith(spec):\n break\n else:\n if version_spec[0].isdigit():\n version_spec = f'~={version_spec}'\n else:\n raise InvalidSpecifier(\n f\"Invalid version specifier: '{version_spec}'\"\n )\n\n version_constraint = SpecifierSet(version_spec, prereleases=prereleases)\n python_version = sys.version.split()[0]\n if python_version not in version_constraint:\n msg = (\n \"The Python version installed in the environment \"\n f\"(v{python_version}) does not satisfy the constraint \"\n f\"'{version_constraint}'\"\n )\n if extra_msg is not None:\n msg = f\"{msg}.\\n{extra_msg}\"\n\n if warn:\n warnings.warn(msg, category=RuntimeWarning)\n else:\n raise DavosError(msg)",
"def test_require():",
"def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)",
"def test_with_markers_and_lowest_version(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0,<=4.1,!=4.0;python_version=='2.7'\"\n \"or python_version=='2.6'\"]))",
"def test_with_ending_client(self):\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"python-xyzclient\"]))\n self.assertEqual({\"python-xyz-client\": None},\n pr.sanitize_requirements([\"python-xyz-client\"]))\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"xyzclient\"]))",
"def test_get_python_version():\n assert re.match(\"^[a-zA-Z0-9_]+, version [0-9.]+\", util.get_python_version())",
"def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro",
"def require():",
"def test_strings_without_foo(self):\n write this test!",
"def test_flask_ext(Script, code, name):\n path = os.path.join(os.path.dirname(__file__), 'flask-site-packages')\n completions = Script(code, sys_path=[path]).completions()\n assert name in [c.name for c in completions]",
"def test_python_version():\n assert sys.version_info.major == 3",
"def test_strings_with_foo(self):\n write this test!",
"def testCheckPythonModule(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency = dependencies.DependencyDefinition('os')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertTrue(result)\n\n dependency = dependencies.DependencyDefinition('bogus')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertFalse(result)",
"def is_python(self):\r\n return self.has_label('python')",
"def test_python2(self):\n if sys.version.startswith(\"2.\"):\n self.assertFalse(_PY3)",
"def test_all_python_versions_deploy():\n pass",
"def test_defining_only_and_defer_fails(self):",
"def test_pep8():\n pep8_test(get_python_filepaths())",
"def test_with_markers(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0;python_version=='2.7' or python_version=='2.6'\"]\n )\n )",
"def test_molecool_imported():\n assert \"molecool\" in sys.modules",
"def validate_required_python_version_running(minimal_required_version: str) -> None:\n try:\n parts = minimal_required_version.split(\".\")\n min_py_version = 1000000*int(parts[0]) + 1000*(int(parts[1]) if len(parts) > 1 else 0) + (int(parts[2]) if len(parts) > 2 else 0)\n running_py_version = 1000000*sys.version_info.major + 1000*sys.version_info.minor + sys.version_info.micro\n if running_py_version < min_py_version:\n raise RuntimeError(\"\")\n except:\n raise RuntimeError(f\"Kqlmagic requires python >= {Constants.MINIMAL_PYTHON_VERSION_REQUIRED}, you use python {sys.version}\")",
"def test_python3(self):\n if sys.version.startswith(\"3.\"):\n self.assertTrue(_PY3)",
"def requires(self):"
] | [
"0.685132",
"0.65986437",
"0.65244454",
"0.6404376",
"0.63750374",
"0.63698786",
"0.6258343",
"0.6173323",
"0.61222714",
"0.60513926",
"0.6050884",
"0.6039168",
"0.59027624",
"0.58682185",
"0.58572984",
"0.580315",
"0.58021176",
"0.5800806",
"0.57969373",
"0.5785427",
"0.57723796",
"0.5757918",
"0.57440615",
"0.5736306",
"0.57223785",
"0.56960887",
"0.56573784",
"0.5604008",
"0.5595889",
"0.5589611"
] | 0.769921 | 0 |
allow markers in requirement lines;multiple versions specified | def test_with_markers_and_lowest_version(self):
self.assertEqual(
{"python-futures": "3.0"},
pr.sanitize_requirements(
["futures>=3.0,<=4.1,!=4.0;python_version=='2.7'"
"or python_version=='2.6'"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)",
"def _add_spec(self, requirement_name, spec_str):\n spec_str = spec_str or '>=0.0.0'\n spec_str = spec_str.replace(' ', '')\n spec_str = '~' + spec_str.replace('.x', '.0') if '.x' in spec_str else spec_str\n self.versions_spec[requirement_name].add(spec_str)",
"def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n)",
"def test_with_markers(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0;python_version=='2.7' or python_version=='2.6'\"]\n )\n )",
"def get_install_requires_version():\n require_str = \"pyscaffold>={major}.{minor}a0,<{next_major}.0a0\"\n major, minor, *rest = (parse_version(pyscaffold_version)\n .base_version.split('.'))\n next_major = int(major) + 1\n return require_str.format(major=major, minor=minor, next_major=next_major)",
"def read_requirements(*parts):\n requirements = []\n for line in read(*parts).splitlines():\n line_2 = re.sub(\n \"(\\s*)?#(?!egg=).*$\", # the space immediately before the hash mark, the hash mark, and anything that follows it, but not \"#egg=\" fragments\n \"\", # replace with a blank string\n line,\n )\n line_3 = re.sub(\n \"(\\s*)?-r.*$\", # we also can't reference other requirement files\n \"\", # replace with a blank string\n line_2,\n )\n if line_3: # i.e. we have a non-zero-length string\n requirements.append(line_3)\n return requirements",
"def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n )",
"def test_with_ending_client(self):\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"python-xyzclient\"]))\n self.assertEqual({\"python-xyz-client\": None},\n pr.sanitize_requirements([\"python-xyz-client\"]))\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"xyzclient\"]))",
"def test_req_file_parse_egginfo_end_of_line_with_url(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"https://example.com/foo.tar.gz#egg=wat\")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert len(reqs) == 1\n assert reqs[0].name == \"wat\"",
"def resolve_requirement_versions(package_versions):\n resolved = Requirement(str(package_versions[0]))\n\n for package_version in package_versions[1:]:\n resolved.specifier = resolved.specifier & package_version.specifier\n resolved.extras = resolved.extras.union(package_version.extras)\n resolved.url = resolved.url or package_version.url\n if resolved.marker and package_version.marker:\n resolved.marker = Marker(f\"{resolved.marker} or {package_version.marker}\")\n elif package_version.marker:\n resolved.marker = package_version.marker\n\n return resolved",
"def test_req_file_parse_comment_start_of_line(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"# Comment \")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert not reqs",
"def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n # add line to requirements\n requirements.append(line)\n return requirements",
"def test_skip_python3_requires(self):\n self.assertEqual(\n {\"python-ovs\": \"2.5.0\"},\n pr.sanitize_requirements(\n [\"ovs>=2.5.0;python_version=='2.7' # Apache-2.0\",\n \"ovs>=2.6.0.dev1;python_version>='3.4' # Apache-2.0\"])\n )",
"def test_remote_reqs_parse():\n # this requirements file just contains a comment\n # previously this has failed in py3: https://github.com/pypa/pip/issues/760\n for req in parse_requirements(\n 'https://raw.githubusercontent.com/pypa/pip-test-package/master/'\n 'tests/req_just_comment.txt', session=PipSession()):\n pass",
"def parse_req_file(req_file, verbatim=False):\n req_list = []\n requirements = req_file.readlines()\n for requirement in requirements:\n requirement_no_comments = requirement.split(\"#\")[0].strip()\n\n # if matching requirement line (Thing==1.2.3), update dict, continue\n req_match = re.match(\n r\"\\s*(?P<package>[^\\s\\[\\]]+)(?P<extras>\\[\\S+\\])?==(?P<version>\\S+)\",\n requirement_no_comments,\n )\n req_ignore = requirement.strip().endswith(\" # norot\")\n\n if req_match:\n req_list.append(\n (req_match.group(\"package\"), req_match.group(\"version\"), req_ignore)\n )\n elif requirement_no_comments.startswith(\"-r\"):\n try:\n base_dir = os.path.dirname(os.path.abspath(req_file.name))\n except AttributeError:\n print(\n \"Recursive requirements are not supported in URL based \" \"lookups\"\n )\n continue\n\n # replace the -r and ensure there are no leading spaces\n file_name = requirement_no_comments.replace(\"-r\", \"\").strip()\n new_path = os.path.join(base_dir, file_name)\n try:\n if verbatim:\n req_list.append((None, requirement, req_ignore))\n req_list.extend(parse_req_file(open(new_path), verbatim=verbatim))\n except IOError:\n print(\"Failed to import {}\".format(file_name))\n elif verbatim:\n req_list.append((None, requirement, req_ignore))\n return req_list",
"def test_parse_update_spec_file(self):\n content_init = \"\\n\".join([\n \"Requires: python-pkg1 >=1.0\",\n \"BuildRequires: python-pkg1 >= 1.0\",\n ])\n content_expected = \"\\n\".join([\n \"Requires: python-pkg1 >= 2.0\",\n \"BuildRequires: python-pkg1 >= 1.0\",\n ])\n self.assertEqual(\n content_expected,\n pr.parse_update_spec_file(\n \"testpackage.spec\",\n content_init, {\n \"install_requires\": [\n \"pkg1>=2.0\",\n ],\n }\n )\n )",
"def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)",
"def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]",
"def parse_requirements(*filenames):\n requirements = []\n for f in filenames:\n for line in open(f, 'r').read().split('\\n'):\n # Comment lines. Skip.\n if re.match(r'(\\s*#)|(\\s*$)', line):\n continue\n # Editable matches. Put the egg name into our reqs list.\n if re.match(r'\\s*-e\\s+', line):\n pkg = re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line)\n requirements.append(\"%s\" % pkg)\n # File-based installs not supported/needed. Skip.\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n return requirements",
"def get_requirements(req):\n\n install_requires = []\n with open(req) as f:\n for line in f:\n if not line.startswith(\"#\"):\n install_requires.append(line.strip())\n return install_requires",
"def test_req_file_parse_no_use_wheel(data):\n finder = PackageFinder([], [], session=PipSession())\n for req in parse_requirements(\n data.reqfiles.join(\"supported_options.txt\"), finder,\n session=PipSession()):\n pass\n assert not finder.use_wheel",
"def _update_python_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % self.Version\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)",
"def require_pip(version_spec, warn=False, extra_msg=None, prereleases=None):\n valid_specifiers = ('===', '==', '<=', '>=', '!=', '~=', '<', '>')\n for spec in valid_specifiers:\n if version_spec.startswith(spec):\n break\n else:\n if version_spec[0].isdigit():\n version_spec = f'~={version_spec}'\n else:\n raise InvalidSpecifier(\n f\"Invalid version specifier: '{version_spec}'\"\n )\n\n version_constraint = SpecifierSet(version_spec, prereleases=prereleases)\n pip_version = metadata.version('pip')\n if pip_version not in version_constraint:\n msg = (\n \"The version of pip installed in the environment \"\n f\"(v{pip_version}) does not satisfy the constraint \"\n f\"'{version_constraint}'\"\n )\n if extra_msg is not None:\n msg = f\"{msg}.\\n{extra_msg}\"\n\n if warn:\n warnings.warn(msg, category=RuntimeWarning)\n else:\n raise DavosError(msg)",
"def get_line(self):\n # type: () -> str\n line = \"{}=={}\".format(self.name, self.version)\n if self.type != RequirementType.LATEST_VERSION:\n line += ' # ' + TEMPLATES[self.type]\n if self.type == RequirementType.NOT_LATEST_VERSION:\n line = line.replace(r'(\\S*)', self.error_version)\n\n return line + '\\n'",
"def parse_requirement(req_text):\n req_text = req_text.strip()\n if not req_text:\n return None\n if req_text[0] == \"#\":\n return None\n return pkg_resources.Requirement.parse(req_text)",
"def _check_required(self):\n if self.data['history_file'] is None:\n return\n required = self.data.get('required_changelog_text')\n if not required:\n return\n if isinstance(required, six.string_types):\n required = [required]\n history_last_release = self.data['history_last_release']\n for text in required:\n if text in history_last_release:\n # Found it, all is fine.\n return\n pretty_required = '\"{}\"'.format('\", \"'.join(required))\n if not utils.ask(\n \"WARNING: Changelog should contain at least one of \"\n \"these required strings: {}. Are you sure you \"\n \"want to release?\".format(pretty_required),\n default=False):\n sys.exit(1)",
"def format_requirement(ireq, marker=None, hashes=None):\n if ireq.editable:\n line = \"-e {}\".format(ireq.link.url)\n elif is_url_requirement(ireq):\n line = ireq.link.url\n else:\n line = str(ireq.req).lower()\n\n if marker:\n line = \"{} ; {}\".format(line, marker)\n\n if hashes:\n for hash_ in sorted(hashes):\n line += \" \\\\\\n --hash={}\".format(hash_)\n\n return line",
"def parse_requirements_from_pipfile():\n lineiter = (line.strip() for line in open('Pipfile'))\n requirements_pipfile_style = [line for line in lineiter]\n start_index = requirements_pipfile_style.index('[packages]') + 1\n end_index = requirements_pipfile_style.index('[requires]') - 1\n requirements = list(map(lambda x: x.replace(' = \"', '').replace('\"', ''),\n requirements_pipfile_style[start_index:end_index]))\n return requirements",
"def test_url_preserved_line_req(self):\n url = 'git+http://foo.com@ref#egg=foo'\n req = InstallRequirement.from_line(url)\n assert req.link.url == url"
] | [
"0.68128824",
"0.6486892",
"0.64829147",
"0.6400448",
"0.6394141",
"0.6372991",
"0.63658035",
"0.6328819",
"0.63199604",
"0.63058853",
"0.6260977",
"0.62569046",
"0.622087",
"0.6197919",
"0.6197732",
"0.6161421",
"0.61268747",
"0.6084504",
"0.60470927",
"0.6019284",
"0.6015634",
"0.6011471",
"0.6008809",
"0.5998145",
"0.59624517",
"0.59581614",
"0.59433985",
"0.59420675",
"0.59314615",
"0.5893033"
] | 0.69502366 | 0 |
Ignore requirements with win32 marker | def test_skip_windows_requires(self):
self.assertEqual(
{"python-true": "1"},
pr.sanitize_requirements(
["true>=1",
"wmi;sys_platform=='win32'"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_skip_unless_windows(self):\n pass",
"def test_skip_if_windows(self):\n pass",
"def missing_gdk(finder, caller):\n caller.IgnoreName(\"gdk\")",
"def skip_on_windows (func):\n import sys\n\n return skip_if(sys.platform.startswith('win'))(func)",
"def missing_EasyDialogs(finder, caller):\n if sys.platform == \"win32\":\n caller.IgnoreName(\"EasyDialogs\")",
"def validate_no_win32() -> None:\n try:\n assert sys.platform != \"win32\"\n except AssertionError:\n logger.exception(\"This application cannot run on Windows!\")\n sys.exit(1)",
"def enum_windows():\r\n raise NotImplementedError(\"The function has been removed. \" \\\r\n \"Use high level API instead or pin to version <=0.6.8.\")",
"def _should_ignore(self, name):\n _name = name.lower()\n return (_name.startswith(\"deprecated\") or\n _name.startswith(\"_\") or\n _name in (\"remote\", \"reserved\",\n \"dialogs_py\", \"dialogs_ipy\", \"dialogs_jy\"))",
"def fontforge_skip_checks():\n return None",
"def fontforge_skip_checks():\n return None",
"def missing_readline(finder, caller):\n if sys.platform == \"win32\":\n caller.IgnoreName(\"readline\")",
"def _on_windows() -> bool:\n return os.name == \"nt\"",
"def _missing_feature_to_skipped_tests(self):\n # FIXME: This list matches WebKitWin and should be moved onto the Win port.\n return {\n \"Accelerated Compositing\": [\"compositing\"],\n \"3D Rendering\": [\"animations/3d\", \"transforms/3d\"],\n }",
"def _windows(session, exclude=None):\n if exclude is None:\n exclude = []\n wins = [w for w in session.handles if w not in exclude]\n return set(wins)",
"def is_windows():\r\n return sys.platform == \"win32\"",
"def is_windows():\n return sys.platform == \"win32\"",
"def win():\n if platform.system() in WINDOWS:\n return True\n return False",
"def is_win():\n return sys.platform[:3] == \"win\"",
"def is_windows():\n return os.name == \"nt\"",
"def has32bit_windows(self, has32bit_windows):\n\n self._has32bit_windows = has32bit_windows",
"def test_windows(args):\n print('============= Testing for Correctness (Windows) =============')\n return _replace_test(args, '\\r\\n')",
"def is_windows():\n if os.name == \"nt\":\n return True\n return False",
"def load_win32api(finder, module):\n finder.IncludeModule(\"pywintypes\")",
"def disable_dialog_boxes(self):\n self.exec_command('SilentUpdateFW')\n self.exec_command('SuppressInfoUpdateFW')\n self.exec_command('SetBatchMode = 1')\n\n # SuppressControlPanel\n self.exec_command(\"HideDeviceSelection = 1\")\n self.exec_command(\"SuppressControlPanel\")\n # Hide Flash Windows\n self.exec_command(\"DisableInfoWinFlashDL\")\n self.exec_command(\"DisableInfoWinFlashBPs\")",
"def isWindows(cls):\n return WIN",
"def getwindowsversion(): # real signature unknown; restored from __doc__\n pass",
"def os_is_windows():\n return platform.system() == \"Windows\"",
"def _platform_compatible():\r\n raise NotImplementedError",
"def is_windows() -> bool:\n return sys.platform == \"win32\"",
"def _GetDefaultBinExcludes(self):\n if sys.platform == \"win32\":\n return [\"comctl32.dll\", \"oci.dll\", \"cx_Logging.pyd\"]\n else:\n return [\"libclntsh.so\", \"libwtc9.so\"]"
] | [
"0.6767136",
"0.67545867",
"0.6513805",
"0.635709",
"0.6211686",
"0.60411996",
"0.5990416",
"0.5737507",
"0.56727284",
"0.56727284",
"0.5605445",
"0.5456626",
"0.54284465",
"0.5417463",
"0.539966",
"0.5398191",
"0.5334668",
"0.53026396",
"0.5255139",
"0.5179256",
"0.514295",
"0.51233524",
"0.5118697",
"0.51025385",
"0.50943553",
"0.50359285",
"0.5026224",
"0.5024764",
"0.50237703",
"0.50219697"
] | 0.72444594 | 0 |
Ignore requirements with python3 marker | def test_skip_python3_requires(self):
self.assertEqual(
{"python-ovs": "2.5.0"},
pr.sanitize_requirements(
["ovs>=2.5.0;python_version=='2.7' # Apache-2.0",
"ovs>=2.6.0.dev1;python_version>='3.4' # Apache-2.0"])
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_with_markers_and_lowest_version(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0,<=4.1,!=4.0;python_version=='2.7'\"\n \"or python_version=='2.6'\"]))",
"def test_starting_with_python(self):\n self.assertEqual({\"python-python-xyz\": None},\n pr.sanitize_requirements([\"python-xyz\"]))",
"def test_with_markers(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0;python_version=='2.7' or python_version=='2.6'\"]\n )\n )",
"def test_3x_only_python_versions_deploy():\n pass",
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def test_req_file_parse_no_use_wheel(data):\n finder = PackageFinder([], [], session=PipSession())\n for req in parse_requirements(\n data.reqfiles.join(\"supported_options.txt\"), finder,\n session=PipSession()):\n pass\n assert not finder.use_wheel",
"def strip_marker(req):\n import pkg_resources # Delay import to avoid unnecessary side-effects\n\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req",
"def test_py3(self):\n if sys.version_info < self.MIN_SUPPORTED_VERSION:\n return\n import miflora # noqa: F401 # pylint: disable=unused-import,import-outside-toplevel",
"def test_2x_only_python_version_deploy():\n pass",
"def test_with_ending_client(self):\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"python-xyzclient\"]))\n self.assertEqual({\"python-xyz-client\": None},\n pr.sanitize_requirements([\"python-xyz-client\"]))\n self.assertEqual({\"python-xyzclient\": None},\n pr.sanitize_requirements([\"xyzclient\"]))",
"def test_environment_marker_extras(self, data):\n reqset = self.basic_reqset()\n req = InstallRequirement.from_editable(\n data.packages.join(\"LocalEnvironMarker\"))\n reqset.add_requirement(req)\n finder = PackageFinder([data.find_links], [], session=PipSession())\n reqset.prepare_files(finder)\n # This is hacky but does test both case in py2 and py3\n if sys.version_info[:2] in ((2, 7), (3, 4)):\n assert reqset.has_requirement('simple')\n else:\n assert not reqset.has_requirement('simple')",
"def test_pypi_py2py3_fresh_nodeps_ignore_git(self):\n fake_results = self.success_data + self.github_pairs\n self.fake_store.save_compatibility_statuses(fake_results)\n package_name = 'google-api-core'\n self.assertImageResponsePyPI(package_name)\n self.assertTargetResponse(package_name, 'py2', 'py3')",
"def test_skip_windows_requires(self):\n self.assertEqual(\n {\"python-true\": \"1\"},\n pr.sanitize_requirements(\n [\"true>=1\",\n \"wmi;sys_platform=='win32'\"]))",
"def test_python3(self):\n if sys.version.startswith(\"3.\"):\n self.assertTrue(_PY3)",
"def test_remote_reqs_parse():\n # this requirements file just contains a comment\n # previously this has failed in py3: https://github.com/pypa/pip/issues/760\n for req in parse_requirements(\n 'https://raw.githubusercontent.com/pypa/pip-test-package/master/'\n 'tests/req_just_comment.txt', session=PipSession()):\n pass",
"def test_python_after_38():\n import sys\n assert sys.version_info >= (3, 8)",
"def is_py3():\n return sys.version_info >= (3, 0)",
"def require_python(version_spec, warn=False, extra_msg=None, prereleases=None):\n valid_specifiers = ('===', '==', '<=', '>=', '!=', '~=', '<', '>')\n for spec in valid_specifiers:\n if version_spec.startswith(spec):\n break\n else:\n if version_spec[0].isdigit():\n version_spec = f'~={version_spec}'\n else:\n raise InvalidSpecifier(\n f\"Invalid version specifier: '{version_spec}'\"\n )\n\n version_constraint = SpecifierSet(version_spec, prereleases=prereleases)\n python_version = sys.version.split()[0]\n if python_version not in version_constraint:\n msg = (\n \"The Python version installed in the environment \"\n f\"(v{python_version}) does not satisfy the constraint \"\n f\"'{version_constraint}'\"\n )\n if extra_msg is not None:\n msg = f\"{msg}.\\n{extra_msg}\"\n\n if warn:\n warnings.warn(msg, category=RuntimeWarning)\n else:\n raise DavosError(msg)",
"def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over",
"def skip_require():\n global ignore_once\n ignore_once = True",
"def is_py3():\n return sys.version_info[0] == 3",
"def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras",
"def requires_dependency(name):\n if name == 'Fermi ST':\n name = 'pyLikelihood'\n\n try:\n __import__(name)\n skip_it = False\n except ImportError:\n skip_it = True\n\n reason = 'Missing dependency: {}'.format(name)\n return pytest.mark.skipif(skip_it, reason=reason)",
"def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]",
"def main(\n req_in: str, req_pinned: str, tld: Optional[str], ignore: Optional[Iterable[str]]\n):\n print(\"Compiling requirements!\")\n if isinstance(ignore, str):\n ignore = [ignore]\n reqcompyle(\n Path(req_in),\n Path(req_pinned),\n Path(tld) if tld else None,\n [Path(i) for i in ignore] if ignore else None,\n )\n return 0",
"def get_setup_requirements() -> str:\n return \"\"\"\n setuptools\n wheel\n\"\"\"",
"def _python_dependencies(self):\n dependencies = []\n if self._requires_extensions():\n self._inject_extensions_build(dependencies)\n dependencies.append('- task: UsePythonVersion@0')\n dependencies.append(' displayName: \"Setting python version to 3.6 as required by functions\"')\n dependencies.append(' inputs:')\n dependencies.append(' versionSpec: \\'3.6\\'')\n dependencies.append(' architecture: \\'x64\\'')\n dependencies.append('- script: |')\n dependencies.append(' python3.6 -m venv worker_venv')\n dependencies.append(' source worker_venv/bin/activate')\n dependencies.append(' pip3.6 install setuptools')\n if self._requires_pip():\n dependencies.append(' pip3.6 install -r requirements.txt')\n return dependencies",
"def test_python2(self):\n if sys.version.startswith(\"2.\"):\n self.assertFalse(_PY3)",
"def test_pypi_py2py3_fresh_nodeps_ignore_pairs_without_common_versions(\n self):\n fake_results = self.success_data + self.pairs_without_common_versions\n self.fake_store.save_compatibility_statuses(fake_results)\n package_name = 'google-api-core'\n self.assertImageResponsePyPI(package_name)\n self.assertTargetResponse(package_name, 'py2', 'py3')",
"def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)"
] | [
"0.7039466",
"0.7032259",
"0.7014665",
"0.67280644",
"0.64697397",
"0.6431665",
"0.64238596",
"0.6393986",
"0.63385063",
"0.6160169",
"0.6138403",
"0.60360676",
"0.6019412",
"0.58904576",
"0.5868022",
"0.5848948",
"0.582707",
"0.5823058",
"0.582071",
"0.58111125",
"0.57750684",
"0.57740986",
"0.5773046",
"0.57652307",
"0.57582057",
"0.5749661",
"0.5749187",
"0.572402",
"0.5685485",
"0.5682424"
] | 0.78808457 | 0 |
Save a mask to filename. | def save_mask2file(mask, filename, crs=None, transform=None):
height, width = mask.shape
if mask.dtype == np.bool:
mask = mask.astype(np.uint8)
with rasterio.open(
filename,
'w',
driver='GTiff',
dtype=mask.dtype,
count=1,
width=width,
height=height,
crs=crs,
transform=transform,
) as dst:
dst.write(mask, indexes=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_mask(self, path):\n if self.im is None:\n raise ValueError('You didnt call the MaskOverlay, therefore there is no image in memory')\n np.save(path, self.mask)",
"def save_overlay(self, path):\n if self.im is None:\n raise ValueError('You didnt call the MaskOverlay, therefore there is no image in memory')\n im_masked = img_as_ubyte(self.im_masked)\n imsave(path, im_masked)",
"def onSaveMask(self, event): # --------------------------------------------------- Save new mask to file\n # set file types for find dialog\n wildcard = \"PySolo Video config file (*.msk)|*.msk|\" \\\n \"All files (*.*)|*.*\" # adding space in here will mess it up!\n\n dlg = wx.FileDialog(self,\n message=\"Save mask as file ...\",\n defaultDir=self.data_folder,\n wildcard=wildcard,\n style=(wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n )\n\n if not(dlg.ShowModal() == wx.ID_OK): # show the file browser window\n return False\n else:\n input = dlg.GetPath() # get the path from the save dialog\n\n if os.path.isfile(input):\n os.remove(input) # get rid of old file before appending data\n\n with open(input, 'a') as mask_file:\n for roi in self.maskFileContents:\n mask_file.write(roi) # write to file line by line\n\n gbl.shouldSaveMask = False # mask is saved\n gbl.shouldSaveCfg = True\n\n dlg.Destroy()\n mask_file.close()\n\n self.pickMaskBrowser.textbox.ChangeValue(str(input)) # update variables and textctrl\n self.screenUpdate(event)",
"def setFileMask(self, mask):\n self._checkArgs({'mask': types.StringTypes})\n self.fileMask = mask",
"def save_mask(cls, image: Image, save_path: str):\n mask = image.get_mask_for_save()\n if mask is None:\n return\n mask_max = np.max(mask)\n mask = mask.astype(minimal_dtype(mask_max))\n cls._save(mask, save_path)",
"def save_ma(self, Interferogram, mask_array):\n phsFile = self.DataPrefix + 'Data_' + Interferogram.Name[:-4]\n mskFile = self.MaskPrefix + 'Mask_' + Interferogram.Name[:-4]\n np.save(os.path.join(self.ProcDir, phsFile), mask_array.data)\n np.save(os.path.join(self.ProcDir, mskFile), mask_array.mask)\n\n return phsFile",
"def write_mask(infile, outfile, mask, allow_huge=True):\n\n os.system('rm -rf ' + outfile)\n os.system('cp -r ' + infile + ' ' + outfile)\n\n if allow_huge:\n casaStuff.exportfits(imagename=outfile,\n fitsimage=outfile + '.fits',\n stokeslast=False, overwrite=True)\n hdu = pyfits.open(outfile + '.fits')[0]\n hdu.data = mask.T\n hdu.header['BITPIX'] = -32\n\n # Match up the WCS so tclean doesn't throw an error (this is some rounding to the nth decimal place...)\n header = casaStuff.imhead(infile, mode='list')\n wcs_names = ['cdelt1', 'cdelt2', 'cdelt3', 'cdelt4',\n 'crval1', 'crval2', 'crval3', 'crval4']\n\n for wcs_name in wcs_names:\n hdu.header[wcs_name.upper()] = header[wcs_name]\n\n hdu.writeto(outfile + '.fits', clobber=True)\n casaStuff.importfits(fitsimage=outfile + '.fits',\n imagename=outfile,\n overwrite=True)\n\n # Remove the intermediate fits file\n os.system('rm -rf ' + outfile + '.fits')\n else:\n myia.open(outfile)\n myia.putchunk(mask)\n myia.close()\n\n return True",
"def save_data(features, labels, mask, file_name):\n label = labels[mask]\n label = label.reshape((len(label), 1))\n data = np.concatenate((features[mask, :], label), axis = 1)\n np.save(file_name, data)",
"def save_as(self, filename):\n opencv.imwrite(filename, self.img)",
"def export_fits(self, mask=None, **kwargs):\n \n ## Check key word arguments\n save_file = kwargs.pop('save_file', 'image.fits')\n fill_value = kwargs.pop('fill_value',0.)\n \n ## Check if mask provided matches data shape\n if self.is_valid_mask(mask):\n masked_data = np.ma.MasedArray()",
"def save_signal_mask(self, outpath, regions=[(250,350,0,100)]):\n mask = np.zeros(self.Set.Shape)\n for inds in regions:\n mask[inds[0]:inds[1],inds[2]:inds[3]] = 1\n np.save(outpath, mask)\n print('saved signal mask as {}'.format(outpath))",
"def make_fmask(self, feature_mask):\n fmask_filename = self.filename + \".fmask.\" + str(self.tet_num)\n mask = np.tile(feature_mask, (self.feature_array.shape[0], 1))\n with open(fmask_filename, \"w\") as f:\n f.write(str(self.feature_array.shape[1]))\n f.write(\"\\n\")\n np.savetxt(f, mask, fmt=\"%1d\")",
"def apply_mask(file: str, nlines: int, nsamples: int, mask_file: str):\n data = read_bin(file, nlines, nsamples)\n mask = read_bmp(mask_file)\n\n data[mask == 0] = 0\n\n outfile = \"{file}_masked\".format(file=file)\n data.tofile(outfile)\n\n return outfile",
"def save(self, fn):\n plt.imsave(fn, self.image)",
"def changeMask(self, mask): \n if self.fileDialogShow:\n return\n if mask == \"file\":\n self.fileDialogShow = True\n self.openFileDialog()\n else:\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n img = \"masko.png\"\n else:\n img = \"maska.png\"\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, img))\n img = ocempgui.draw.Image.load_image(imgPath)\n self.imgOptionsTab.picture = img \n self.avatarConfiguration[\"mask\"] = None\n self.paintMask()",
"def writeto(self, save_to_path, method='ascii',\n\t\ttell_sp=None):\n\t\t#pixel = np.delete(np.arange(1024),list(self.mask))\n\t\tpixel = np.arange(len(self.oriWave))\n\t\t## create the output mask array 0=good; 1=bad\n\t\tif (self.apply_sigma_mask) or (self.mask != []):\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\t\t\tnp.put(mask,self.mask,int(1))\n\t\telse:\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\n\t\tif method == 'fits':\n\t\t\t#fullpath = self.path + '/' + self.name + '_' + str(self.order) + '_all.fits'\n\t\t\t#hdulist = fits.open(fullpath, ignore_missing_end=True)\n\t\t\t#hdulist.writeto(save_to_path)\n\t\t\t#hdulist.close()\n\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\t## wavelength\n\t\t\thdu1 = fits.PrimaryHDU(self.wave/10000, header=self.header)\n\t\t\tsave_to_path2_1 = save_to_path2 + '_wave.fits'\n\t\t\thdu1.writeto(save_to_path2_1)\n\t\t\t## flux\n\t\t\thdu2 = fits.PrimaryHDU(self.flux, header=self.header)\n\t\t\tsave_to_path2_2 = save_to_path2 + '_flux.fits'\n\t\t\thdu2.writeto(save_to_path2_2)\n\t\t\t## uncertainty\n\t\t\thdu3 = fits.PrimaryHDU(self.noise, header=self.header)\n\t\t\tsave_to_path2_3 = save_to_path2 + '_uncertainty.fits'\n\t\t\thdu3.writeto(save_to_path2_3)\n\t\t\t## pixel\n\t\t\thdu4 = fits.PrimaryHDU(pixel, header=self.header)\n\t\t\tsave_to_path2_4 = save_to_path2 + '_pixel.fits'\n\t\t\thdu4.writeto(save_to_path2_4)\n\t\t\t## mask\n\t\t\thdu5 = fits.PrimaryHDU(mask, header=self.header)\n\t\t\tsave_to_path2_5 = save_to_path2 + '_mask.fits'\n\t\t\thdu5.writeto(save_to_path2_5)\n\n\t\t\tif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\t# the telluric standard model\n\t\t\t\twavelow = tell_sp2.wave[0] - 20\n\t\t\t\twavehigh = tell_sp2.wave[-1] + 
20\n\t\t\t\ttell_mdl = smart.getTelluric(wavelow=wavelow,wavehigh=wavehigh)\n\t\t\t\t# continuum correction for the data\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2, \n\t\t\t\t\tmodel=tell_mdl,order=tell_sp2.order)\n\t\t\t\t# telluric flux\n\t\t\t\thdu6 = fits.PrimaryHDU(tell_sp.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_6 = save_to_path2 + '_telluric_flux.fits'\n\t\t\t\thdu5.writeto(save_to_path2_6)\n\t\t\t\t# telluric uncertainty\n\t\t\t\thdu7 = fits.PrimaryHDU(tell_sp.noise, header=tell_sp.header)\n\t\t\t\tsave_to_path2_7 = save_to_path2 + '_telluric_uncertainty.fits'\n\t\t\t\thdu5.writeto(save_to_path2_7)\n\t\t\t\t# telluric model\n\t\t\t\thdu8 = fits.PrimaryHDU(tell_mdl.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_8 = save_to_path2 + '_telluric_model.fits'\n\t\t\t\thdu5.writeto(save_to_path2_8)\n\t\t\t\t\n\n\t\telif method == 'ascii':\n\t\t\tif '.txt' not in save_to_path:\n\t\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\t\telse:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path\n\n\t\t\tif tell_sp is None:\n\t\t\t\tdf = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty',\n\t\t\t\t\t'pixel', 'mask'])\n\t\t\t\n\t\t\telif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2\n\t\t\t\t\t,order=self.order)\n\t\t\t\tlsf0 = smart.getLSF(tell_sp2)\n\t\t\t\ttell_sp2.flux = tell_sp2.oriFlux\n\t\t\t\ttell_sp2.wave = 
tell_sp2.oriWave\n\t\t\t\ttell_mdl = smart.convolveTelluric(lsf0, tell_sp2)\n\n\t\t\t\tprint(len(self.oriWave), len(self.oriFlux), len(self.oriNoise), len(tell_sp.oriFlux),\n\t\t\t\t\tlen(tell_sp.oriNoise), len(tell_mdl.flux), len(pixel), len(mask))\n\n\t\t\t\tdf = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'telluric_flux':list(tell_sp.oriFlux),\n\t\t\t\t\t'telluric_uncertainty':list(tell_sp.oriNoise),\n\t\t\t\t\t'telluric_model':list(tell_mdl.flux),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\n\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty', \n\t\t\t\t\t'telluric_flux', 'telluric_uncertainty', 'telluric_model',\n\t\t\t\t\t'pixel', 'mask'])",
"def generateMask(self, nameFile): \n imgPath = os.path.join(GG.utils.PATH_PHOTO_MASK, nameFile)\n imgMask = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\")))\n imgTemplate = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"template.png\")))\n imgUpload = Image.open(imgPath)\n size = MASK_SIZE[self.avatarConfiguration[\"headSize\"]]\n imgUploadResized = imgUpload.resize(size, Image.ANTIALIAS)\n imgMask.paste(imgUploadResized, MASK_COORD[self.avatarConfiguration[\"headSize\"]], imgTemplate)\n imgMask.save(MASK_UPLOAD)\n self.avatarConfiguration[\"mask\"] = \"imgUploadMask.png\"\n self.paintMask()",
"def save_clips(fname, raw, bounds, mask, start_from=0):\n # data prep\n mask = np.asarray(mask)\n # save data\n if os.path.isfile(fname):\n _save_clips_append(fname, raw, bounds, mask, start_from)\n else:\n _save_clips_new(fname, raw, bounds, mask)",
"def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)",
"def final_mask(path, output_mask, percentage=0.5):\n with fits.open(path, \"readonly\") as temp_mask:\n mask_data = temp_mask[0].data\n mask_header = temp_mask[0].header\n mask_data[mask_data >= percentage] = 1\n mask_data[mask_data < percentage] = 0\n fits.writeto(output_mask, mask_data, mask_header, clobber=True)",
"def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")",
"def get_mask_path(self, file_path: str) -> str:",
"def save(self, filename):\n\n f = libc.fopen(filename, \"w\")\n if f == 0:\n raise IOError(\"Cannot open file for writing\")\n\n try:\n if not ipset.ipset_save(self.set, f):\n raise IOError(\"Could not write IP set\")\n\n finally:\n libc.fclose(f)",
"def save_as_fits(self, filename):",
"def save(self, filename):\n\n f = libc.fopen(filename, \"w\")\n if f == 0:\n raise IOError(\"Cannot open file for writing\")\n\n try:\n if not ipset.ipmap_save(self.map, f):\n raise IOError(\"Could not write IP map\")\n\n finally:\n libc.fclose(f)",
"def save(self, filename):\n \n path, name = os.path.split(filename)\n ext = name.split(\".\")[-1]\n _tkExec(self.image.write, filename, format=ext)",
"def write_df_to_binary(file_name_mask, df):\n write_matrix_to_binary(file_name_mask + '-value.bin', df.values)\n with open(file_name_mask + '-name.txt', 'w') as f:\n f.write(\"\\t\".join(df.index))\n f.write(\"\\n\")\n f.write(\"\\t\".join(df.columns))\n f.write(\"\\n\")",
"def generate_example_mask(lat_size, lon_size):\n mask_rand = np.random.randint(2, size=(lat_size, lon_size))\n save_path = directories.ANALYSIS + '/'\n np.savetxt(os.path.join(save_path, 'mask_example.out'), mask_rand)",
"def write(filename, data):\r\n with open(filename, \"wb\") as f:\r\n pic.dump(data, f)",
"def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)"
] | [
"0.74468195",
"0.681574",
"0.67942196",
"0.64891225",
"0.64773023",
"0.62801504",
"0.62455696",
"0.62434334",
"0.6102407",
"0.6039554",
"0.60331523",
"0.5974805",
"0.5962826",
"0.5906069",
"0.5905",
"0.58839047",
"0.58183604",
"0.5792057",
"0.5689327",
"0.5682265",
"0.56185114",
"0.5610761",
"0.560785",
"0.55912226",
"0.5584652",
"0.55583817",
"0.55398285",
"0.55387735",
"0.55182195",
"0.55039793"
] | 0.72517014 | 1 |
Load a binary mask from filename into a numpy array. mask The mask image loaded as a numpy array | def load_mask_from_file(filename):
mask = imread(filename)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_signal_mask(self, path):\n mask = np.load(path)",
"def load_mask(filename):\n nib_image = nib.load(filename)\n mask_affine = nib_image.affine\n\n return preprocess_nib(nib_image, is_mask=True), mask_affine",
"def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)",
"def _read_mask_path(mask_path, data):\n if mask_path is not None:\n from astropy.io import fits\n extension = mask_path.split('.')[-1].lower()\n if extension == 'fits':\n mask = np.squeeze(fits.getdata(mask_path))\n elif extension == 'npy':\n mask = np.load(mask_path)\n else:\n raise ValueError(\"Mask must be a `.fits` or `.npy` file.\")\n if mask.shape != data.shape:\n raise ValueError(\"Mismatch in mask and data shape.\")\n mask = np.where(np.isfinite(mask), mask, 0.0)\n else:\n mask = np.ones(data.shape)\n return mask.astype('bool')",
"def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_image_path = info['path'].replace(\"images\", \"masks\")\n mask = cv2.imread(mask_image_path)\n mask = (np.max(mask, axis=2) if len(mask.shape) > 2 else mask).reshape((128,128,1))\n \n return mask, np.array([1,])",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(\n os.path.dirname(os.path.dirname(info['path'])), 'masks')\n\n # Read mask files from .png image\n masks = []\n for file in next(os.walk(mask_dir))[2]:\n if file.endswith('.png'):\n mask = imread(os.path.join(mask_dir, file),\n as_gray=True).astype(np.bool)\n masks.append(mask)\n masks = np.stack(masks, axis=-1)\n # Return masks, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return masks, np.ones([masks.shape[-1]], dtype=np.int32)",
"def load_inbreast_mask(mask_path, imshape=(4084, 3328)):\n\n def load_point(point_string):\n x, y = tuple([float(num) for num in point_string.strip('()').split(',')])\n return y, x\n\n mask_shape = np.transpose(imshape)\n mask = np.zeros(mask_shape)\n with open(mask_path, 'rb') as mask_file:\n plist_dict = plistlib.load(mask_file, fmt=plistlib.FMT_XML)['Images'][0]\n numRois = plist_dict['NumberOfROIs']\n rois = plist_dict['ROIs']\n assert len(rois) == numRois\n for roi in rois:\n numPoints = roi['NumberOfPoints']\n points = roi['Point_px']\n assert numPoints == len(points)\n points = [load_point(point) for point in points]\n if len(points) <= 2:\n for point in points:\n mask[int(point[0]), int(point[1])] = 1\n else:\n x, y = zip(*points)\n x, y = np.array(x), np.array(y)\n poly_x, poly_y = polygon(x, y, shape=mask_shape)\n mask[poly_x, poly_y] = 1\n return mask",
"def get_mask_arr(path):\n with rasterio.open(path) as src:\n img = src.read().transpose((1, 2, 0))\n seg = np.array(img, dtype=int)\n\n return seg[:, :, 0]",
"def read_mask(filename):\n # single label images\n # source_msk = (cv.imread(mask_filename) > 0).astype(np.float)\n # display_img = blend_in_channel(source_img, source_msk)\n\n # multi-label images\n try:\n source_msk = cv.imread(filename, cv.IMREAD_ANYCOLOR)\n except FileNotFoundError as e:\n logger.warning(\"'%s' not found, creating empty\" % filename)\n source_msk = np.zeros(source_img.shape[:2], dtype=np.uint8)\n logger.debug(\"source_msk.shape: '%s'\" % str(source_msk.shape))\n\n # if the image is multichannel, take only the first channel\n if len(source_msk.shape) > 2:\n logger.warning(\"'%s'.shape = %s, reducing to first channel\" % (basename(filename), str(source_msk.shape)))\n source_msk = source_msk.mean(axis=-1).astype(int)\n\n source_msk = source_msk[..., np.newaxis]\n\n # mask label values\n labels = np.unique(source_msk)\n logger.info(\"'%s':%s:%s %i labels\" % (basename(filename), str(source_msk.shape), str(source_msk.dtype), len(labels)))\n\n if any([label > max(colourmap.keys()) for label in labels]):\n logger.warning(\"label values > colourmap range [%i, %i] are mapped to %i\" % (\n min(colourmap.keys()), max(colourmap.keys()), 1))\n\n for label in labels:\n if label > max(colourmap.keys()):\n source_msk[source_msk==label] = 1\n\n labels = np.unique(source_msk)\n logger.info(\"'%s':%s:%s labels: %s\" % (basename(filename), str(source_msk.shape), str(source_msk.dtype), labels))\n\n return source_msk.astype(float)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]['mask_info']\n mask_, id_ = info\n\n return mask_, id_ #mask.astype(np.bool)",
"def load_mask(path, image, mask_name='module_unet', center=True):\n with open(path, 'r') as file:\n data = json.load(file)\n # if len(data[\"objects\"]) == 0:\n # return None\n # code = data[\"objects\"][0][\"bitmap\"][\"data\"]\n # origin = data[\"objects\"][0][\"bitmap\"][\"origin\"]\n # else:\n # flag = True\n # for obj in data[\"objects\"]:\n # if obj['classTitle'] == mask_name:\n inx = has_mask(mask_name, data=data)\n if inx is not False:\n obj = data[\"objects\"][inx]\n code = obj[\"bitmap\"][\"data\"]\n origin = obj[\"bitmap\"][\"origin\"]\n else:\n mask = np.zeros((image.shape[0], image.shape[1]))\n mask = mask.astype('uint8')\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n if center:\n return mask, mask_center\n else:\n return mask\n mask = base64_2_mask(code)\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n mask_center += origin\n\n up = np.zeros((origin[1], mask.shape[1]))\n mask2 = np.vstack((up, mask))\n left = np.zeros((mask2.shape[0], origin[0]))\n mask3 = np.hstack((left, mask2))\n down = np.zeros((image.shape[0] - mask3.shape[0], mask3.shape[1]))\n mask4 = np.vstack((mask3, down))\n right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))\n mask5 = np.hstack((mask4, right))\n\n if center:\n return mask5.astype('uint8'), mask_center.astype(int)\n else:\n return mask5.astype('uint8')",
"def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)",
"def load_bin(file_path):\n return np.load(file_path)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)",
"def load_mask_pre(self, image_id, mask_path):\n img = Image.open(mask_path)\n colors = img.getcolors()\n n_dim = np.shape(colors)\n num_obj = n_dim[0]-1 #not include the background\n\n mask = np.zeros([np.shape(img)[0], np.shape(img)[1], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img, colors)\n\n # Map class names to class IDs.\n class_ids = []\n for i in range(num_obj):\n class_ids.append(colors[i+1][1])\n\n return mask.astype(np.bool), np.array(class_ids, dtype=np.int32) #mask.astype(np.bool)",
"def read_seaice_mask(file=r'C:\\Users\\apbarret\\Documents\\data\\sea_ice_index\\Arctic_region_mask_Meier_AnnGlaciol2007.msk'):\n cols = 304\n rows = 448\n img = np.fromfile(file, dtype='byte').reshape(rows, cols)\n return img",
"def load_mask(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['height'], info['width'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count) # one more fore background\r\n #add Background\r\n class_ids[0] = 0; # Background\r\n mask_new[:, :, 0] = np.invert(mask_total.astype(np.bool))\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_image_path = info['path'].replace(\"_resized.png\", \"_gt_chambers_resized.png\")\n mask = cv2.imread(mask_image_path)\n mask = np.max(mask, axis=2).reshape((128,128,1))\n # If grayscale. Convert to RGB for consistency.\n #if mask.ndim != 3:\n # mask = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if mask.shape[-1] == 4:\n mask = mask[..., :3]\n \n return mask,\\\n np.array([1,])\n #np.array([1, 2, 3])",
"def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert 16 bit mask to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)\n bin_mask = get_bin_mask(mask)\n n_instance = bin_mask.shape[-1]\n return bin_mask, np.ones([n_instance], dtype=np.int32)",
"def mask_from_file(mask_file, obs_hdr, shape):\n try:\n return fits.getdata(mask_file).astype(bool)\n except IOError:\n pass # When not in fits format\n\n try:\n import pyregion\n regfilt = pyregion.open(mask_file).as_imagecoord(obs_hdr).get_filter()\n return ~regfilt.mask(shape)\n except ImportError:\n pyregion = None\n warn('pyregion module could not be imported. ds9 region format masks '\n 'will be ignored.')\n except UnicodeDecodeError:\n pass # When not ds9 region format\n\n return None",
"def read_mask(mask_path, mask_type, mask_name,patch_size,show_image=None):\n path_test = mask_path\n\n mask= Image.open(path_test+\"/\"+\"{}\".format(mask_type)+\n \"/\"+\"{}.tif\".format(mask_name))\n mask_list = np.asarray(list (mask.getdata() ))\n\n mask_list = mask_list / np.amax(mask_list)\n #either use from future or use // to get float result\n mask_list = np.reshape(mask_list,(patch_size,patch_size))\n if (show_image == True):\n\n print(mask_list.shape)\n plt.figure()\n plt.imshow(mask_list,cmap='gray')\n plt.show()\n print(mask_list)\n return mask_list",
"def load_mask_custom(self, image_id, image_shape):\n info = self.image_info[image_id]\n filePaths = info['maskPaths']\n classes = info['maskClasses']\n \n masks = []\n class_ids = []\n if(len(image_shape)==3):\n image_shape = image_shape[:2]\n \n # 1 filePath -- 1 class \n for i, filePath in enumerate(filePaths):\n \n if filePath.endswith(\".png\"):\n mask = cv2.imread(filePath, 0)\n mask = np.asarray(mask, dtype = \"uint8\")\n \n masks.append(mask)\n class_ids.append(classes[i])\n \n if len(masks)==0 :\n masks.append(np.zeros(image_shape, dtype = \"uint8\"))\n class_ids.append(0)\n \n image = np.stack(masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return image, class_ids",
"def get_mask(self, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n mask_path = serial.preprocess(p + \"mask.npy\")\n mask = np.load(mask_path)\n if not np.all(np.bitwise_or(mask == 0, mask == 1)):\n raise ValueError(\"Mask has incorrect values.\")\n return mask"
] | [
"0.8149826",
"0.75025517",
"0.7336277",
"0.72396976",
"0.69996434",
"0.69910765",
"0.69326127",
"0.684793",
"0.6726304",
"0.671339",
"0.6681663",
"0.66645074",
"0.6654374",
"0.66409326",
"0.6637058",
"0.66202277",
"0.66072327",
"0.65920156",
"0.65165544",
"0.6504364",
"0.64967656",
"0.64898676",
"0.64733195",
"0.6472499",
"0.64296836",
"0.64002174",
"0.63902956",
"0.63748544",
"0.6313282",
"0.6297182"
] | 0.78713435 | 1 |
Load a mask from a shapefile. | def load_mask_from_shapefile(filename, shape, transform):
multipolygon, _ = load_shapefile2multipolygon(filename)
mask = multipolygon2mask(multipolygon, shape, transform)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_mask_from_file(filename):\n mask = imread(filename)\n\n return mask",
"def load_signal_mask(self, path):\n mask = np.load(path)",
"def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)",
"def mask_from_file(mask_file, obs_hdr, shape):\n try:\n return fits.getdata(mask_file).astype(bool)\n except IOError:\n pass # When not in fits format\n\n try:\n import pyregion\n regfilt = pyregion.open(mask_file).as_imagecoord(obs_hdr).get_filter()\n return ~regfilt.mask(shape)\n except ImportError:\n pyregion = None\n warn('pyregion module could not be imported. ds9 region format masks '\n 'will be ignored.')\n except UnicodeDecodeError:\n pass # When not ds9 region format\n\n return None",
"def load_mask(filename):\n nib_image = nib.load(filename)\n mask_affine = nib_image.affine\n\n return preprocess_nib(nib_image, is_mask=True), mask_affine",
"def load_inbreast_mask(mask_path, imshape=(4084, 3328)):\n\n def load_point(point_string):\n x, y = tuple([float(num) for num in point_string.strip('()').split(',')])\n return y, x\n\n mask_shape = np.transpose(imshape)\n mask = np.zeros(mask_shape)\n with open(mask_path, 'rb') as mask_file:\n plist_dict = plistlib.load(mask_file, fmt=plistlib.FMT_XML)['Images'][0]\n numRois = plist_dict['NumberOfROIs']\n rois = plist_dict['ROIs']\n assert len(rois) == numRois\n for roi in rois:\n numPoints = roi['NumberOfPoints']\n points = roi['Point_px']\n assert numPoints == len(points)\n points = [load_point(point) for point in points]\n if len(points) <= 2:\n for point in points:\n mask[int(point[0]), int(point[1])] = 1\n else:\n x, y = zip(*points)\n x, y = np.array(x), np.array(y)\n poly_x, poly_y = polygon(x, y, shape=mask_shape)\n mask[poly_x, poly_y] = 1\n return mask",
"def _read_mask_path(mask_path, data):\n if mask_path is not None:\n from astropy.io import fits\n extension = mask_path.split('.')[-1].lower()\n if extension == 'fits':\n mask = np.squeeze(fits.getdata(mask_path))\n elif extension == 'npy':\n mask = np.load(mask_path)\n else:\n raise ValueError(\"Mask must be a `.fits` or `.npy` file.\")\n if mask.shape != data.shape:\n raise ValueError(\"Mismatch in mask and data shape.\")\n mask = np.where(np.isfinite(mask), mask, 0.0)\n else:\n mask = np.ones(data.shape)\n return mask.astype('bool')",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask_custom(self, image_id, image_shape):\n info = self.image_info[image_id]\n filePaths = info['maskPaths']\n classes = info['maskClasses']\n \n masks = []\n class_ids = []\n if(len(image_shape)==3):\n image_shape = image_shape[:2]\n \n # 1 filePath -- 1 class \n for i, filePath in enumerate(filePaths):\n \n if filePath.endswith(\".png\"):\n mask = cv2.imread(filePath, 0)\n mask = np.asarray(mask, dtype = \"uint8\")\n \n masks.append(mask)\n class_ids.append(classes[i])\n \n if len(masks)==0 :\n masks.append(np.zeros(image_shape, dtype = \"uint8\"))\n class_ids.append(0)\n \n image = np.stack(masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return image, class_ids",
"def load_mask(self, image_id):\n info = self.image_info[image_id]['mask_info']\n mask_, id_ = info\n\n return mask_, id_ #mask.astype(np.bool)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)",
"def mask_from_file(filename, name, mesh):\n with open(filename) as f:\n features = json.load(f)[\"features\"]\n for feature in features:\n if feature[\"properties\"][\"name\"] == name:\n geom = shapely.geometry.shape(feature[\"geometry\"])\n mask = shapely.vectorized.contains(geom, mesh.x2, mesh.y2)\n return mask",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n\n shapes = info['polygons']\n\n for i, p in enumerate(info['polygons']):\n shape = p['shape_attributes']['name']\n mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),\n shape, p, 1)\n\n # Map class names to class IDs.\n if (self.config.MODE == \"Combined\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['element_type'])\n if 'element_type' in s['region_attributes'].keys() else self.class_names.index('door') for s in shapes])\n elif (self.config.MODE == \"Separate\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['Class']) if 'Class' in s['region_attributes'].keys(\n ) else self.class_names.index('Door (Curve)') for s in shapes])\n\n return mask, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(\n os.path.dirname(os.path.dirname(info['path'])), 'masks')\n\n # Read mask files from .png image\n masks = []\n for file in next(os.walk(mask_dir))[2]:\n if file.endswith('.png'):\n mask = imread(os.path.join(mask_dir, file),\n as_gray=True).astype(np.bool)\n masks.append(mask)\n masks = np.stack(masks, axis=-1)\n # Return masks, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return masks, np.ones([masks.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_image_path = info['path'].replace(\"images\", \"masks\")\n mask = cv2.imread(mask_image_path)\n mask = (np.max(mask, axis=2) if len(mask.shape) > 2 else mask).reshape((128,128,1))\n \n return mask, np.array([1,])",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids",
"def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n# logger.info(\"mask {}\".format(image_id))\n if info[\"mask\"] is None:\n craters = info['craters']\n count = len(craters)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, dims in enumerate(craters):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n \"circle\", dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s) for s in info[\"shapes\"]])\n info[\"mask\"] = mask.astype(np.bool)\n info[\"class_ids\"] = class_ids.astype(np.int32)\n else:\n mask, class_ids = info[\"mask\"], info[\"class_ids\"]\n return mask, class_ids",
"def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask",
"def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n # TODO: build dict **self.image_info** in this form\n # self.image_info.keys() = ['objects', 'imgWidth', 'imgHeight']\n # objects is a list which contains label and polygon (same as annotations form below)\n # imgHeight and imgWidth are numbers (usually 1024, 2048)\n annotations = self.image_info[image_id][\"objects\"]\n # annotations form: [{'label': label, 'polygon': [[x1,y1], [x2,y2] ...]}, ...]\n height = self.image_info[image_id]['imgHeight']\n width = self.image_info[image_id]['imgWidth']\n instance_masks = []\n class_ids = []\n for ann in annotations:\n m = self.annToMask(ann, height, width)\n \n label_tmp = ann['label']\n if ( not label_tmp in list(self.class_labels.keys()) ) and label_tmp.endswith('group'):\n label_tmp = label_tmp[:-len('group')]\n \n class_id = self.class_labels[label_tmp]\n instance_masks.append(m)\n class_ids.append(class_id)\n \n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids)\n \n return mask, class_ids",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return mask, class_ids",
"def load_mask(path, image, mask_name='module_unet', center=True):\n with open(path, 'r') as file:\n data = json.load(file)\n # if len(data[\"objects\"]) == 0:\n # return None\n # code = data[\"objects\"][0][\"bitmap\"][\"data\"]\n # origin = data[\"objects\"][0][\"bitmap\"][\"origin\"]\n # else:\n # flag = True\n # for obj in data[\"objects\"]:\n # if obj['classTitle'] == mask_name:\n inx = has_mask(mask_name, data=data)\n if inx is not False:\n obj = data[\"objects\"][inx]\n code = obj[\"bitmap\"][\"data\"]\n origin = obj[\"bitmap\"][\"origin\"]\n else:\n mask = np.zeros((image.shape[0], image.shape[1]))\n mask = mask.astype('uint8')\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n if center:\n return mask, mask_center\n else:\n return mask\n mask = base64_2_mask(code)\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n mask_center += origin\n\n up = np.zeros((origin[1], mask.shape[1]))\n mask2 = np.vstack((up, mask))\n left = np.zeros((mask2.shape[0], origin[0]))\n mask3 = np.hstack((left, mask2))\n down = np.zeros((image.shape[0] - mask3.shape[0], mask3.shape[1]))\n mask4 = np.vstack((mask3, down))\n right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))\n mask5 = np.hstack((mask4, right))\n\n if center:\n return mask5.astype('uint8'), mask_center.astype(int)\n else:\n return mask5.astype('uint8')",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n ships = info['ships']\n count = len(ships)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (ship, dims) in enumerate(info['ships']):\n mask[:, :, i:i + 1] = self.draw_mask(mask[:, :, i:i + 1].copy(),\n ship, dims)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(\n occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in ships])\n return mask, class_ids.astype(np.int32)",
"def load_mask_pre(self, image_id, mask_path):\n img = Image.open(mask_path)\n colors = img.getcolors()\n n_dim = np.shape(colors)\n num_obj = n_dim[0]-1 #not include the background\n\n mask = np.zeros([np.shape(img)[0], np.shape(img)[1], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img, colors)\n\n # Map class names to class IDs.\n class_ids = []\n for i in range(num_obj):\n class_ids.append(colors[i+1][1])\n\n return mask.astype(np.bool), np.array(class_ids, dtype=np.int32) #mask.astype(np.bool)"
] | [
"0.7431536",
"0.7378019",
"0.71810657",
"0.71287495",
"0.7058122",
"0.70275694",
"0.682493",
"0.66390985",
"0.6612292",
"0.6592753",
"0.6572625",
"0.65397227",
"0.6522591",
"0.6463834",
"0.64407456",
"0.64399445",
"0.64317584",
"0.64289963",
"0.63150597",
"0.6312492",
"0.62943166",
"0.62818193",
"0.6213302",
"0.6209856",
"0.61794573",
"0.6151981",
"0.61361694",
"0.6107085",
"0.6068906",
"0.60609174"
] | 0.8144621 | 0 |
Compute a mask based on an NDXI feature. | def get_ndxi_mask(generator, feature=NirNDVI):
windows = (generator.step_size, )
values = next(extract_features([feature(windows)], generator)).vector
values.shape = (values.shape[0], values.shape[1])
mask = np.array(values.mask)
unmasked_values = np.array(values[~values.mask])
mask[~mask] = unmasked_values < threshold_otsu(unmasked_values)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")",
"def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5",
"def get_mask(self):\n # use the feature array a to calculate which channels to include etc\n sums = np.sum(self.feature_array, 0)\n feature_mask = np.repeat(np.ones(4, dtype=int), self.n_features)\n # if there are \"missing\" channels use the older version of KK\n zero_sums = sums == 0\n if np.any(zero_sums):\n self.distribution = 1\n feature_mask[zero_sums] = 0\n self.feature_mask = feature_mask\n return feature_mask",
"def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3",
"def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3",
"def feature(input_dir, input_img, mask, output_dir, window_size):\n\t# Get the pixel array of the ROI\n\tws = int(float(window_size))\n\tfilename = os.path.join(input_dir, input_img)\n\tim = nib.load(filename)\n\taffine = im.affine\n\timg = im.get_data() \n\t# img= (img - img.min()) / (np.sort(img,axis=None)[-10] - img.min()) * 255\n\t# img[img<0]=0\n\t# img[img>255]=255\n\t# fea_img = nib.Nifti1Image(img, affine)\n\t# nib.save(fea_img, output_dir + 'Img_255.nii.gz')\t\n\tMask=nib.load(os.path.join(input_dir, mask)).get_data()\n\t# Slices=np.unique(np.where(Mask==1)[2])\n\t# img=img[:,:,tuple(Slices)]\n\t# Mask=Mask[:,:,tuple(Slices)]\n\t\"\"\" Local feature calculation \"\"\"\n\tif len(img.shape)>2:\n\t\tlength, width, height = img.shape\n\telse:\n\t\timg=img.reshape(img.shape[0],img.shape[1],1)\n\t\tMask=Mask.reshape(img.shape[0],img.shape[1],1)\n\t\tlength, width, height = img.shape\n\t# number of the features\n\tn_fea = 99\n\tfea = np.zeros((length, width, height, n_fea))\n\tfor h in range(height):\n\t\tfor l in range(length):\n\t\t\tfor w in range(width):\n\t\t\t\tif Mask[l,w,h]==0:\n\t\t\t\t\tcontinue\n\t\t\t\tpatch0 = img[max((l - ws),0):min(length,(l + ws)), max((w - ws),0):min(width,(w + ws)), h]\n\t\t\t\tl0, w0 = patch0.shape\n\t\t\t\tpatch = np.array(np.reshape(patch0, (1, l0 * w0)))\n\n\t\t\t\t\"\"\"\n\t\t\t\tfirst order statistics based feature\n\t\t\t\tlist {max, min, median, 25percentile, 75percentile, std, skew, kurtosis, entropy}\n\t\t\t\t\"\"\"\n\t\t\t\tfea[l, w, h, 0] = np.max(patch[0])\n\t\t\t\tfea[l, w, h, 1] = np.min(patch[0])\n\t\t\t\tfea[l, w, h, 2] = np.median(patch[0])\n\t\t\t\tfea[l, w, h, 3] = np.percentile(patch[0], 25)\n\t\t\t\tfea[l, w, h, 4] = np.percentile(patch[0], 75)\n\t\t\t\tfea[l, w, h, 5] = np.percentile(patch[0], 75)-np.percentile(patch[0], 25)\n\t\t\t\tfea[l, w, h, 6] = np.std(patch[0])\n\t\t\t\tfea[l, w, h, 7] = stats.skew(patch[0])\n\t\t\t\tfea[l, w, h, 8] = stats.kurtosis(patch[0])\n\t\t\t\thist = 
stats.histogram(patch[0], numbins=5)\n\t\t\t\tfea[l, w, h, 9] = stats.entropy(hist.count / np.sum(hist.count))\n\n\t\t\t\t\"\"\"\n\t\t\t\tGLCM based feature\n\t\t\t\tlist {angular second moment, contrast, correlation, variance, inverse difference moment,\n\t\t\t\tsum average, sum variance, sum entropy, entropy, difference variance, difference entropy,\n\t\t\t\tinfo. measure. of corr. 1, info. measure. of corr. 2, max. correlation coefficient}\n\t\t\t\t\"\"\"\n\t\t\t\tpatch2 = np.array((patch0 - img.min()) / (img.max() - img.min()) * 256, dtype=np.uint8)\n\t\t\t\tg_matrix = features.haralick(patch2)\n\t\t\t\tfea[l, w, h, 10:23] = np.mean(g_matrix, axis=0)\n\n\t\t\t\t\"\"\"\n\t\t\t\tLocal Binary Patterns based shape descriptors {7 first order statistics on histogram of LBP}\n\t\t\t\t\"\"\"\n\t\t\t\tlbp = local_binary_pattern(patch0, 8, ceil(ws/2), 'default')\n\t\t\t\tfea[l, w, h, 23] = np.max(lbp)\n\t\t\t\tfea[l, w, h, 24] = np.min(lbp)\n\t\t\t\tfea[l, w, h, 25] = np.median(lbp)\n\t\t\t\tfea[l, w, h, 26] = np.percentile(lbp, 25)\n\t\t\t\tfea[l, w, h, 27] = np.percentile(lbp, 75)\n\t\t\t\tfea[l, w, h, 28] = np.percentile(lbp, 75)-np.percentile(lbp, 25)\n\t\t\t\tfea[l, w, h, 29] = np.std(lbp)\n\t\t\t\t\n\t\t\t\t\"\"\"\n\t\t\t\tHu Moment based shape descriptor {7 moments}\n\t\t\t\t\"\"\"\n\t\t\t\tm = moments(np.array(patch0, dtype=np.float))\n\t\t\t\tcr = m[0, 1] / m[0, 0]\n\t\t\t\tcc = m[1, 0] / m[0, 0]\n\t\t\t\tcm = moments_central(np.array(patch0, dtype=np.float), cr, cc)\n\t\t\t\tncm = moments_normalized(cm)\n\t\t\t\thum = moments_hu(ncm)\n\t\t\t\tfea[l, w, h, 30:37] = hum\n\n\t\t\t\t\"\"\"\n\t\t\t\tZernike moment based shape descriptors {first 8 moments}\n\t\t\t\t\"\"\"\n\t\t\t\tzm = features.zernike_moments(patch0, ws)\n\t\t\t\tfea[l, w, h, 37:45] = zm[1:9]\t\t\n\t\t\t\t\n\t\t\t\t\"\"\"\n\t\t\t\tThreshold Adjacency Statistics based shape descriptors {9 statistics} * 6 = 54\n\t\t\t\t\"\"\"\n\t\t\t\ttas = features.tas(patch0)\n\t\t\t\t# lentas = 
len(tas)\n\t\t\t\tfea[l, w, h, 45:100] = tas[:54]\n\t# Save all the local feature maps in NIFTI format\t\n\tfeaturedir=os.path.join(output_dir,'Size'+str(window_size))\n\tif not os.path.exists(featuredir):\n\t\tos.makedirs(featuredir)\t\n\tfor ii in range(n_fea):\n\t\toutput_filename = featuredir+'/fea_'+str(ii+1)+'.nii.gz'\n\t\tdata = np.reshape(fea[:, :, :, ii], (length, width, height))\n\t\tfea_img = nib.Nifti1Image(data, affine)\n\t\tnib.save(fea_img, output_filename)",
"def Expand_Mask(mask, feature_dict):\n new_mask = np.zeros(mask.shape + (len(feature_dict),))\n for i in feature_dict.keys():\n ni = int(i)\n new_mask[mask == ni,ni] = 1 \n return new_mask",
"def mask(self, mask, logger=logger):\n if self.nodata is not None:\n da_masked = self._obj.where(mask != 0, self.nodata)\n else:\n logger.warning(\"Nodata value missing, skipping mask\")\n da_masked = self._obj\n return da_masked",
"def get_mask(self, img):\n raise NotImplementedError()",
"def attention_mask(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out",
"def add_mask(self):\n return xr.open_dataset(f'/{test.dlfile_directory}/{test.climate}_mask_{test.mask_str}_dldata_traintest.nc')",
"def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)",
"def get_regions_mask(self, input):",
"def mask(self):",
"def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):\n print(\"Loading NLCD LULC\")\n b = nlcd_ds.GetRasterBand(1)\n l = b.ReadAsArray()\n print(\"Filtering NLCD LULC with: %s\" % filter)\n #Original nlcd products have nan as ndv\n #12 - ice\n #31 - rock\n #11 - open water, includes rivers\n #52 - shrub, <5 m tall, >20%\n #42 - evergreeen forest\n #Should use data dictionary here for general masking\n #Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes\n if filter == 'rock':\n mask = (l==31)\n elif filter == 'rock+ice':\n mask = np.logical_or((l==31),(l==12))\n elif filter == 'rock+ice+water':\n mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))\n elif filter == 'not_forest':\n mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))\n elif filter == 'not_forest+not_water':\n mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))\n else:\n print(\"Invalid mask type\")\n mask = None\n #Write out original data\n if out_fn is not None:\n print(\"Writing out %s\" % out_fn)\n iolib.writeGTiff(l, out_fn, nlcd_ds)\n l = None\n return mask",
"def _get_flag(self, var):\n\n try:\n index = self.dataset[var].index\n except (KeyError, AttributeError):\n raise ValueError('Unable to get variable index')\n\n clear_air_series = self.dataset['NV_CLEAR_AIR_MASK']()\n mask = clear_air_series == UNMASKED\n nv_start = clear_air_series.index[0]\n nv_end = clear_air_series.index[-1]\n mask = mask.reindex(index)\n mask.loc[(mask.index <= nv_start) | (mask.index >= nv_end)] = 0\n mask.fillna(method='ffill', inplace=True)\n mask.fillna(method='bfill', inplace=True)\n\n return mask",
"def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask",
"def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask",
"def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask",
"def mask_nodata(self):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask_nodata()\n return ds_out",
"def mask(self):\n return self.mask_index",
"def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)",
"def causal_attention_mask(nd, ns, dtype):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n return tf.cast(m, dtype)",
"def mask(self, mask):\n return MaskedDistribution(self, mask)",
"def get_input_mask_at(self, node_index):\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def mask(self, mask):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask(mask)\n return ds_out",
"def get_sample_mask(self):",
"def mask_image(image):\n pass",
"def mask_nodata(img_patch: Union[str, Path], gt_patch: Union[str, Path], nodata_val: int, mask_val: int = 255) -> None:\n image_ds = gdal.Open(str(img_patch), gdalconst.GA_ReadOnly)\n image_arr = image_ds.ReadAsArray()\n nodata_mask = image_arr != nodata_val\n nodata_mask_flat = np.sum(nodata_mask, axis=0) != 0\n\n if nodata_mask_flat.min() == 1:\n image_ds = None\n return\n\n gt_patch_ds = gdal.Open(str(gt_patch), gdalconst.GA_Update)\n gt_patch_arr = gt_patch_ds.ReadAsArray()\n masked_gt_arr = np.where(nodata_mask_flat == 1, gt_patch_arr, mask_val)\n gt_patch_ds.GetRasterBand(1).WriteArray(masked_gt_arr)\n gt_patch_ds = None\n image_ds = None",
"def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)"
] | [
"0.64785165",
"0.6324115",
"0.6131358",
"0.6096008",
"0.6096008",
"0.60605854",
"0.6017692",
"0.5902445",
"0.58749795",
"0.586848",
"0.5867567",
"0.5862303",
"0.584124",
"0.5813988",
"0.5810991",
"0.58052564",
"0.5790705",
"0.57692015",
"0.5762006",
"0.5745222",
"0.57375586",
"0.5727042",
"0.5719462",
"0.5717575",
"0.5712725",
"0.56734455",
"0.56588167",
"0.565762",
"0.5653491",
"0.5606505"
] | 0.7317054 | 0 |
Run computation on node performing `op_fun`. `op_fun` has to accept a node as an argument. | def run_op_node(input_data, op_fun, *args):
runtime = get_runtime()
comp_args = []
op_fun_args = []
comp_inputs = []
for idx, data in enumerate(input_data):
if np.isscalar(data):
op_fun_args.append(ng.constant(data, _get_numpy_dtype(data)))
else:
node = ng.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype)
op_fun_args.append(node)
comp_args.append(node)
comp_inputs.append(data)
op_fun_args.extend(args)
node = op_fun(*op_fun_args)
computation = runtime.computation(node, *comp_args)
return computation(*comp_inputs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_op_numeric_data(input_data, op_fun, *args):\n runtime = get_runtime()\n node = op_fun(input_data, *args)\n computation = runtime.computation(node)\n return computation()",
"def __call__(self, tf_node, input_ops):\n op_name = tf_node.op\n\n # if op not handled, gets -1\n ng_op = getattr(self, op_name, None)\n\n if ng_op:\n return ng_op(tf_node, input_ops)\n else:\n # ignored op set to None\n print(tf_node.name, \"ignored.\")\n return None",
"def local_operation(operation, node, environment):\n run_operation(operation, node, environment)",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % operation",
"def run_node(\n self,\n node,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n ):\n # Record stack trace of node in diagnostic.\n node_stack_trace = node.stack_trace\n if node_stack_trace:\n diagnostic = self.diagnostic_context.inflight_diagnostic(\n rule=diagnostics.rules.fx_node_to_onnx\n )\n diagnostic.with_additional_message(\n f\"### PyTorch source information\\n```\\n{node_stack_trace}\\n```\"\n )\n location = _location_from_fx_stack_trace(node_stack_trace)\n if location is not None:\n diagnostic.with_location(location)\n\n if node.op == \"placeholder\":\n self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n elif node.op == \"get_attr\":\n self.get_attr(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n fx_graph_module,\n )\n elif node.op == \"call_function\":\n self.call_function(\n node,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n onnxfunction_dispatcher,\n op_level_debug,\n fx_graph_module,\n )\n elif node.op == \"call_method\":\n self.call_method(node)\n elif node.op == \"call_module\":\n self.call_module(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n onnxscript_tracer,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n )\n elif node.op == \"output\":\n self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n else:\n raise RuntimeError(f\"Found node type not defined in torch.fx: {node.op}\")",
"def eval(self, i, node, fn):\r\n if self.pre_func is not None:\r\n self.pre_func(i, node, fn)\r\n fn()\r\n if self.post_func is not None:\r\n self.post_func(i, node, fn)",
"def register_apply_node_func(self, func, block_id=...): # -> None:\n ...",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def eval_node(node, env):\n global genv\n global result\n node_type = node_name(node)\n\n if node_type == 'Expr':\n return eval_node(node.value, env)\n elif node_type == 'Assign':\n val = eval_node(node.value, env)\n\n while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv):\n val = val[0]\n\n # extract the variable name, evaluate the RHS, then extend the environment.\n return 0, env.extend([node.targets[0].id], [val])\n elif node_type == 'BinOp':\n # get the left and right operands (we use only single operands) and the operator.\n # evaluate the operands and apply the operator. return the number, env.\n\n left = eval_node(node.left, env)[0]\n right = eval_node(node.right, env)[0]\n\n left = left[0] if type(left) is tuple else left\n right = right[0] if type(right) is tuple else right\n\n op = node_name(node.op)\n\n if op == \"Add\":\n return (left + right), env\n elif op == \"Sub\":\n return (left - right), env\n elif op == \"Mult\":\n return (left * right), env\n elif op == \"Div\":\n return (left / right), env\n elif op == \"Mod\":\n return (left % right), env\n return 0, env\n elif node_type == 'FunctionDef':\n # need the function id (name), args, and body. 
Extend the environment.\n # you can leave the args wrapped in the ast class and the body and unpack them\n # when the function is called.\n\n return 0, env.extend([node.name], [(node.args, node.body)])\n elif node_type == 'Call':\n # get any values passed in to the function from the Call object.\n # get the fxn name and look up its parameters, if any, and body from the env.\n # get lists for parameter names and values and extend a LocalEnv with those bindings.\n # evaluate the body in the local env, return the value, env.\n\n func = eval_node(node.func, env)[0]\n local_env = LocalEnv(None, env)\n\n args = func[0].args\n body = func[1]\n\n index = 0\n for val in node.args:\n local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]])\n index += 1\n\n for node in body:\n val = eval_node(node, local_env)\n\n if node_name(node) == \"Return\":\n output_val = val[0]\n local_env = val[1]\n return output_val, env\n elif node_type == 'Return':\n # evaluate the node, return the value, env.\n return eval_node(node.value, env)\n elif node_type == 'Name':\n # Name(identifier id)- lookup the value binding in the env\n # return the value, env\n return env.lookup(node.id), env\n # Num(object n) -- a number, return the number, env.\n elif node_type == 'Num':\n return node.n, env",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def run(self, in_op):\n raise NotImplementedError",
"def tf_op(\n self, py_fun):\n with tf.name_scope('tf_op'):\n return self.context.as_nql(py_fun(self.tf), self._type_name)",
"def run1(self, eval_node , feed_dict = {}):\r\n self.eval_node_list = eval_node \r\n for i in feed_dict:\r\n feed_dict[i] = np.array(feed_dict[i]).astype(i.dtype)\r\n #print(self.eval_node_list)\r\n node_to_val_map = dict(feed_dict)\r\n # Traverse graph in topological sort order and compute values for all nodes.\r\n topo_start_node = []\r\n for node in self.eval_node_list:\r\n if isinstance(node,Node):\r\n topo_start_node.append(node)\r\n if topo_start_node==[]:\r\n return eval_node\r\n topo_order = find_topo_sort(topo_start_node)\r\n \r\n \"\"\"TODO: Your code here\"\"\"\r\n for node in topo_order :\r\n if isinstance(node.op, PlaceholderOp):\r\n if not(node in node_to_val_map) and (node in global_variables):\r\n node_to_val_map[node] = global_variables[node]\r\n continue \r\n if not(node in node_to_val_map):\r\n input_vals1=[]\r\n for inp in node.inputs:\r\n input_vals1.append( node_to_val_map[inp] )\r\n #print(input_vals1)\r\n node_to_val_map[node] = node.op.compute(node, input_vals1)\r\n # Collect node values.\r\n node_val_results = []\r\n for node in self.eval_node_list:\r\n if isinstance(node, Node):\r\n node_val_results.append(node_to_val_map[node])\r\n else:\r\n node_val_results.append(node)\r\n #node_val_results = [node_to_val_map[node] for node in self.eval_node_list]\r\n return node_val_results",
"def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)",
"def graph_to_function(graph, target, ctx, shape=None, dtype=None):\n # Infer missing shapes and dtypes\n graph, shape, dtype, output_shapes, output_dtypes = \\\n infer_shapes_dtypes(graph, shape=shape, dtype=dtype)\n\n if None in dtype.values():\n raise ValueError(\"Input variables with no type: {}\".format(dtype))\n\n if not all(shape.values()):\n raise ValueError(\"Input variables with no shape: {}\".format(shape))\n\n compute_graph, lib, params = nnvm.compiler.build(graph, target, shape=shape, dtype=dtype)\n module = graph_runtime.create(compute_graph, lib, ctx)\n\n if params:\n module.set_inputs(**params)\n\n def run(**kwargs):\n module.run(**kwargs)\n res = []\n for i, (o_shape, o_dtype) in enumerate(zip(output_shapes, output_dtypes)):\n res.append(module.get_output(i, tvm.nd.empty(o_shape, o_dtype)).asnumpy())\n return res\n\n return run",
"def test_basic_tf(self):\n def _map_fun(args, ctx):\n import tensorflow as tf\n x = tf.constant(args['x'])\n y = tf.constant(args['y'])\n sum = tf.add(x, y)\n with tf.Session() as sess:\n result = sess.run([sum])\n assert result[0] == 3\n\n args = {'x': 1, 'y': 2}\n cluster = TFCluster.run(self.sc, _map_fun, tf_args=args, num_executors=self.num_workers, num_ps=0)\n cluster.shutdown()",
"def compute(self, node, input_vals):\r\n raise NotImplementedError",
"def RunOperator(op_def, verbose=False):\n if isinstance(op_def, pb.OperatorDef):\n op_def = op_def.SerializeToString()\n _C.RunOperator(op_def, verbose)",
"def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper",
"def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)",
"def with_tree(fun: Callable) -> Callable:\n\n def handle_tree(*args, **kwargs):\n\n # get the task tree\n global task_tree\n\n # create the code object that gets executed\n code = Code(fun, inspect.getcallargs(fun, *args, **kwargs))\n\n task_tree = TaskTreeNode(code, parent=task_tree)\n\n # Try to execute the task\n try:\n task_tree.status = TaskStatus.CREATED\n task_tree.start_time = datetime.datetime.now()\n result = task_tree.code.execute()\n\n # if it succeeded set the flag\n task_tree.status = TaskStatus.SUCCEEDED\n\n # iff a PlanFailure occurs\n except PlanFailure as e:\n\n # log the error and set the flag\n logging.exception(\"Task execution failed at %s. Reason %s\" % (str(task_tree.code), e))\n task_tree.reason = e\n task_tree.status = TaskStatus.FAILED\n raise e\n finally:\n # set and time and update current node pointer\n task_tree.end_time = datetime.datetime.now()\n task_tree = task_tree.parent\n return result\n\n return handle_tree",
"def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))",
"def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()",
"def _operation_traverse(self, op, op_f, aggregate_f, combine_f): # noqa\n # apply op_f for each operation\n op_res = op_f(op)\n if len(op.children) == 0:\n return op_res # no children return\n else:\n # apply _operation_traverse recursively\n children = [\n self._operation_traverse(child, op_f, aggregate_f, combine_f)\n for child in op.children\n ]\n # combine the operation result with the children aggregated result\n return combine_f(op_res, aggregate_f(children))",
"def eval(self, node):\n\n return None",
"def run(self, *ops):\n \n new_ops = []\n for op in ops:\n if isinstance(op, VarStruct):\n new_ops.append(op.var)\n else:\n new_ops.append(op)\n if len(new_ops) == 1:\n return self.sess.run(new_ops[0])\n return self.sess.run(new_ops)",
"def run_thunk_of_node(self, node):\r\n idx = self.node_idx[node]\r\n t0 = time.time()\r\n rval = self.thunks[idx]()\r\n\r\n # Some thunks on some computers run faster than the granularity\r\n # of the time.time clock.\r\n # Profile output looks buggy if a node has run but takes 0 time.\r\n # (and profile code might hide real bugs if it rounds up 0)\r\n dt = max(time.time() - t0, 1e-10)\r\n if self.callback is not None:\r\n self.callback(\r\n node=node,\r\n thunk=self.thunks[idx],\r\n storage_map=self.storage_map,\r\n compute_map=self.compute_map,\r\n )\r\n return rval, dt",
"def evaluationFunction(problem, gFunc, hFunc, node):\n #g = getattr(searchAgents, gFunc)\n #h = getattr(searchAgents, hFunc)\n h = hFunc\n #return g(node) + h(node)\n return gFunc + h(node, problem)",
"def node(func, name=None):\n return NamedFunc(func, name)"
] | [
"0.68987066",
"0.63727605",
"0.61560744",
"0.60866606",
"0.59173733",
"0.589501",
"0.58199143",
"0.56726414",
"0.5670642",
"0.5646401",
"0.5547242",
"0.5536755",
"0.5459274",
"0.5457556",
"0.54173595",
"0.53511155",
"0.534198",
"0.5328317",
"0.53263",
"0.52942103",
"0.5292017",
"0.5279823",
"0.52643454",
"0.52484506",
"0.524668",
"0.5229414",
"0.5209588",
"0.5199147",
"0.5187172",
"0.51802003"
] | 0.7836884 | 0 |
Run computation on node performing `op_fun`. `op_fun` has to accept a scalar or an array. | def run_op_numeric_data(input_data, op_fun, *args):
runtime = get_runtime()
node = op_fun(input_data, *args)
computation = runtime.computation(node)
return computation() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_op_node(input_data, op_fun, *args):\n runtime = get_runtime()\n comp_args = []\n op_fun_args = []\n comp_inputs = []\n for idx, data in enumerate(input_data):\n if np.isscalar(data):\n op_fun_args.append(ng.constant(data, _get_numpy_dtype(data)))\n else:\n node = ng.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype)\n op_fun_args.append(node)\n comp_args.append(node)\n comp_inputs.append(data)\n op_fun_args.extend(args)\n node = op_fun(*op_fun_args)\n computation = runtime.computation(node, *comp_args)\n return computation(*comp_inputs)",
"def __call__(self, tf_node, input_ops):\n op_name = tf_node.op\n\n # if op not handled, gets -1\n ng_op = getattr(self, op_name, None)\n\n if ng_op:\n return ng_op(tf_node, input_ops)\n else:\n # ignored op set to None\n print(tf_node.name, \"ignored.\")\n return None",
"def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op",
"def local_operation(operation, node, environment):\n run_operation(operation, node, environment)",
"def run_node(\n self,\n node,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n ):\n # Record stack trace of node in diagnostic.\n node_stack_trace = node.stack_trace\n if node_stack_trace:\n diagnostic = self.diagnostic_context.inflight_diagnostic(\n rule=diagnostics.rules.fx_node_to_onnx\n )\n diagnostic.with_additional_message(\n f\"### PyTorch source information\\n```\\n{node_stack_trace}\\n```\"\n )\n location = _location_from_fx_stack_trace(node_stack_trace)\n if location is not None:\n diagnostic.with_location(location)\n\n if node.op == \"placeholder\":\n self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n elif node.op == \"get_attr\":\n self.get_attr(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n fx_graph_module,\n )\n elif node.op == \"call_function\":\n self.call_function(\n node,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n onnxfunction_dispatcher,\n op_level_debug,\n fx_graph_module,\n )\n elif node.op == \"call_method\":\n self.call_method(node)\n elif node.op == \"call_module\":\n self.call_module(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n onnxscript_tracer,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n )\n elif node.op == \"output\":\n self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n else:\n raise RuntimeError(f\"Found node type not defined in torch.fx: {node.op}\")",
"def run(self, *ops):\n \n new_ops = []\n for op in ops:\n if isinstance(op, VarStruct):\n new_ops.append(op.var)\n else:\n new_ops.append(op)\n if len(new_ops) == 1:\n return self.sess.run(new_ops[0])\n return self.sess.run(new_ops)",
"def eval(self, i, node, fn):\r\n if self.pre_func is not None:\r\n self.pre_func(i, node, fn)\r\n fn()\r\n if self.post_func is not None:\r\n self.post_func(i, node, fn)",
"def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % operation",
"def compute(self, node, input_vals):\r\n raise NotImplementedError",
"def scalar_op_helper(node, op_name, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n from onnx import numpy_helper\n input_type = kwargs[\"in_type\"]\n scalar_value = np.array([attrs.get(\"scalar\", 1)],\n dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])\n\n initializer = kwargs[\"initializer\"]\n flag = True\n # If the input value is in initializer, just multiply with scalar input\n # and create a new initializer\n for i in initializer:\n if i.name == input_nodes[0]:\n if op_name == 'Mul':\n new_initializer = numpy_helper.to_array(i) * scalar_value[0]\n elif op_name == 'Sub':\n if name.startswith(\"_rminusscalar\"):\n new_initializer = scalar_value[0] - numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) - scalar_value[0]\n elif op_name == 'Add':\n new_initializer = numpy_helper.to_array(i) + scalar_value[0]\n elif op_name == 'Div':\n if name.startswith(\"_rdivscalar\"):\n new_initializer = scalar_value[0] / numpy_helper.to_array(i)\n else:\n new_initializer = numpy_helper.to_array(i) / scalar_value[0]\n elif op_name == 'Pow':\n new_initializer = numpy_helper.to_array(i) ** scalar_value[0]\n flag = False\n break\n\n # else create a new tensor of the scalar value, add it in initializer\n if flag is True:\n dims = np.shape(scalar_value)\n\n scalar_op_name = \"scalar_op\" + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=scalar_op_name,\n data_type=input_type,\n dims=dims,\n vals=scalar_value,\n raw=False,\n )\n )\n\n mul_node = onnx.helper.make_node(\n op_name,\n [input_nodes[0], scalar_op_name],\n [name],\n name=name\n )\n\n return [tensor_node, mul_node]\n else:\n data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]\n dims = np.shape(new_initializer)\n\n new_a_node = input_nodes[0] + str(kwargs[\"idx\"])\n tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, 
dims)\n\n initializer.append(\n onnx.helper.make_tensor(\n name=new_a_node,\n data_type=data_type,\n dims=dims,\n vals=new_initializer,\n raw=False,\n )\n )\n return [tensor_node]",
"def run(self, in_op):\n raise NotImplementedError",
"def eval_node(node, env):\n global genv\n global result\n node_type = node_name(node)\n\n if node_type == 'Expr':\n return eval_node(node.value, env)\n elif node_type == 'Assign':\n val = eval_node(node.value, env)\n\n while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv):\n val = val[0]\n\n # extract the variable name, evaluate the RHS, then extend the environment.\n return 0, env.extend([node.targets[0].id], [val])\n elif node_type == 'BinOp':\n # get the left and right operands (we use only single operands) and the operator.\n # evaluate the operands and apply the operator. return the number, env.\n\n left = eval_node(node.left, env)[0]\n right = eval_node(node.right, env)[0]\n\n left = left[0] if type(left) is tuple else left\n right = right[0] if type(right) is tuple else right\n\n op = node_name(node.op)\n\n if op == \"Add\":\n return (left + right), env\n elif op == \"Sub\":\n return (left - right), env\n elif op == \"Mult\":\n return (left * right), env\n elif op == \"Div\":\n return (left / right), env\n elif op == \"Mod\":\n return (left % right), env\n return 0, env\n elif node_type == 'FunctionDef':\n # need the function id (name), args, and body. 
Extend the environment.\n # you can leave the args wrapped in the ast class and the body and unpack them\n # when the function is called.\n\n return 0, env.extend([node.name], [(node.args, node.body)])\n elif node_type == 'Call':\n # get any values passed in to the function from the Call object.\n # get the fxn name and look up its parameters, if any, and body from the env.\n # get lists for parameter names and values and extend a LocalEnv with those bindings.\n # evaluate the body in the local env, return the value, env.\n\n func = eval_node(node.func, env)[0]\n local_env = LocalEnv(None, env)\n\n args = func[0].args\n body = func[1]\n\n index = 0\n for val in node.args:\n local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]])\n index += 1\n\n for node in body:\n val = eval_node(node, local_env)\n\n if node_name(node) == \"Return\":\n output_val = val[0]\n local_env = val[1]\n return output_val, env\n elif node_type == 'Return':\n # evaluate the node, return the value, env.\n return eval_node(node.value, env)\n elif node_type == 'Name':\n # Name(identifier id)- lookup the value binding in the env\n # return the value, env\n return env.lookup(node.id), env\n # Num(object n) -- a number, return the number, env.\n elif node_type == 'Num':\n return node.n, env",
"def tf_op(\n self, py_fun):\n with tf.name_scope('tf_op'):\n return self.context.as_nql(py_fun(self.tf), self._type_name)",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def graph_to_function(graph, target, ctx, shape=None, dtype=None):\n # Infer missing shapes and dtypes\n graph, shape, dtype, output_shapes, output_dtypes = \\\n infer_shapes_dtypes(graph, shape=shape, dtype=dtype)\n\n if None in dtype.values():\n raise ValueError(\"Input variables with no type: {}\".format(dtype))\n\n if not all(shape.values()):\n raise ValueError(\"Input variables with no shape: {}\".format(shape))\n\n compute_graph, lib, params = nnvm.compiler.build(graph, target, shape=shape, dtype=dtype)\n module = graph_runtime.create(compute_graph, lib, ctx)\n\n if params:\n module.set_inputs(**params)\n\n def run(**kwargs):\n module.run(**kwargs)\n res = []\n for i, (o_shape, o_dtype) in enumerate(zip(output_shapes, output_dtypes)):\n res.append(module.get_output(i, tvm.nd.empty(o_shape, o_dtype)).asnumpy())\n return res\n\n return run",
"def convert_elemwise(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == 
BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def apply(op_type, device, inputs, **kwargs):\n cache = ExecutionCache.get_cache(op_type)\n run_config = cache.get_config(device, **kwargs)\n return FunctionLib._forward(inputs, run_config, **kwargs)",
"def reduce(op, in_array, out_array, axis=None, identity=None):\n # The function is empty because it is parsed in the Python frontend\n return None",
"def run1(self, eval_node , feed_dict = {}):\r\n self.eval_node_list = eval_node \r\n for i in feed_dict:\r\n feed_dict[i] = np.array(feed_dict[i]).astype(i.dtype)\r\n #print(self.eval_node_list)\r\n node_to_val_map = dict(feed_dict)\r\n # Traverse graph in topological sort order and compute values for all nodes.\r\n topo_start_node = []\r\n for node in self.eval_node_list:\r\n if isinstance(node,Node):\r\n topo_start_node.append(node)\r\n if topo_start_node==[]:\r\n return eval_node\r\n topo_order = find_topo_sort(topo_start_node)\r\n \r\n \"\"\"TODO: Your code here\"\"\"\r\n for node in topo_order :\r\n if isinstance(node.op, PlaceholderOp):\r\n if not(node in node_to_val_map) and (node in global_variables):\r\n node_to_val_map[node] = global_variables[node]\r\n continue \r\n if not(node in node_to_val_map):\r\n input_vals1=[]\r\n for inp in node.inputs:\r\n input_vals1.append( node_to_val_map[inp] )\r\n #print(input_vals1)\r\n node_to_val_map[node] = node.op.compute(node, input_vals1)\r\n # Collect node values.\r\n node_val_results = []\r\n for node in self.eval_node_list:\r\n if isinstance(node, Node):\r\n node_val_results.append(node_to_val_map[node])\r\n else:\r\n node_val_results.append(node)\r\n #node_val_results = [node_to_val_map[node] for node in self.eval_node_list]\r\n return node_val_results",
"def apply_function(\n self,\n y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"],\n op_str: str,\n ) -> \"ShareTensor\":\n ShareTensor.sanity_check(y)\n\n op = ShareTensor.get_op(self.ring_size, op_str)\n numpy_type = utils.RING_SIZE_TO_TYPE.get(self.ring_size, None)\n if numpy_type is None:\n raise ValueError(f\"Do not know numpy type for ring size {self.ring_size}\")\n\n if isinstance(y, ShareTensor):\n utils.get_ring_size(self.ring_size, y.ring_size) # sanity check\n value = op(self.child, y.child)\n else:\n if op_str in {\"add\", \"sub\"}:\n # TODO: Converting y to numpy because doing \"numpy op torch tensor\" raises exception\n value = (\n op(self.child, np.array(y, numpy_type))\n if self.rank == 0\n else deepcopy(self.child)\n )\n elif op_str in [\"mul\", \"matmul\", \"lt\"]:\n value = op(self.child, np.array(y, numpy_type))\n else:\n raise ValueError(f\"{op_str} not supported\")\n\n res = self.copy_tensor()\n res.child = value\n return res",
"def test_basic_tf(self):\n def _map_fun(args, ctx):\n import tensorflow as tf\n x = tf.constant(args['x'])\n y = tf.constant(args['y'])\n sum = tf.add(x, y)\n with tf.Session() as sess:\n result = sess.run([sum])\n assert result[0] == 3\n\n args = {'x': 1, 'y': 2}\n cluster = TFCluster.run(self.sc, _map_fun, tf_args=args, num_executors=self.num_workers, num_ps=0)\n cluster.shutdown()",
"def exec_op(self, op, input_values,\n deterministic, training, **_):\n input_kwargs: Dict[str, Any] = op.input_kwargs\n op_kwargs: Dict[str, Any] = op.op_kwargs\n op_type = op.type\n if \"name\" not in op_kwargs:\n raise ValueError(\"Op kwargs must contain a name.\")\n op_name = op_kwargs[\"name\"]\n\n if op_type == OpType.NONE:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n assert len(op_kwargs) == 1\n output_values = [lax.stop_gradient(input_value)]\n\n elif op_type == OpType.IDENTITY:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n assert len(op_kwargs) == 1\n output_values = [input_value]\n\n # nn.linear\n\n elif op_type == OpType.DENSE:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n output_values = [nn.Dense(**op_kwargs)(input_value)]\n\n elif op_type == OpType.DENSE_GENERAL:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n assert 2 <= len(op_kwargs) <= 7\n output_values = [nn.DenseGeneral(**op_kwargs)(input_value)]\n\n elif op_type == OpType.CONV:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n\n ks = op_kwargs[\"kernel_size\"]\n if isinstance(ks, int):\n op_kwargs[\"kernel_size\"] = (ks,) * (input_value.ndim - 2)\n\n output_values = [nn.Conv(**op_kwargs)(input_value)]\n\n # others\n\n elif op_type == OpType.MUL:\n assert len(input_values) == 2\n assert not input_kwargs\n assert len(op_kwargs) == 1 # name\n output_values = [input_values[0] * input_values[1]]\n\n elif op_type in [OpType.ADD, OpType.STOCH_DEPTH]:\n assert len(op_kwargs) == 1 # name\n\n input_value = input_values[0]\n if \"layer_drop_rate\" in input_kwargs:\n assert len(input_kwargs) == 1\n survival_rate = 1 - input_kwargs[\"layer_drop_rate\"]\n if survival_rate == 1.0 or deterministic:\n pass\n else:\n # Reuse dropout's rng stream.\n rng = self.make_rng(\"dropout\")\n 
mask_shape = [input_value.shape[0]] + [1] * (input_value.ndim - 1)\n mask = random.bernoulli(rng, p=survival_rate, shape=mask_shape)\n mask = jnp.tile(mask, [1] + list(input_value.shape[1:]))\n input_value = lax.select(mask, input_value / survival_rate,\n jnp.zeros_like(input_value))\n else:\n assert not input_kwargs\n assert op_type == OpType.ADD\n\n if op_type == OpType.ADD:\n assert len(input_values) == 2\n output_values = [input_value + input_values[1]]\n else:\n assert len(input_values) == 1\n output_values = [input_value]\n\n elif op_type == OpType.SCALAR_MUL:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) <= 1\n assert len(op_kwargs) == 1 # name\n if \"const\" in input_kwargs:\n c = input_kwargs[\"const\"]\n else:\n c = 1 / jnp.sqrt(input_values[0].shape[-1])\n output_values = [input_values[0] * c]\n\n elif op_type == OpType.SCALAR_ADD:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) <= 1\n assert len(op_kwargs) == 1 # name\n assert \"const\" in input_kwargs\n c = input_kwargs[\"const\"]\n output_values = [input_values[0] + c]\n\n elif op_type == OpType.DOT_GENERAL:\n assert len(input_values) == 2\n assert 0 < len(input_kwargs) <= 3\n assert len(op_kwargs) == 1 # name\n output_values = [\n lax.dot_general(input_values[0], input_values[1], **input_kwargs)\n ]\n\n elif op_type == OpType.EINSUM:\n assert len(input_values) == 2\n assert len(input_kwargs) == 1\n assert \"sum\" in input_kwargs\n output_values = [\n jnp.einsum(input_kwargs[\"sum\"], input_values[0], input_values[1])\n ]\n\n # nn.attention\n\n elif op_type == OpType.SELF_ATTENTION:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n output_values = [\n nn.SelfAttention(**op_kwargs,\n deterministic=deterministic)(input_value)\n ]\n\n # nn.activation\n\n elif op_type in [OpType.RELU, OpType.GELU, OpType.SWISH, OpType.SIGMOID]:\n assert len(input_values) == 1\n input_value = 
input_values[0]\n assert not input_kwargs\n fn = {\n OpType.RELU: nn.relu,\n OpType.GELU: nn.gelu,\n OpType.SWISH: nn.swish,\n OpType.SIGMOID: nn.sigmoid\n }[op_type]\n output_values = [fn(input_value)]\n\n elif op_type == OpType.SOFTMAX:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) <= 1\n output_values = [nn.softmax(input_value, **input_kwargs)]\n\n # nn.normalization\n\n elif op_type == OpType.BATCH_NORM:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) <= 1\n add_kwargs = {}\n if \"use_running_average\" not in input_kwargs:\n add_kwargs = {\"use_running_average\": not training}\n else:\n add_kwargs = {}\n output_values = [\n nn.BatchNorm(**op_kwargs)(input_value, **input_kwargs, **add_kwargs)\n ]\n\n elif op_type == OpType.LAYER_NORM:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n output_values = [nn.LayerNorm(**op_kwargs)(input_value)]\n\n elif op_type == OpType.GROUP_NORM:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n output_values = [nn.GroupNorm(**op_kwargs)(input_value)]\n\n # reshape operators\n\n elif op_type == OpType.RESHAPE:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert 0 < len(input_kwargs) < 3\n new_shape = input_kwargs.pop(\"new_shape\")\n if new_shape[0] == \"B\":\n new_shape = (input_value.shape[0],) + new_shape[1:]\n output_values = [jnp.reshape(input_value, new_shape, **input_kwargs)]\n\n elif op_type == OpType.FLATTEN:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert not input_kwargs\n new_shape = (input_value.shape[0], -1)\n output_values = [jnp.reshape(input_value, new_shape)]\n\n elif op_type == OpType.TRANSPOSE:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) == 1\n assert len(op_kwargs) == 1 # name\n output_values = [jnp.transpose(input_value, **input_kwargs)]\n\n # 
nn.stochastic\n\n elif op_type == OpType.DROPOUT:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert len(input_kwargs) <= 1\n output_values = [\n nn.Dropout(**op_kwargs)(\n input_value, deterministic=deterministic, **input_kwargs)\n ]\n\n # nn.pooling\n\n elif op_type == OpType.AVG_POOL or op_type == OpType.MAX_POOL:\n op_fn = nn.avg_pool if op_type == OpType.AVG_POOL else nn.max_pool\n assert len(input_values) == 1\n input_value = input_values[0]\n assert input_kwargs\n\n ws = input_kwargs[\"window_shape\"]\n if isinstance(ws, int):\n ws = [ws] * (input_value.ndim - 2)\n new_ws = []\n for window_dim_shape, dim_shape in zip(ws, input_value.shape[1:]):\n if window_dim_shape == 0:\n new_ws.append(dim_shape)\n else:\n new_ws.append(window_dim_shape)\n input_kwargs[\"window_shape\"] = tuple(new_ws)\n\n if \"strides\" in input_kwargs:\n s = input_kwargs[\"strides\"]\n if isinstance(s, int):\n input_kwargs[\"strides\"] = (s,) * (input_value.ndim - 2)\n\n output_values = [op_fn(input_value, **input_kwargs)]\n\n elif op_type == OpType.MEAN:\n assert len(input_values) == 1\n input_value = input_values[0]\n assert input_kwargs\n output_values = [jnp.mean(input_value, **input_kwargs)]\n\n # new param\n\n elif op_type == OpType.PARAM:\n assert not input_values\n assert 0 < len(input_kwargs) <= 2\n init_fn = input_kwargs.pop(\"init_fn\")\n\n init_fn_with_kwargs = functools.partial(init_fn, **input_kwargs)\n output_values = [self.param(op_name, init_fn_with_kwargs)]\n\n else:\n raise ValueError(f\"op_type {op_type} not supported...\")\n\n return output_values",
"def _apply_binary_op_elementwise(\n self: ConcreteStructuredMetricValue, other: ConcreteStructuredMetricValue,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def run_operator(scope_node, node, name, op, code, f_globals):\n operators = __get_operators()\n if op not in operators:\n raise TypeError(\"failed to load operator '%s'\" % op)\n scope_key = scope_node.scope_key\n pair = operators[op](code, scope_key, f_globals)\n if isinstance(name, tuple):\n # The template inst binding with a single name will take this\n # path by using a length-1 name tuple. See bug #78.\n bind_extended_member(node, name, pair, scope_key)\n else:\n item = getattr(node.klass, name, None)\n if isinstance(item, Alias):\n bind_aliased_member(node, name, item, pair, scope_key)\n else:\n # This is the path for a standard binding on a child def.\n # It does not need the closure scope key. See bug #78.\n bind_member(node, name, pair)",
"def RunOperator(op_def, verbose=False):\n if isinstance(op_def, pb.OperatorDef):\n op_def = op_def.SerializeToString()\n _C.RunOperator(op_def, verbose)",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 2\r\n return input_vals[0] / input_vals[1]",
"def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)",
"def _partition_call_operator(self, inputs, attr):\n\n try:\n from tensorflow.python.framework import function_def_to_graph\n except ImportError as e:\n raise ImportError(f\"Unable to import tensorflow which is required {e}\")\n\n main_graph_proto = self._main_graph_proto\n outer_graph_def = main_graph_proto._graph\n\n node_func_name = attr.get(\"f\").name\n func = next(\n (f for f in outer_graph_def.library.function if f.signature.name == node_func_name),\n None,\n )\n if func:\n devices = set(node.device for node in func.node_def)\n if len(devices) > 1:\n raise Exception(\n \"Found inconsistent Device assignment in the \"\n \"Stateful Partitioned SubGraph. Rejecting \"\n \"the subgraph \"\n )\n # Convert function definition to graph\n func_input_shapes = func.attr[\"_input_shapes\"].list.shape\n subgraph, _ = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)\n\n # Computing subgraph's input shape dictionary\n subgraph_shape_dict, input_expr_dict = {}, {}\n for f_arg, input in zip(func.signature.input_arg, inputs):\n input_expr_dict[f_arg.name] = input\n subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph_proto._mod)\n\n func_name = f\"func_{func.signature.name}\"\n try:\n global_func = main_graph_proto._mod[func_name]\n sub_func = global_func\n sub_params = main_graph_proto._params\n except ValueError:\n # Construct relay nodes from the subgraph\n g1 = SubGraphProto(main_graph_proto)\n sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict)\n main_graph_proto._params.update(sub_params)\n func_expr = _function.Function(sub_func.params, sub_func.body)\n global_func = tvm.relay.GlobalVar(func_name)\n main_graph_proto._mod[global_func] = func_expr\n main_graph_proto._mod = InferType()(main_graph_proto._mod)\n\n param_exprs = []\n for param_expr in sub_func.params:\n # sub_params is subset of sub_func.params\n param_name = param_expr.vid.name_hint\n if param_name in input_expr_dict.keys():\n 
param_exprs.append(input_expr_dict[param_name])\n elif param_name in sub_params.keys():\n param_exprs.append(param_expr)\n else:\n raise Exception(f\"Input parameter {param_name} not found\")\n\n sb = tvm.relay.scope_builder.ScopeBuilder()\n loop_ret = global_func(*param_exprs)\n sb.ret(loop_ret)\n ret = sb.get()\n else:\n raise Exception(f\"Function not found - {node_func_name}\")\n return ret",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 2\r\n return input_vals[0] * input_vals[1]"
] | [
"0.8162125",
"0.6447787",
"0.6189435",
"0.596804",
"0.5792386",
"0.5773607",
"0.5733637",
"0.56997246",
"0.5688552",
"0.5668171",
"0.5667309",
"0.55916333",
"0.55768436",
"0.55464166",
"0.5527756",
"0.54990244",
"0.54635894",
"0.5456901",
"0.5447017",
"0.5434432",
"0.54329336",
"0.5422993",
"0.5413006",
"0.5391331",
"0.53908616",
"0.53895026",
"0.5379462",
"0.53554374",
"0.5349714",
"0.5347114"
] | 0.7614549 | 1 |
Return the latest version of the given schema | def get_latest_version(self, name):
return self.filter(name=name).order_by('schema_version').last() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest",
"def schema_version(self):\n # return self._parsed[\"schemaVersion\"]\n # does not exist in manifest reference\n pass",
"def test_fetch_latest_version(self, mongo_fixture):\n collection_name = 'fake_schema_table'\n\n with mongo_fixture as t_mongo:\n t_mongo[collection_name].insert(self.fake_schema_table)\n\n with mongo_fixture as t_mongo:\n latest_schema = schema_utils.fetch_latest_schema(\n 'fake.schema',\n 'test',\n t_mongo[collection_name]\n )\n\n assert latest_schema['schema'] == {'result': 'YUP'}\n assert latest_schema['version'] == '1.1.1'",
"def get_schema_org_version():\n return _get_schemaorg_version()",
"def get_schemaorg_version():\n try:\n version = get_latest_schemaorg_version()\n except ValueError:\n version = SCHEMAORG_DEFAULT_VERSION\n return version",
"def schema_version(conn):\n with Tx(conn) as c:\n try:\n c.execute('SELECT version FROM meta LIMIT 1', ['version'])\n except psycopg2.ProgrammingError:\n return 0\n if c.rowcount == 0:\n return 0\n return c.fetchone()['version']",
"def schema_version(self):\n return self._parsed[\"schemaVersion\"]",
"def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10",
"def get_latest_version(db_path):\n\t\t\n\t\t# create a file system and return latest version\n\t\treturn VersionedFile(db_path).get_latest_version()",
"def latest_product_version(product):\n return product.productversions.order_by(\"-created_on\").first()",
"def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None",
"def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_latest_ml_pipeline_version() -> str:\n\n return execute_query('''\n SELECT pipeline_version\n FROM ml_pipeline\n ORDER BY created_at DESC\n LIMIT 1\n ''')[0][0]",
"def _get_version_from_db(dbname):\n with psycopg2.connect(f\"dbname='{dbname}'\") as conn, conn.cursor() as cr:\n query = \"SELECT replace((regexp_matches(latest_version, '^\\d+\\.0|^saas~\\d+\\.\\d+|saas~\\d+'))[1], '~', '-') FROM ir_module_module WHERE name='base'\"\n cr.execute(query)\n return cr.fetchone()[0]",
"def test_fetch_latest_version_empty(self, mongo_fixture):\n collection_name = 'blank_schema_table'\n\n with pytest.warns(exceptions.FirstRunWarning):\n with mongo_fixture as t_mongo:\n latest_schema = schema_utils.fetch_latest_schema(\n 'fake.schema',\n 'test',\n t_mongo[collection_name]\n )\n\n assert latest_schema['schema'] == {}\n assert latest_schema['version'] == '1.0.0'",
"def get_schema(self, schema_versions_info):\n schema = None\n version = api_version_request.APIVersionRequest(VOLUME_MICROVERSION)\n for items in schema_versions_info:\n min_version = api_version_request.APIVersionRequest(items['min'])\n max_version = api_version_request.APIVersionRequest(items['max'])\n # This is case where COMPUTE_MICROVERSION is None, which means\n # request without microversion So select base v2.1 schema.\n if version.is_null() and items['min'] is None:\n schema = items['schema']\n break\n # else select appropriate schema as per COMPUTE_MICROVERSION\n elif version.matches(min_version, max_version):\n schema = items['schema']\n break\n if schema is None:\n raise exceptions.JSONSchemaNotFound(\n version=version.get_string(),\n schema_versions_info=schema_versions_info)\n return schema",
"def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")",
"def get_datasetSchemaVersion(self):\n\t\treturn self.dsDoc['about']['datasetSchemaVersion']",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def get_problemSchemaVersion(self):\n\t\treturn self.prDoc['about']['problemSchemaVersion']",
"def check_schema_version(context, version):\n data = context.response.json()\n check_and_get_attribute(data, version)",
"def get_schema(schema): # noqa: E501\n return 'do some magic!'",
"def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None",
"def version(self):\n self._get_latest_content()\n return self._data.get('version', None)",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def latest_coupon_version(coupon):\n return coupon.versions.order_by(\"-created_on\").first()",
"def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)",
"def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]"
] | [
"0.7790253",
"0.7104092",
"0.7003174",
"0.6998072",
"0.6989781",
"0.6966129",
"0.68279827",
"0.67498225",
"0.6675044",
"0.64771247",
"0.64744395",
"0.6458408",
"0.6391964",
"0.63075215",
"0.629649",
"0.6258136",
"0.62317574",
"0.61514646",
"0.6149739",
"0.6116614",
"0.6109423",
"0.6106146",
"0.6020665",
"0.59213334",
"0.59036756",
"0.58786875",
"0.5872308",
"0.5865964",
"0.5840015",
"0.5828158"
] | 0.75875497 | 1 |
Validates `registration_responses` against this schema (using `schema_blocks`). Raises `ValidationError` if invalid. Otherwise, returns True. | def validate_registration_responses(self, registration_responses, required_fields=False):
validator = RegistrationResponsesValidator(self.schema_blocks.all(), required_fields)
return validator.validate(registration_responses) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, registration):\n self.l.info(\"Starting registration validation\")\n\n validation_errors = []\n\n # Check if registrant_id is a valid UUID\n if not utils.is_valid_uuid(registration.registrant_id):\n validation_errors += [\"Invalid UUID registrant_id\"]\n\n # Check that required fields are provided and valid\n data_fields = registration.data.keys()\n\n if registration.reg_type == \"pmtct_prebirth\":\n validation_errors += self.check_lang(data_fields, registration)\n validation_errors += self.check_mom_dob(data_fields, registration)\n validation_errors += self.check_edd(data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n\n elif registration.reg_type == \"pmtct_postbirth\":\n validation_errors += self.check_lang(data_fields, registration)\n validation_errors += self.check_mom_dob(data_fields, registration)\n validation_errors += self.check_baby_dob(data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n\n elif registration.reg_type == \"nurseconnect\":\n validation_errors += self.check_faccode(\n data_fields, registration)\n validation_errors += self.check_operator_id(\n data_fields, registration)\n validation_errors += self.check_msisdn_registrant(\n data_fields, registration)\n validation_errors += self.check_msisdn_device(\n data_fields, registration)\n validation_errors += self.check_lang(\n data_fields, registration)\n\n elif registration.reg_type == \"momconnect_prebirth\":\n # Checks that apply to clinic, chw, public\n validation_errors += self.check_operator_id(\n data_fields, registration)\n validation_errors += self.check_msisdn_registrant(\n data_fields, registration)\n validation_errors += self.check_msisdn_device(\n data_fields, registration)\n validation_errors += self.check_lang(\n data_fields, registration)\n validation_errors += self.check_consent(\n data_fields, registration)\n\n # Checks that apply to clinic, chw\n if 
registration.source.authority in [\"hw_full\", \"hw_partial\"]:\n validation_errors += self.check_id(\n data_fields, registration)\n\n # Checks that apply to clinic only\n if registration.source.authority == \"hw_full\":\n validation_errors += self.check_edd(\n data_fields, registration)\n validation_errors += self.check_faccode(\n data_fields, registration)\n\n elif registration.reg_type == \"momconnect_postbirth\":\n validation_errors.append(\"Momconnect postbirth not yet supported\")\n\n elif registration.reg_type == \"loss_general\":\n validation_errors.append(\"Loss general not yet supported\")\n\n # Evaluate if there were any problems, save and return\n if len(validation_errors) == 0:\n self.l.info(\"Registration validated successfully - updating \"\n \"registration object\")\n registration.validated = True\n registration.save()\n self.l.info(\"Registration object updated.\")\n return True\n else:\n self.l.info(\"Registration validation failed - updating \"\n \"registration object\")\n registration.data[\"invalid_fields\"] = validation_errors\n registration.save()\n self.l.info(\"Registration object updated.\")\n return False",
"def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success",
"def validate_response(self, response):\n pass",
"def validate(self, response):\n return response[\"status_code\"] == 1",
"def _validate_response(request, response, schema_data, schema_resolver):\n try:\n validate_outgoing_response(\n request,\n response,\n schema_data,\n schema_resolver\n )\n except jsonschema.exceptions.ValidationError as exc:\n # This will alter our stack trace slightly, but Pyramid knows how\n # to render it. And the real value is in the message anyway.\n raise HTTPInternalServerError(str(exc))",
"def check_event_registration_response_valid(\n response: HTTPResponse, user_id: user_models.UserId) -> bool:\n try:\n assert response.status_code == 201\n assert response.json()\n event_id = response.json().get(\"event_id\")\n assert check_event_id_added_to_user(event_id, user_id)\n assert check_event_status_set_properly(event_id)\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}. resp json: {response.json()}\"\n logging.debug(debug_msg)\n return False",
"def validate_outgoing_response(request, response, schema_map, resolver):\n body = prepare_body(response)\n Draft4Validator(\n schema_map.response_body_schema,\n resolver=resolver,\n types=EXTENDED_TYPES,\n ).validate(body)",
"def validate(self):\n try:\n # update _resource to have default values from the schema\n self._resource = self.schema(self._resource)\n except MultipleInvalid as e:\n errors = [format_error(err, self.resource_type) for err in e.errors]\n raise exceptions.ValidationError({'errors': errors})\n\n yield self.check_unique()",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def validate(self):\n\n if self.validate_all_fields():\n return True\n return False",
"def isValid(self):\n def _isValid(obj):\n return obj.errors.isEmpty()\n return self.validate().addCallback(_isValid)",
"def check_response_errors(self, resp):\n return True",
"def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False",
"def validate(self):\n return self.validator.validate(self.fields)",
"def validate(self):\n if not ((self.bpq_kind == self.BPQ_BLOCK_KIND_QUERY) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE_DO_NOT_CACHE_FRAG) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_PUBLISH)):\n return False\n\n if not ((self.matching_rule == self.BPQ_MATCHING_RULE_EXACT) or\n (self.matching_rule == self.BPQ_MATCHING_RULE_TOKENS) or\n (self.matching_rule == self.BPQ_MATCHING_RULE_NEVER)):\n return False\n\n if not (self.src_eid and (len(self.src_eid) == self.src_eid_len)):\n return False\n\n if not (self.bpq_id and (len(self.bpq_id) == self.bpq_id_len)):\n return False\n\n if not (self.bpq_val and (len(self.bpq_val) == self.bpq_val_len)):\n return False\n\n if not (self.frag_cnt == len(self.frag_desc)):\n return False\n\n for d in self.frag_desc:\n if not (d.has_key(\"frag_offset\") and d.has_key(\"frag_len\")):\n return False\n\n return True",
"def check_response_invalid_fields(response: HTTPResponse) -> bool:\n return response.status_code == 422",
"def has_validation_error(self):\n for err in self._schema.validator.validation_errors:\n if not err.warning:\n return True\n return False",
"def assert_register_response_in_pipeline_looks_correct(self, response, pipeline_kwargs):\r\n self.assertEqual(200, response.status_code)\r\n # Check that the correct provider was selected.\r\n self.assertIn('successfully signed in with <strong>%s</strong>' % self.PROVIDER_CLASS.NAME, response.content)\r\n # Expect that each truthy value we've prepopulated the register form\r\n # with is actually present.\r\n for prepopulated_form_value in self.PROVIDER_CLASS.get_register_form_data(pipeline_kwargs).values():\r\n if prepopulated_form_value:\r\n self.assertIn(prepopulated_form_value, response.content)",
"def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n if username_exists(cur, self.username.data):\n self.username.errors.append('This username already exists!')\n return False\n\n return True",
"def verify_response_with_requested_fields(self, response):\n self.verify_response_block_dict(response)\n for block_key_string, block_data in response.data['blocks'].items():\n block_key = deserialize_usage_key(block_key_string, self.course_key)\n xblock = self.store.get_item(block_key)\n\n self.assert_in_iff('children', block_data, xblock.has_children)\n self.assert_in_iff('graded', block_data, xblock.graded is not None)\n self.assert_in_iff('format', block_data, xblock.format is not None)\n self.assert_in_iff('due', block_data, xblock.due is not None)\n self.assert_true_iff(block_data['student_view_multi_device'], block_data['type'] == 'html')\n assert 'not_a_field' not in block_data\n\n if xblock.has_children:\n self.assertSetEqual(\n {str(child.location) for child in xblock.get_children()},\n set(block_data['children']),\n )",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def _validate_response(self, response):\n # Check for unexpected response - all should be JSON dicts that have\n # already been deserialised\n if not isinstance(response, types.DictionaryType):\n self.message(\n \"\\t\\t[!] ERROR - Unexpected value returned from the API: '%s'\" %\n (response))\n return False\n\n # Check for valid errors\n if \"error\" in response and \"msg\" in response:\n self.message(\n \"\\t\\t[!] ERROR - %s (%s)\" %\n (response[\"msg\"], response[\"timestamp\"]))\n return False\n\n # Is this a valid response message\n if \"msg\" in response:\n return True\n\n # Catch all...dictionary returned but does not contain expected keys?\n # Who know's what's going on here?!\n else:\n self.message(\n \"\\t\\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'\" %\n (response))\n return False",
"def isValidForSchema(schema):\n\n return True",
"def validate(self) -> bool:\n\n # Start by reading in the blueprint schema json\n schema = json.loads(pkgutil.get_data(\"FactorioTools\", \"blueprintSchema.json\"))\n\n # Validate the object's schema against the blueprintSchema JSON\n try:\n jsonschema.validate(self.data, schema)\n return True\n except jsonschema.ValidationError:\n pass\n\n return False",
"def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict",
"def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise",
"def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n return True",
"def valid_in_response(self):\n return self._repeatable[1] is not None",
"def isValid(self):\n errorList = self.getErrors()\n\n return not errorList",
"def validate(self, schema=None, callback=None):\n return hxl.schema(schema, callback).validate(self)"
] | [
"0.57850087",
"0.56401217",
"0.5630611",
"0.5599469",
"0.55679965",
"0.55353653",
"0.53955424",
"0.5328068",
"0.530389",
"0.5194013",
"0.5185546",
"0.5152043",
"0.5151474",
"0.5126505",
"0.5081167",
"0.5079145",
"0.5051262",
"0.5037521",
"0.50278944",
"0.50093704",
"0.49799225",
"0.49531746",
"0.49508607",
"0.4932299",
"0.49287462",
"0.4925822",
"0.49145025",
"0.49135062",
"0.4901456",
"0.48781714"
] | 0.8683601 | 0 |
Allows us to use a unique_together constraint, so each "registration_response_key" only appears once for every registration schema. To do this, we need to save empty "registration_response_key"s as null, instead of an empty string. | def save(self, *args, **kwargs):
self.registration_response_key = self.registration_response_key or None
return super().save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_unique_together_applied(model_sig):\n model_sig['meta']['__unique_together_applied'] = True",
"def unique_together(self):\n if self._meta.unique_together:\n return self._meta.unique_together[0]\n return ()",
"def apply_unique_together(self, unique_together):\n self.unique_together = unique_together\n self._unique_together_applied = True",
"def unique_together(self, new_value):\n self._unique_together = self._normalize_together(new_value)",
"def unique_together(self):\n return self._unique_together",
"def test_registration_id_unique(self):\n\n\t\t# add a device\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 3\",\n\t\t\t\"device_id\": \"0x1031af3b\",\n\t\t})\n\t\tserializer.is_valid(raise_exception=True)\n\t\tobj = serializer.save()\n\n\t\t# ensure updating the same object works\n\t\tserializer = GCMDeviceSerializer(obj, data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 5\",\n\t\t\t\"device_id\": \"0x1031af3b\",\n\t\t})\n\t\tserializer.is_valid(raise_exception=True)\n\t\tobj = serializer.save()\n\n\t\t# try to add a new device with the same token\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 3\",\n\t\t\t\"device_id\": \"0xdeadbeaf\",\n\t\t})\n\n\t\twith self.assertRaises(ValidationError) as ex:\n\t\t\tserializer.is_valid(raise_exception=True)\n\t\tself.assertEqual({'registration_id': [u'This field must be unique.']}, ex.exception.detail)",
"def enforce_unique_values(self):\n return self.properties.get('enforceUniqueValues', None)",
"def test_registration_id_unique(self):\n\n\t\t# add a device\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 3\",\n\t\t\t\"device_id\": \"0x1031af3b\",\n\t\t\t\"application_id\": \"XXXXXXXXXXXXXXXXXXXX\",\n\t\t})\n\t\tserializer.is_valid(raise_exception=True)\n\t\tobj = serializer.save()\n\n\t\t# ensure updating the same object works\n\t\tserializer = GCMDeviceSerializer(obj, data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 5\",\n\t\t\t\"device_id\": \"0x1031af3b\",\n\t\t\t\"application_id\": \"XXXXXXXXXXXXXXXXXXXX\",\n\t\t})\n\t\tserializer.is_valid(raise_exception=True)\n\t\tobj = serializer.save()\n\n\t\t# try to add a new device with the same token\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Galaxy Note 3\",\n\t\t\t\"device_id\": \"0xdeadbeaf\",\n\t\t\t\"application_id\": \"XXXXXXXXXXXXXXXXXXXX\",\n\t\t})\n\n\t\twith self.assertRaises(ValidationError):\n\t\t\tserializer.is_valid(raise_exception=True)",
"def _set_unique_keys(self, keys):\n # make this final once set\n if self._unique_keys:\n raise AlreadySetError()\n\n self._unique_keys = self._prepare_keys(keys)",
"def validate_registration_responses(self, registration_responses, required_fields=False):\n validator = RegistrationResponsesValidator(self.schema_blocks.all(), required_fields)\n return validator.validate(registration_responses)",
"def make_fields_unique(self, fields):\n ...",
"def validate_unique(self, exclude=None, **kwargs):\n return super().validate_unique(exclude=exclude, user=self.user)",
"def create_unique(self, table_name, columns):\r\n print \" ! WARNING: SQLite does not support adding unique constraints. Ignored.\"",
"def check_unique(self):\n pass",
"def unique_key(self, unique_key):\n\n self._unique_key = unique_key",
"def has_unique_together_changed(self, old_model_sig):\n old_unique_together = old_model_sig.unique_together\n new_unique_together = self.unique_together\n\n return (old_unique_together != new_unique_together or\n ((old_unique_together or new_unique_together) and\n not old_model_sig._unique_together_applied))",
"def validate_unique(self, exclude=None):\n qs_barcode = Product.objects.filter(barcode=self.barcode)\n qs_item_number = Product.objects.filter(item_number=self.item_number)\n qs_name = Product.objects.filter(name=self.name)\n if qs_barcode.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Barcode must be unique in one webshop')\n if qs_item_number.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item number must be unique in one webshop')\n if qs_name.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item Name must be unique in one webshop')",
"def migrate_registrations_metadata_key(schema):\n registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))\n total_reg = registrations.count()\n logger.info('Examining {} registrations for improper key'.format(total_reg))\n reg_count = 0\n\n for reg in registrations:\n reg_count += 1\n if reg.registered_meta.get(schema._id, {}).get('recommended-methods', {}).get('value', {}).get('undefined', {}):\n reg.registered_meta[schema._id]['recommended-methods']['value']['procedure'] = \\\n reg.registered_meta[schema._id]['recommended-methods']['value'].pop('undefined')\n reg.save()\n logger.info('{}/{} Migrated key for {}'.format(reg_count, total_reg, reg._id))\n else:\n logger.info('{}/{} Key already correct for {}. No change.'.format(reg_count, total_reg, reg._id))",
"def unique(self, column=None):\n if column is None:\n self.table.add_constraint(\n self._last_column.name, \"unique\", columns=[self._last_column.name]\n )\n return self\n\n self.table.add_constraint(column, \"unique\", columns=[column])\n\n return self",
"def _update_context_no_unique_values(metadata, column, unique_values):\r\n\r\n return None",
"def unique_key(self):\n return json.dumps([self.name, self.birthDate])",
"def save_response(self, key, response):\n self.responses[key] = response, datetime.now(timezone.utc)",
"def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values",
"def test_unique_signature_constraint(self):\n with self.assertRaisesRegex(ValidationError, 'Signature with this Agreement and Signatory already exists.'):\n new_test_sig = Signature(agreement=self.test_agreement,\n signatory=self.test_user,\n username=self.test_user.username,\n first_name=self.test_user.first_name,\n last_name=self.test_user.last_name,\n email=self.test_user.email,\n department=self.test_department)\n new_test_sig.full_clean()",
"def _remove_duplicates(self):\n for key in self._role_strings_info:\n self._role_strings_info[key] = [dict(tupleized) for tupleized in set(tuple(item.items())\n for item in self._role_strings_info[key])]",
"def has_unique_together_changed(self, old_model_sig):\n old_unique_together = old_model_sig.unique_together\n new_unique_together = self.unique_together\n\n return (old_unique_together != new_unique_together or\n ((old_unique_together or new_unique_together) and\n old_model_sig._unique_together_applied is not\n self._unique_together_applied))",
"def remove_validation_unique(self):\n fields = {}\n # extract unique validators\n for name, field in self.fields.items():\n fields[name] = []\n assert hasattr(field, 'validators'), \"no validators on {}\".format(field.__class__.__name__)\n for validator in field.validators:\n if isinstance(validator, UniqueValidator):\n fields[name].append(validator)\n for validator in fields[name]:\n field.validators.remove(validator)\n # extract unique_together validators\n fields['_'] = []\n for validator in self.validators:\n if isinstance(validator, UniqueTogetherValidator):\n fields['_'].append(validator)\n for validator in fields['_']:\n self.validators.remove(validator)\n return fields",
"def has_unique_together_changed(old_model_sig, new_model_sig):\n old_meta = old_model_sig['meta']\n new_meta = new_model_sig['meta']\n old_unique_together = old_meta['unique_together']\n new_unique_together = new_meta['unique_together']\n\n return (list(old_unique_together) != list(new_unique_together) or\n ((old_unique_together or new_unique_together) and\n not old_meta.get('__unique_together_applied', False)))",
"def clean_up(self) -> None:\n self.single_device_matches = collections.defaultdict(\n lambda: collections.defaultdict(list)\n )",
"def _save_attrs(self) -> None:\n for attr_req_id in self.attr_req_ids:\n orig_label = self.attr_req_ids[attr_req_id]\n attr_req_label = self.attr_req_labels[attr_req_id].GetValue()\n attr_req_element = self.attr_requirements[self.element][orig_label]\n if attr_req_label == '':\n continue\n if orig_label != attr_req_label and orig_label != '':\n self.attr_requirements[self.element].pop(orig_label)\n self.attr_req_ids[attr_req_id] = attr_req_label\n if attr_req_label not in self.attr_requirements[self.element] \\\n or self.attr_requirements[self.element][\n attr_req_label] != attr_req_element:\n self.attr_requirements[self.element][\n attr_req_label] = attr_req_element"
] | [
"0.57599163",
"0.56867594",
"0.5517179",
"0.53901464",
"0.5270959",
"0.5225833",
"0.517888",
"0.5130246",
"0.5084795",
"0.5074656",
"0.5059439",
"0.49477896",
"0.48854136",
"0.47908023",
"0.47602364",
"0.46850708",
"0.46745723",
"0.46619073",
"0.46608734",
"0.46590465",
"0.46309957",
"0.46194506",
"0.459707",
"0.4568624",
"0.45560953",
"0.45557922",
"0.45172992",
"0.44944704",
"0.4483649",
"0.44784945"
] | 0.64849234 | 0 |
Return True if frame is mergable with self instance. | def mergable(self, frame):
for pos in self.srcList:
if pos in frame.srcList:
return True
for pos in self.tgtList:
if pos in frame.tgtList:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merged(self) -> bool:\n return pulumi.get(self, \"merged\")",
"def is_merged(self):\n return self.get_data(\"state\") == self.STATE_MERGED",
"def can_overlap(self):\n return self.is_open",
"def can_overlap(self):\n return self.is_open",
"def can_overlap(self):\n return False",
"def hasCurrentFrame(self):\n if self.currentFrame == []:\n return False\n return True",
"def frame_available(self):\n return type(self._frame) != type(None)",
"def developer_can_merge(self) -> bool:\n return pulumi.get(self, \"developer_can_merge\")",
"def is_extended(self):\n return self._parent is not None",
"def isActive(self):\n self._acquire_lock()\n returned = True\n try:\n if len(self.existing_frames) == 0 :\n returned = False\n finally:\n self._release_lock()\n return returned",
"def isMergableWith(self, op):\n if not is_glsl_block_function(op):\n return False\n if (self.getName() != op.getName()) or (self.getType() != op.getType()):\n return False\n return True",
"def captured(self):\n return self.game.enemyTeam.flag.carrier != None",
"def captured(self):\n return self.game.enemyTeam.flag.carrier != None",
"def is_full_frame(self):\n return self['application'] == 'ap3_250_fullframe' or self['application'] == 'ap9_250_fullframe_mindead'",
"def is_opened(self):\n return len(Container._construction_stack) > 0 and self == Container._construction_stack[-1]",
"def isAlive(self):\n\n import wx\n\n if not fwidgets.isalive(self.parent):\n return False\n\n if isinstance(self.widget, wx.MenuItem):\n return fwidgets.isalive(self.menu)\n\n else:\n return fwidgets.isalive(self.widget)",
"def __bool__(self):\n return _osgAnimation.BoneMap___bool__(self)",
"def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index",
"def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None",
"def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None",
"def get_active(self):\n if hasattr(self, 'canvas'):\n return True\n else:\n return False",
"def canBeMergedWith(self, other):",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def hooked(self):\n return hasattr(self, \"hook\")",
"def get_active(self):\n try:\n return not (self.jframe.getExtendedState() & JFrame.ICONIFIED)\n except AttributeError:\n if self.jframe:\n return True\n else:\n return False",
"def _animation_over(self) -> bool:\n \treturn self.current_height == 0 or self.current_height == self.original_height",
"def __bool__(self: Self) -> bool:\n return bool(self.removed or self.added)",
"def hooked(self):\n return hasattr(self, 'hook')",
"def is_merged(self):\r\n url = '{0}/merge'.format(self.get_url())\r\n\r\n return http.Request('GET', url), resource.parse_boolean",
"def is_duplicator_visible(obj):\r\n assert obj.is_duplicator\r\n\r\n # obj.is_duplicator is also true if it has particle/hair systems - they allow to show the duplicator\r\n for psys in obj.particle_systems:\r\n if psys.settings.use_render_emitter:\r\n return True\r\n\r\n # Dupliframes duplicate the original object, so it must be visible\r\n if obj.dupli_type == \"FRAMES\":\r\n return True\r\n\r\n # Duplicators (Dupliverts/faces) are always hidden\r\n return False"
] | [
"0.6856943",
"0.66920245",
"0.612307",
"0.612307",
"0.60129476",
"0.5976742",
"0.5958957",
"0.5924997",
"0.58609396",
"0.58477247",
"0.57797647",
"0.5731097",
"0.5731097",
"0.5692386",
"0.56780577",
"0.5652896",
"0.5599992",
"0.55998355",
"0.55879635",
"0.55879635",
"0.5565105",
"0.55525726",
"0.55225986",
"0.55112606",
"0.5509401",
"0.5507157",
"0.5503195",
"0.5496695",
"0.54779583",
"0.5472726"
] | 0.77469105 | 0 |
Merge two Frame instances frame1 and frame 2, return merged Frame instance. frame1 and frame2 must be on the same tree pair. | def merge(cls, frame1, frame2):
return cls(list(set(frame1.srcList+frame2.srcList)), list(set(frame1.tgtList+frame2.tgtList)), frame1.srcTree, frame1.tgtTree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(t1, t2):\n if t2 is None:\n return t1\n if t1 is None:\n return t2\n\n t1 = _splay(_find_max(t1))\n t1.right = t2\n t2.parent = t1\n return t1",
"def blend_frames(self, frame0, frame1, blend):\n root_pos0 = self.get_frame_root_pos(frame0)\n root_pos1 = self.get_frame_root_pos(frame1)\n\n root_rot0 = self.get_frame_root_rot(frame0)\n root_rot1 = self.get_frame_root_rot(frame1)\n\n joints0 = self.get_frame_joints(frame0)\n joints1 = self.get_frame_joints(frame1)\n\n blend_root_pos = (1.0 - blend) * root_pos0 + blend * root_pos1\n blend_root_rot = transformations.quaternion_slerp(root_rot0, root_rot1,\n blend)\n blend_joints = (1.0 - blend) * joints0 + blend * joints1\n\n blend_root_rot = motion_util.standardize_quaternion(blend_root_rot)\n\n blend_frame = np.zeros(self.get_frame_size())\n self.set_frame_root_pos(blend_root_pos, blend_frame)\n self.set_frame_root_rot(blend_root_rot, blend_frame)\n self.set_frame_joints(blend_joints, blend_frame)\n return blend_frame",
"def merge(self, other: \"BB\") -> \"BB\":\n cp_bb = lib.cpBBMerge(self, other)\n return BB(cp_bb.l, cp_bb.b, cp_bb.r, cp_bb.t)",
"def combine_position_dataframes(dataframe1, dataframe2):\n\n # check that the dataframes have the same number of columns\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n frames = [dataframe1, dataframe2]\n\n combined_dataframe = pandas.concat(frames)\n\n dataframe1.drop(dataframe1.index, inplace=True) # Delete data from dataframe to save memory\n dataframe2.drop(dataframe2.index, inplace=True) # Delete data from dataframe to save memory\n\n # confirm that the dataframes no longer exist (saving memory)\n print(\"Dimensions of dataframe1: \", dataframe1.shape)\n print(\"Dimensions of dataframe2: \", dataframe2.shape)\n\n # check that all rows of both dataframes have been combined into the new dataframe. Sort by date and time.\n print(\"Dimensions of combined dataframe: \", combined_dataframe.shape)\n combined_dataframe_sorted = combined_dataframe.sort_values('date_time')\n\n print(\"Sample of combined dataframe: \", combined_dataframe_sorted.sample(10))\n\n return combined_dataframe_sorted",
"def _merge(h1, h2, meld=_PairingHeap.add_subheap):\n if h1 is None:\n return h2\n elif h2 is None:\n return h1\n return meld(h1, h2) if h1.root < h2.root else meld(h2, h1)",
"def _mergeFrames_(self, frameSet):\n\t\tmergedList = [] \n\t\tframeList = list(frameSet)\n\t\tframeList.sort()\n\t\tfor currentFrame in frameList:\n\t\t\t#print [currentFrame.srcTree[pos].leaves() for pos in currentFrame.srcList]\n\t\t\t#print [currentFrame.tgtTree[pos].leaves() for pos in currentFrame.tgtList]\n\t\t\tfor frame in mergedList:\n\t\t\t\tif frame.mergable(currentFrame):\n\t\t\t\t\t#print \"merge\"\n\t\t\t\t\tmergedList.remove(frame)\n\t\t\t\t\tmergedList.append(Frame.merge(frame, currentFrame))\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t#print \"append\"\n\t\t\t\tmergedList.append(currentFrame)\n\t\t\t#print \n\n\t\treturn mergedList",
"def merge(self, other: ProjectMeta) -> ProjectMeta:\n return self.clone(\n obj_classes=self._obj_classes.merge(other.obj_classes),\n tag_metas=self._tag_metas.merge(other._tag_metas),\n )",
"def merge(left: Node, right: Node) -> Node:\n if (not left) or (not right): #如果至少有一个是 None , 返回另一个\n return left or right\n elif left.prior < right.prior:\n # print(\"left\") # @Haor: 没有用到?\n left.r = merge(left.r, right)\n return left\n else: #以右为头结点, 将左树与右的左孩子重做结合\n \"\"\"\n Right will be root because it has more priority\n Now we need to merge left tree and right's left son\n \"\"\"\n right.l = merge(left, right.l)\n return right",
"def _recursive_merge(self, tree: Tree):\r\n\r\n # Keep record of the parent subtree\r\n parent = tree\r\n\r\n # Visit all child nodes:\r\n # 1) If child node is a leaf node, then merge parent node with child node\r\n # 2) If child node is a root node, then:\r\n # 2.1) Merge recursively all its children.\r\n # 2.2) Merge parent node with child node\r\n for child in tree.children:\r\n if len(child.children) == 0:\r\n self._merge_dataframes(left_node=parent.node, right_node=child.node)\r\n parent.children = parent.children[1:]\r\n else:\r\n self._recursive_merge(child)\r\n self._merge_dataframes(left_node=parent.node, right_node=child.node)",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def diff(self, other, match=lambda x: True, clean=False):\n result = {}\n\n def _iterativediff(t1, t2, subdir):\n \"\"\"compares two trees and appends new tree nodes to examine to\n the stack\"\"\"\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))\n\n stack = []\n _iterativediff(self._tree, other._tree, b'')\n while stack:\n subdir, t1, t2 = stack.pop()\n # stack is populated in the function call\n _iterativediff(t1, t2, subdir)\n\n return result",
"def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1",
"def merge(self, other_btree):\n pass",
"def _merge_dataframes(self, left_node: Node, right_node: Node):\r\n\r\n left_dataframe = self._get_table(left_node.name)\r\n right_dataframe = self._get_table(right_node.name)\r\n\r\n join_id = 0\r\n while left_node.inner_joins[join_id]['join_with'] != right_node.name:\r\n join_id += 1\r\n\r\n merged_df = left_dataframe.merge(right_dataframe,\r\n how='left',\r\n left_on=left_node.inner_joins[join_id]['on'],\r\n right_on=left_node.inner_joins[join_id]['join_with_on'],\r\n suffixes=(None, \"_\" + right_node.name))\r\n self._set_table(left_node.name, merged_df)",
"def merge(self, other):\n if other is None:\n return\n if self.theta1 > other.theta1:\n self.theta1 = other.theta1\n self.p1 = other.p1\n if self.theta2 < other.theta2:\n self.theta2 = other.theta2\n self.p2 = other.p2",
"def append(self, other, ignore_index=False, verify_integrity=False):\n new_frame = self.frame.append(other.frame, ignore_index=ignore_index,\n verify_integrity=verify_integrity)\n return self.__class__(new_frame)",
"def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes",
"def recreate_frames(from_tree: bpy.types.NodeTree,\n to_tree: bpy.types.NodeTree,\n frame_names: Set[str],\n from_to_node_names: Dict[str, str]):\n new_frame_names = {n: to_tree.nodes.new('NodeFrame').name for n in frame_names}\n frame_attributes = ['label', 'use_custom_color', 'color', 'label_size', 'text']\n for frame_name in frame_names:\n old_frame = from_tree.nodes[frame_name]\n new_frame = to_tree.nodes[new_frame_names[frame_name]]\n for attr in frame_attributes:\n setattr(new_frame, attr, getattr(old_frame, attr))\n for from_node in from_tree.nodes:\n if from_node.name not in from_to_node_names:\n continue\n if from_node.parent and from_node.parent.name in new_frame_names:\n if from_node.bl_idname == 'NodeFrame':\n to_node = to_tree.nodes[new_frame_names[from_node.name]]\n else:\n to_node = to_tree.nodes[from_to_node_names[from_node.name]]\n to_node.parent = to_tree.nodes[new_frame_names[from_node.parent.name]]",
"def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)",
"def union(node1, node2):\n node1_root = find(node1)\n node2_root = find(node2)\n if node1_root == node2_root:\n return\n if node1_root.rank < node2_root.rank:\n node1_root.parent = node2_root\n elif node2_root.rank > node2_root.rank:\n node2_root.parent = node1_root\n else:\n node2_root.parent = node1_root\n node1_root.rank = node1_root.rank + 1",
"def merge(self, db2):\n delta_db = Database(db2)\n\n # Find common headers between the master and delta databases\n common_headers = [x for x in self.headers if x in delta_db.headers]\n\n # Any new headers found in the delta are added to the master\n self.headers.extend(\n [x for x in delta_db.headers if x not in self.headers])\n\n if len(common_headers) < 1:\n print(\"No shared headers were found. These files cannot be merged.\")\n else:\n key = ''\n # Skip picker prompt if there is only one common header\n if len(common_headers) == 1:\n key = common_headers[0]\n else:\n key = self.headerpicker(common_headers)\n\n # Create a temp list for new records to be added to\n records_temp = list(self.records)\n\n # Iterate over new records and attempt to match to existing record\n for each in delta_db.records:\n record = self.fetch_record(key, each, records_temp)\n if record:\n record.attributes.update(each.attributes)\n\n self.records = records_temp\n print(\"Merge successful!\\n\")",
"def merge(self, right, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.merge)(\n self, right=right, **kwargs\n )",
"def deep_merge(a, b):\n assert quacks_like_dict(a), quacks_like_dict(b)\n dst = a.copy()\n\n stack = [(dst, b)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if (quacks_like_dict(current_src[key])\n and quacks_like_dict(current_dst[key])):\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst",
"def merge_proto_types(tp1, tp2):\n if tp1 is None and tp2 is None:\n return _none_proto_type\n if tp1 is None:\n return tp2\n if tp2 is None:\n return tp1\n if tp1 == _none_proto_type and tp2 == _none_proto_type:\n return _none_proto_type\n if tp2 == _none_proto_type:\n return tp1\n if tp1 == _none_proto_type:\n return tp2\n if tp1 == tp2:\n return tp1\n if tp1.HasField(\"basic_type\") and tp2.HasField(\"basic_type\"):\n assert tp1.basic_type == tp2.basic_type, (tp1, tp2)\n return pb.SQLType(\n basic_type = tp1.basic_type,\n nullable = tp1.nullable or tp2.nullable)\n if tp1.HasField('array_type'):\n assert tp2.HasField('array_type'), (tp1, tp2)\n return pb.SQLType(\n array_type=merge_proto_types(tp1.array_type, tp2.array_type),\n nullable=tp1.nullable or tp2.nullable)\n if tp1.HasField('struct_type'):\n assert tp2.HasField('struct_type'), (tp1, tp2)\n l1 = tp1.struct_type.fields\n l2 = tp2.struct_type.fields\n assert len(l1) == len(l2), (l1, l2)\n l = []\n for (f1, f2) in zip(l1, l2):\n assert f1.field_name == f2.field_name, (f1, f2)\n l.append(pb.StructField(field_name=f1.field_name,\n field_type=merge_proto_types(f1.field_type, f2.field_type)))\n return pb.SQLType(\n struct_type=pb.StructType(fields=l),\n nullable=tp1.nullable or tp2.nullable)\n raise Exception(\"Cannot merge incompatible types %s and %s\" % (tp1, tp2))",
"def add_trees(t1, t2):\n \"*** YOUR CODE HERE ***\"\n if not t1:\n return t2 # Could replace with copy_tree(t2)\n if not t2:\n return t1 # Could replace with copy_tree(t1)\n new_entry = t1.root + t2.root\n t1_branches, t2_branches = list(t1.branches), list(t2.branches)\n length_t1, length_t2 = len(t1_branches), len(t2_branches)\n if length_t1 < length_t2:\n t1_branches += [None for _ in range(length_t1, length_t2)]\n elif length_t1 > length_t2:\n t2_branches += [None for _ in range(length_t2, length_t1)]\n return Tree(new_entry, [add_trees(branch1, branch2) for branch1, branch2 in zip(t1_branches, t2_branches)])",
"def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}",
"def join_union(self, other):\n\n assert type(self) is type(other), 'Expected NestedRE instance'\n\n A = self.make_flat()\n B = other.make_flat()\n\n if A == B and A !='ϵ':\n return self.merge_union(A, [self.closure, other.closure])\n elif A == 'ϵ' and B == 'ϵ':\n return NestedRE('ϵ')\n elif A == 'ϵ':\n return NestedRE(B, '?')\n elif B == 'ϵ':\n return NestedRE(A, '?')\n else:\n return NestedRE( '(' + A + '|' + B + ')' )",
"def join(self, other):\n\n assert self.is_node(self.root)\n assert self.is_node(other.root)\n\n x = self.maximum()\n self.splay(x)\n self.root.right = other.root\n other.root.parent = self.root.right\n del other\n\n return self",
"def merge(self, first, second):\n return second if self.failed(first) else first",
"def assertFrameEqual( df1, df2 ):\n\n return assert_frame_equal( df1.sort( axis=1) , df2.sort( axis=1) , check_names = True )"
] | [
"0.62682045",
"0.59971756",
"0.5608523",
"0.5507865",
"0.5435409",
"0.5363136",
"0.5361167",
"0.5360327",
"0.53100735",
"0.5293331",
"0.52818555",
"0.5263779",
"0.5189165",
"0.51624644",
"0.5137856",
"0.5104978",
"0.5097869",
"0.5088363",
"0.5084046",
"0.50600046",
"0.49450502",
"0.49143988",
"0.4907288",
"0.4902896",
"0.4890374",
"0.488065",
"0.48759755",
"0.48720306",
"0.48074117",
"0.47992674"
] | 0.8000494 | 0 |
Return the offset of the first word and the last word for every subtree in the list. | def treeposition2offsetPosition(subTrPosList, tr):
offsetList = []
cnt = 0
for pos in subTrPosList:
par = tr[pos]
while par != tr:
for i in xrange(par.parent_index()):
if isinstance(par.parent()[i], nltk.ParentedTree):
cnt += len(par.parent()[i].leaves())
else:
print >> debug_log, tr
par = par.parent()
label = ''
start = False
for char in tr[pos].node:
if not start:
if char not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
continue
else:
start = True
label += char
else:
if char not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
break
else:
label += char
offsetList.append((cnt, cnt+len(tr[pos].leaves()), label))
cnt = 0
return offsetList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gen_loc_words(word_list: list):\r\n loc = 0\r\n res = []\r\n for word in word_list:\r\n res.append((loc, word))\r\n loc += len(word)\r\n return res",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_postion(words[0])[0]\n end: int = self.get_word_postion(words[-1])[1]\n return start, end",
"def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos",
"def compute_representation_positions(self):\r\n offset = 3\r\n for node in self.depth_first_search():\r\n\r\n if node.is_leaf():\r\n node.str_pos = offset\r\n offset += len(str(node.keys)) + 2\r\n\r\n else:\r\n first_child_mid = node.children[ 0].str_pos + len(str(node.children[ 0].keys))//2\r\n last_child_mid = node.children[-1].str_pos + len(str(node.children[-1].keys))//2\r\n node.str_pos = (first_child_mid + last_child_mid)//2 - len(str(node.keys))//2",
"def get_offsets(word, raw_text):\n try:\n match = re.search(word, raw_text)\n return (match.start(), match.end())\n except AttributeError: #could not find word\n return (0, 0)",
"def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()",
"def _word_index(word, wordlist=wordlist):\n lo, hi = 0, len(wordlist) - 1\n while lo < hi:\n mid = (lo + hi) // 2\n if word <= wordlist[mid]:\n hi = mid\n else:\n lo = mid + 1\n return lo",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def get_word_postion(self, word: Word) -> Tuple[int, int]:\n text: str = self.to_text()\n words: List[Word] = self.get_words()\n current_position: int = 0\n\n for w in words:\n current_position = text.find(w.text, current_position)\n\n if w == word:\n return (current_position, current_position + len(w.text))\n return 0, 0",
"def _extractSubtreeSpan_(self, tree, wordRulesFlag):\n\t\tspanDict = {}\n\t\tsnt = tree.leaves()\n\t\t#pdb.set_trace()\n\t\tfor subtree in tree.subtrees():\n\t\t\tif not wordRulesFlag and subtree.height() <= 2:\n\t\t\t\tcontinue\n\t\t\tspan = Frame.treeposition2offsetPosition([subtree.treeposition()], tree)[0] \n\t\t\tspanDict[(span[0], span[1])] = subtree.treeposition()\n\t\treturn spanDict",
"def aux_phrase_subtree(tree, tag_list):\n highest_subtree = max([(0, None)]+[(subtree.height(), subtree) for subtree in tree.subtrees(lambda x: x.node in tag_list) if subtree != tree[0]])[1]\n # print highest_subtree\n return highest_subtree",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if getattr(node, \"_broken_positions\", None):\n # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.\n return (1, 0), (1, 0)\n\n if supports_tokenless(node):\n return self._get_text_positions_tokenless(node, padded)\n\n return self.asttokens.get_text_positions(node, padded)",
"def leftmost_leaf_descendant_indices(self, node_list):\r\n # Cf. Zhang & Shasha:p.1249:\r\n # \"l(i) is the number of the leftmost leaf descendant of the subtree\r\n # rooted at T[i]. When T[i] is a leaf, l(i)=i.\"\r\n def get_leftmost_leaf(node):\r\n if not node.is_leaf():\r\n return get_leftmost_leaf(node.left_child())\r\n else:\r\n return node\r\n \r\n indices = []\r\n for node in node_list:\r\n leftmost_node = get_leftmost_leaf(node)\r\n for i in range(len(node_list)):\r\n if id(node_list[i]) == id(leftmost_node):\r\n indices.append(i)\r\n break\r\n return indices",
"def word_offset(signame, argname):\n return \"CCP_%s_%s_WORD_OFFSET\" % (\n signame.upper(), argname.upper())",
"def compute_level_offset(self, root: Position) -> int:\n pattern = self.adoc_title_pat if self.kind == 'adoc' else self.pandoc_title_pat\n for line in g.splitLines(root.b):\n if pattern.match(line):\n return 1\n return 0",
"def _get_text_positions_tokenless(self, node, padded):\n # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if sys.version_info[:2] < (3, 8):\n raise AssertionError(\"This method should only be called internally after checking supports_tokenless()\")\n\n if isinstance(node, ast.Module):\n # Modules don't have position info, so just return the range of the whole text.\n # The token-using method does something different, but its behavior seems weird and inconsistent.\n # For example, in a file with only comments, it only returns the first line.\n # It's hard to imagine a case when this matters.\n return (1, 0), self._line_numbers.offset_to_line(len(self._text))\n\n if not hasattr(node, 'lineno'):\n return (1, 0), (1, 0)\n\n assert node # tell mypy that node is not None, which we allowed up to here for compatibility\n\n decorators = getattr(node, 'decorator_list', [])\n if decorators:\n # Function/Class definition nodes are marked by AST as starting at def/class,\n # not the first decorator. This doesn't match the token-using behavior,\n # or inspect.getsource(), and just seems weird.\n start_node = decorators[0]\n else:\n start_node = node\n\n if padded and last_stmt(node).lineno != node.lineno:\n # Include leading indentation for multiline statements.\n start_col_offset = 0\n else:\n start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)\n\n start = (start_node.lineno, start_col_offset)\n\n # To match the token-using behaviour, we exclude trailing semicolons and comments.\n # This means that for blocks containing multiple statements, we have to use the last one\n # instead of the actual node for end_lineno and end_col_offset.\n end_node = last_stmt(node)\n end_lineno = cast(int, end_node.end_lineno)\n end_col_offset = cast(int, end_node.end_col_offset)\n end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)\n end = (end_lineno, end_col_offset)\n\n return start, end",
"def get_left_child_index(self):\n return (2 * self.index) + 1",
"def left_child_idx(idx):\n return (idx << 1) + 1",
"def span(self):\n return self.right - self.left",
"def _traverse(node):\n all_words = []\n if node.is_leaf:\n return node.actual_word\n for key, value in node.children.items():\n curr_word = Trie._traverse(value)\n all_words = all_words + curr_word\n return all_words",
"def word_midpoint(user_word):\n\t\n\tword_length = len(user_word) \t\t# Length of word.\n\tmid_point = word_length / 2 \t\t# Mid-Point of word.\n\tmid_letter = user_word[int(mid_point)]\t# Middle letter that will track.\n\t\n\treturn word_length, mid_letter",
"def index_tree(tree, offset):\n for idx, _ in enumerate(tree.leaves()):\n tree_location = tree.leaf_treeposition(idx)\n non_terminal = tree[tree_location[:-1]]\n # non_terminal.append(idx)\n non_terminal[0] = (non_terminal[0], idx+offset)\n return tree",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n raise NotImplementedError",
"def list_diff_index(list1, list2):\n\tfor list_index, strings in enumerate(zip(list1, list2)):\n\t\tchar_index = string_diff_index(strings[0], strings[1])\n\t\tif char_index != -1:\n\t\t\treturn (list_index, char_index)\n\treturn (diff_index(list1, list2), -1)",
"def test_calculate_offsets_word_part(self):\n applicable_terms = [('act', 'a')]\n text = \"I am about to act on this transaction.\"\n t = Terms(None)\n matches = t.calculate_offsets(text, applicable_terms)\n self.assertEqual(1, len(matches))\n self.assertEqual(1, len(matches[0][2]))",
"def get_ellipsis_location(tree, target_tag):\n\n index = \"\".join(re.findall(r\"\\d+\", target_tag))\n tag = re.sub(index, \"\", target_tag)\n counter = 0\n for node in tree.subtrees():\n if node.label().split(\"end\")[0] == tag:\n if counter == int(index):\n return node.treeposition()\n else:\n counter += 1",
"def find_boundaries(s, w):\n ind = w.i\n # handling height\n if ind + 2 < len(s) and s[ind + 1].text == \"'\" and s[ind + 2].like_num:\n return ind, ind + 3\n if ind - 2 >= 0 and s[ind - 1].text == \"'\" and s[ind - 2].like_num:\n return ind - 2, ind + 1\n\n # forward\n if s[ind].ent_iob == 2:\n return ind, ind + 1\n if ind != len(s) - 1:\n i = ind + 1\n while s[i].ent_iob == 1 and (s[i].pos_ == 'NUM' or s[i].like_num or\n (i+1 < len(s) and (s[i+1].pos_ == 'NUM' or s[i+1].like_num))):\n i += 1\n if i == len(s):\n break\n if s[i - 1].pos_ == 'NUM' or s[i - 1].like_num or s[i - 1].lemma_ in ['one']:\n end = i\n else:\n end = i - 1\n else:\n end = ind + 1\n\n # backward\n if s[ind].ent_iob == 3:\n return ind, end\n i = ind - 1\n while s[i].ent_iob != 2 and (s[i].pos_ == 'NUM' or s[i].like_num or s[i-1].pos_ == 'NUM' or s[i-1].like_num):\n i -= 1\n if i == -1:\n break\n i += 1\n if s[i].pos_ != 'NUM' and not s[i].like_num:\n i += 1\n return i, end",
"def _get_offset(widget: Widget, target_width: int) -> tuple[int, int]:\n\n if widget.parent_align is Widget.PARENT_CENTER:\n total = target_width - widget.width\n padding = total // 2\n return padding + total % 2, padding\n\n if widget.parent_align is Widget.PARENT_RIGHT:\n return target_width - widget.width, 0\n\n # Default to left-aligned\n return 0, target_width - widget.width + 1",
"def get_node_loc(node):\n lineno = node.lineno\n end_lineno = get_last_deep_child(node).lineno\n return end_lineno - lineno",
"def word_indexer(word_lst):\n unique_words = list(set(word_lst))\n word_index = {}\n for i in range(len(unique_words)):\n word_index[unique_words[i].lower()] = i + 4\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2\n word_index['<UNUSED>'] = 3\n return word_index"
] | [
"0.61927235",
"0.61151785",
"0.60680246",
"0.59052956",
"0.5831214",
"0.58253384",
"0.57263756",
"0.5528103",
"0.5399332",
"0.533705",
"0.5324308",
"0.53128594",
"0.5294802",
"0.52609503",
"0.5232178",
"0.5219642",
"0.52062565",
"0.5159778",
"0.51504004",
"0.51499826",
"0.5148476",
"0.5144797",
"0.51361585",
"0.5134929",
"0.5131046",
"0.5105586",
"0.5092464",
"0.50883764",
"0.50877476",
"0.50847477"
] | 0.61881554 | 1 |
Use subtreeAlignFunc to pick one subtree alignment from this frame. | def subtreeAlign(self, subtreeAlignFunc, srcTr, tgtTr):
self.subtreeAlignment_treepos = subtreeAlignFunc(self, srcTr, tgtTr)
self.subtreeAlignment_waMatrixPos = [self.treeposition2waMatrixPosition(suba[0], suba[1], srcTr, tgtTr) \
for suba in self.subtreeAlignment_treepos] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subtreeAlign(self, subtreeAlignFunc):\n\t\tfor frame in self.frameList:\n\t\t\tframe.subtreeAlign(subtreeAlignFunc, self.srcTree, self.tgtTree)",
"def rotate_subtree_left(subtree):\n right = subtree.right\n subtree.right = right.left\n right.left = subtree\n right.colour = subtree.colour\n subtree.colour = True # set red\n right.size = subtree.size\n subtree.size = size_node(subtree.left) + size_node(subtree.right) + 1\n return right",
"def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAlignment([alignment])\r\n\r\n return msa",
"def align(self, unit, main_unit, align_dict: dict, strict=False) -> 'function':\n\n aligner = Unit_Aligner(align_dict)\n return aligner.convert(unit, main_unit, strict)",
"def align(self, unit, main_unit, align_dict: dict, strict=False) -> 'function':\n\n aligner = Unit_Aligner(align_dict)\n return aligner.convert(unit, main_unit, strict)",
"def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)",
"def _assignAlignment(self, aln):\n self.sequence = None\n for i in range(self.nChildren()):\n self.children[i]._assignAlignment(aln)\n for seq in aln.seqs:\n if seq.name == self.label:\n self.sequence = seq\n break",
"def setSubtreeDF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n total = 0\n for i, child in enumerate(self):\n if total == index:\n self[i] = subtree\n return\n nbr_child = child.size\n if nbr_child + total > index:\n child.setSubtreeDF(index-total, subtree)\n return\n total += nbr_child",
"def rotate_subtree_right(subtree):\n left = subtree.left\n subtree.left = left.right\n left.right = subtree\n left.colour = subtree.colour\n subtree.colour = True # set red\n left.size = subtree.size\n subtree.size = size_node(subtree.left) + size_node(subtree.right) + 1\n return left",
"def align(self):\n ...",
"def with_aligner_edge(self, aligner_edge):\n\n return self.clone()._with_aligner_edge(aligner_edge)",
"def aligned(self):\n return self.__aligned",
"def setSubtreeBF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n queue = deque(izip(repeat(self, len(self[1:])), count(1)))\n for i in xrange(index):\n elem = queue.popleft()\n parent = elem[0]\n child = elem[1]\n if isinstance(parent[child], Tree):\n tree = parent[child]\n queue.extend(izip(repeat(tree, len(tree[1:])), count(1)))\n parent[child] = subtree",
"def align(self, alignment=\"ymax\") -> \"Group\":\n _align(elements=self.elements, alignment=alignment)\n return self",
"def _align(elements, alignment=\"ymax\"):\n if len(elements) == 0:\n return elements\n if alignment not in ([\"x\", \"y\", \"xmin\", \"xmax\", \"ymin\", \"ymax\"]):\n raise ValueError(\n \"'alignment' argument must be one of 'x','y','xmin', 'xmax', 'ymin','ymax'\"\n )\n value = Group(elements).__getattribute__(alignment)\n for e in elements:\n e.__setattr__(alignment, value)\n return elements",
"def build_tree(self, genes_share_one_alignment):\r\n species_name = self.species\r\n fun_built_tree = getattr(SSTree, species_name)\r\n return fun_built_tree(genes_share_one_alignment)",
"def clustalw_alignment_tree(seqrecs, **kwargs):\n\n def commandline(ft, **kwargs):\n with tempfile.NamedTemporaryFile(delete=False, mode=\"w\") as ft_out:\n cline = ClustalwCommandline(infile=ft.name, output=\"fasta\", newtree=ft_out.name)\n stdout, stderr = cline()\n return Phylo.read(ft_out.name, \"newick\")\n\n return _generic_alignment(commandline, seqrecs, preserve_order=False, **kwargs)",
"def AdjustTree(self):\n p = self\n while p != None:\n # If current node's leaves number is more than M, just split and adjust father node MBR\n if len(p.leaves) > p.M:\n p.SplitNode()\n else:\n # Or just adjust father point MBR\n if p.father != None:\n p.father.MBR = merge(p.father.MBR, p.MBR)\n p = p.father",
"def apply(self, f):\n if self.is_empty():\n return 0\n else:\n self.get_root().value = f(self.get_root().value)\n if self.get_left():\n self.get_left().apply(f)\n if self.get_right():\n self.get_right().apply(f)",
"def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. #\n ###################################################",
"def overlay_alignment(self):\n return self._overlay_alignment",
"def new_tree_file_name(alignment):\n return '%s.tre' % alignment",
"def __get_subtree(self, target_node: _AVLTreeNode) -> _AVLTreeNode or None:\n\n if target_node.left is not None:\n return target_node.left\n\n return target_node.right",
"def include_final_offset(node, offset):\n if offset != 0.0:\n for leaf in node.leaves:\n leaf.value = leaf.value + offset",
"def visit_node_align(self, node, children):\n if len(children) == 2:\n # Merge the two dictionaries\n return {**children[0], **children[1]}\n else:\n return children[0]",
"def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk",
"def align_rect(r, frame, align='tl', margin=0):\n if 'l' in align:\n r.left = frame.left + margin\n elif 'r' in align:\n r.right = frame.right - margin\n else:\n r.centerx = frame.centerx\n if 't' in align:\n r.top = frame.top + margin\n elif 'b' in align:\n r.bottom = frame.bottom - margin\n else:\n r.centery = frame.centery",
"def minimal_align(self):\n desired = int(PlatformVar(\"align\"))\n for ii in range(len(self.__content)):\n line = self.__content[ii]\n match = re.match(r'.*\\.align\\s+(\\d+).*', line)\n if match:\n align = int(match.group(1))\n # Due to GNU AS compatibility modes, .align may mean different things.\n if osarch_is_amd64 or osarch_is_ia32():\n if desired != align:\n if is_verbose():\n print(\"Replacing %i-byte alignment with %i-byte alignment.\" % (align, desired))\n self.__content[ii] = \" .balign %i\\n\" % (desired)\n else:\n print(\"Replacing low-order bit alignment %i with %i-byte alignment.\" % (align, desired))\n self.__content[ii] = \" .balign %i\\n\" % (desired)",
"def _subtree_first_position(self, p):\n walk = p\n while self.left(walk) is not None:\n walk = self.left(walk) # keep walking left\n return walk",
"def GetAlignment(self):\r\n\r\n return self.alignment"
] | [
"0.7943352",
"0.49963066",
"0.48671794",
"0.48312488",
"0.48312488",
"0.46881577",
"0.4671619",
"0.46248424",
"0.46107876",
"0.46053573",
"0.46003407",
"0.45801792",
"0.4549547",
"0.4548548",
"0.45392197",
"0.4525178",
"0.44993445",
"0.4491107",
"0.44905266",
"0.44830802",
"0.44036806",
"0.43816477",
"0.43785903",
"0.4370412",
"0.43602598",
"0.4358373",
"0.43578777",
"0.43545318",
"0.43316197",
"0.43257287"
] | 0.77718335 | 1 |
Return a dictionary, keys are 2-tuples, each tuple (i, j) represents a subtree span covering from word i to word j-1, values are the corresponding subtree treeposition. Here we only keep subtree spans, but not leaf spans, i.e. one-layer subtrees like (NNP China) are ignored. [experiment] include one-layer subtrees like (NNP China) as subtrees | def _extractSubtreeSpan_(self, tree, wordRulesFlag):
# Map every qualifying subtree of `tree` to its treeposition.
# Keys are (i, j) word spans (half-open over the leaf sequence, per the
# offset positions returned by Frame.treeposition2offsetPosition);
# values are the corresponding subtree treepositions.
spanDict = {}
snt = tree.leaves()  # NOTE(review): unused below — presumably kept for debugging
#pdb.set_trace()
for subtree in tree.subtrees():
# Unless word-level rules are requested, skip one-layer subtrees
# (height <= 2, e.g. (NNP China)) so only phrase-level spans are kept.
if not wordRulesFlag and subtree.height() <= 2:
continue
# Convert this subtree's treeposition into a (start, end) leaf span.
span = Frame.treeposition2offsetPosition([subtree.treeposition()], tree)[0]
spanDict[(span[0], span[1])] = subtree.treeposition()
return spanDict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_grouped_items(self):\n word_map = dict()\n it = self.root\n \n # Get all immediate children of root.\n for prefix, node in self.root.children.items():\n prefix_key = ''\n it = node\n # Continue with each child of root node till it has one child for finding common prefixes.\n # Once a node has more than one child it means, two words which share prefixes.\n while it is not None and len(it.children) == 1:\n key = next(iter(it.children))\n prefix_key = prefix_key + it.info + ' '\n it = it.children[key]\n prefix_key = prefix_key + it.info\n # Already grabbed the common prefix. Now find out all strings which share this prefix.\n word_map[prefix_key] = Trie._traverse(it)\n return word_map",
"def build(\n word_dict: AsrDictionary,\n subword_dict: AsrDictionary,\n subword_tokenizer: Callable[[str], List[str]] = None,\n ):\n\n root = lexical_prefix_tree(\n word_dict=word_dict,\n subword_dict=subword_dict,\n subword_tokenizer=subword_tokenizer,\n ) # build traditional tree data structure by reusing existing routines\n\n # Performs pre-order traversal of this tree to assign an index for each node\n max_num_children = 0\n nodes = [None] # nodes[0] is a dummy node for OOV\n node_to_id_dict = {}\n stack = [root]\n\n while len(stack) > 0:\n curr = stack.pop()\n node_id = len(nodes)\n nodes.append(curr)\n node_to_id_dict[curr] = node_id\n if len(curr.children) > max_num_children:\n max_num_children = len(curr.children)\n\n # Guarantee that the children are traversed ascendingly according to the subword index\n for _, next_node in sorted(\n curr.children.items(), key=lambda t: t[0], reverse=True\n ):\n stack.append(next_node)\n\n # Construct the tree\n num_nodes = len(nodes)\n children = np.full([num_nodes, max_num_children], 0, dtype=np.int64)\n prev_subword_idx = np.full([num_nodes], subword_dict.pad(), dtype=np.int64)\n word_idx = np.full([num_nodes], -1, dtype=np.int64)\n word_set_idx = np.full([num_nodes, 2], word_dict.pad(), dtype=np.int64)\n\n for node_id in range(1, len(nodes)): # skip 0, which is `None`\n node = nodes[node_id]\n # Guarantee that the children are traversed ascendingly according to the subword index\n for i, (subword_id, child) in enumerate(\n sorted(node.children.items(), key=lambda t: t[0])\n ):\n child_node_id = node_to_id_dict[child]\n children[node_id, i] = child_node_id\n prev_subword_idx[child_node_id] = subword_id\n\n word_idx[node_id] = node.word_idx\n if node.word_set is not None:\n word_set_idx[node_id] = node.word_set\n else:\n word_set_idx[node_id] = [0, len(word_dict) - 1]\n\n return TensorizedPrefixTree(\n children=torch.from_numpy(children),\n prev_subword_idx=torch.from_numpy(prev_subword_idx),\n 
word_idx=torch.from_numpy(word_idx),\n word_set_idx=torch.from_numpy(word_set_idx),\n )",
"def pos_treebank(data_word):\n #returns dict\n w_pos_treebank = nltk.pos_tag(data_word)\n w_pos_treebank = dict(w_pos_treebank)\n return w_pos_treebank",
"def build_dict_from(cls, trees):\n allnames = set()\n alllabels = set()\n suffixes = [cls.suffix_sep + cls.leaf_suffix, cls.suffix_sep + cls.last_suffix,\n \"{}{}{}{}\".format(cls.suffix_sep, cls.leaf_suffix, cls.suffix_sep, cls.last_suffix)]\n for tree in trees:\n treenames, treelabels = tree._all_names_and_labels()\n allnames.update(treenames)\n alllabels.update(treelabels)\n if len(alllabels) == 1 and alllabels == {None} or len(alllabels) == 0:\n alltokens = allnames\n else:\n alltokens = set(sum([[token+\"/\"+label for label in alllabels] for token in allnames], []))\n\n indic = OrderedDict([(\"<MASK>\", 0), (\"<START>\", 1), (\"<STOP>\", 2),\n (cls.root_symbol, 3), (cls.none_symbol, 4)])\n outdic = OrderedDict()\n outdic.update(indic)\n offset = len(indic)\n alltokens = [\"<RARE>\"] + sorted(list(alltokens))\n numtokens = len(alltokens)\n newidx = 0\n for token in alltokens:\n indic[token] = newidx + offset\n newidx += 1\n numtokens = len(alltokens)\n newidx = 0\n for token in alltokens:\n outdic[token] = newidx + offset\n for i, suffix in enumerate(suffixes):\n outdic[token +\n suffix] = newidx + offset + (i + 1) * numtokens\n newidx += 1\n return indic, outdic",
"def get_subtree(tree, start_node):\n \n plot_nodes = [start_node]\n finished=False\n while not finished:\n extra_nodes = []\n for node in plot_nodes:\n children = []\n if \"yes_branch\" in tree[node]:\n children.append(tree[node][\"yes_branch\"])\n if \"no_branch\" in tree[node]:\n children.append(tree[node][\"no_branch\"])\n for child in children:\n if child not in extra_nodes and child not in plot_nodes:\n extra_nodes.append(child)\n if extra_nodes == []:\n finished=True\n else:\n plot_nodes.extend(extra_nodes)\n \n sub_tree = {}\n for node in tree.keys():\n if node in plot_nodes:\n sub_tree[node] = tree[node]\n \n return sub_tree",
"def build_index(text: Iterable) -> Dict[str, List[Tuple[int, int]]]:\n index = defaultdict(list)\n for line_no, line in enumerate(text, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n return index",
"def build_tree(lines: []) -> {}:\n key_regex = re.compile(r\"(?P<key_val>^.*) bags contain(?P<contents>.*$)\")\n values_regex = re.compile(r\"(?P<count>\\d) (?P<color>.+?(?= bag))\")\n bag_map = {}\n for line in lines:\n match = key_regex.match(line)\n key = match['key_val']\n bag_map[key] = {}\n contents = match['contents']\n content_matches = values_regex.findall(contents)\n for color_match in content_matches:\n bag_map[key][color_match[1]] = int(color_match[0])\n\n return bag_map",
"def get_trees(self, word): # -> list:\r\n raise NotImplementedError",
"def __init__(self, annotated_text):\n self.tokens = ['ROOT'] # initially has only root element\n self.spans = [None]\n self.heads = [None] # root has no head element\n self.labels = [None] # root has no head element => no label\n\n span_to_index = {} # maps token spans to indexes\n root_indexes = [] # to store indexes of root elements\n\n # get token spans and values from the Texterra-annotated document\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n span = (an['start'], an['end'])\n self.spans.append(span)\n span_to_index[span] = i + 1\n self.tokens.append(annotated_text['text'][an['start']: an['end']])\n\n # iterate over the document again to set heads and labels\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n if 'parent' in an['value']:\n self.heads.append(span_to_index[(an['value']['parent']['start'], an['value']['parent']['end'])])\n self.labels.append(an['value']['type'])\n else:\n self.heads.append(0)\n self.labels.append('ROOT')\n root_indexes.append(i + 1)\n\n # stores dependency structure of the sentence in dict, with\n # root elements as key and their child elements as value.\n # child elements that have their own children are stored as dicts\n # where they serve as key and their children as value.\n self.tree = {}\n self._visited = [] # stores elements visited during tree's building process\n self.to_string = ''\n\n # iterate over root elements and build their subtrees\n for root_index in root_indexes:\n # get the root's span\n root_span = self.spans[root_index]\n\n # indicate the root as visited\n self._visited.append(root_index)\n\n # build the roots subtree\n sub_tree, sub_tree_string = self._build_tree(root_index)\n sub_tree_key = (root_span[0], root_span[1], self.tokens[root_index], 'ROOT')\n self.tree[sub_tree_key] = sub_tree\n\n # attach the subtrees string to the sentence's parse string\n if len(root_indexes) > 0 and not sub_tree_string.startswith('('):\n format_string = '({0}) '\n 
else:\n format_string = '{0} '\n self.to_string += format_string.format(sub_tree_string)",
"def get_grid(doc):\n grid = {}\n i = 0\n for sent in doc.sents:\n if len(sent) > 2:\n for token in sent:\n if token.pos_ == \"NOUN\" or token.pos_ == \"PRON\"\\\n or token.pos_ == \"PROPN\":\n add_to_grid(grid, i, token)\n i += 1\n return grid",
"def build_match_tree(abbreviation_list):\n match_tree = {}\n for word, abbreviation in abbreviation_list:\n tree_node = match_tree\n for letter in word[:-1]:\n if letter not in tree_node:\n tree_node[letter] = {}\n tree_node = tree_node[letter]\n tree_node[word[-1]] = abbreviation\n return match_tree",
"def compute_representation_positions(self):\r\n offset = 3\r\n for node in self.depth_first_search():\r\n\r\n if node.is_leaf():\r\n node.str_pos = offset\r\n offset += len(str(node.keys)) + 2\r\n\r\n else:\r\n first_child_mid = node.children[ 0].str_pos + len(str(node.children[ 0].keys))//2\r\n last_child_mid = node.children[-1].str_pos + len(str(node.children[-1].keys))//2\r\n node.str_pos = (first_child_mid + last_child_mid)//2 - len(str(node.keys))//2",
"def _get_term_depth_dictionary(self):\n\n\n\n\t\t# Find the root term(s) of the ontology.\n\t\troot_term_ids = []\n\t\tfor term in self.terms():\n\t\t\t# Check if this term has no inherited terms (is a root), discounting terms that are obsolete.\n\t\t\tinherited_terms = [t for t in term.superclasses(with_self=False)]\n\t\t\tif (len(inherited_terms)==0) and (term.name is not None) and (\"obsolete\" not in term.name):\n\t\t\t\troot_term_ids.append(term.id)\n\t\t\t\t\n\t\t# Find the depths of all terms in the ontology below those terms.\n\t\tdepths = {i:0 for i in root_term_ids}\n\t\tdepth = 1\n\t\tdone = False\n\t\twhile not done:\n\t\t\t\n\t\t\t# Add all the terms immediately below \n\t\t\tbefore = len(depths)\n\t\t\tnew_terms = []\n\t\t\tfor old_term_id in [i for i in depths.keys() if depths[i] == depth-1]:\n\t\t\t\tfor new_term_id in [t.id for t in self[old_term_id].subclasses(with_self=False,distance=1)]:\n\t\t\t\t\tif new_term_id not in depths:\n\t\t\t\t\t\tdepths[new_term_id] = depth\n\t\t\t\n\t\t\t# Increment the depth and see if any new terms were added to the distance dictionary during this pass.\n\t\t\tdepth = depth + 1\n\t\t\tafter = len(depths)\n\t\t\tif before == after:\n\t\t\t\tdone = True\n\t\t\t\t\n\t\t# Add any other remaining terms to the dictionary with a depth of 0 indicating minimal specificity.\n\t\tfor term in self.terms():\n\t\t\tif term.id not in depths:\n\t\t\t\tdepths[term.id] = 0\n\t\t\n\t\t# Return the dictionary mapping term IDs to their depth in the hierarchy.\n\t\treturn(depths)",
"def _tree():\n return collections.defaultdict(_tree)",
"def build_island_subtree(node, pattern, mapping):\n last_processed = 0\n content = node.text\n children = []\n\n # Intervals describes a non-overlapping splitting of the content according to the pattern.\n intervals = []\n for m in re.finditer(pattern, content):\n intervals.extend([(g, m.start(g), m.end(g)) for g in list(pattern.groupindex.keys()) if m.start(g) != m.end(g)])\n intervals.sort(key=lambda x: (x[1], x[2]))\n\n for interval in intervals:\n # Create simple HDDToken of the substring proceeding a subgroup.\n if last_processed < interval[1]:\n next_token_text = content[last_processed:interval[1]]\n prefix = content[0:last_processed]\n children.append(HDDToken(name='',\n text=next_token_text,\n start=Position(node.start.line + content[0:last_processed].count('\\n'),\n len(prefix) - prefix.rfind('\\n')),\n end=Position(node.start.line + next_token_text.count('\\n'),\n len(next_token_text) - next_token_text.rfind('\\n')),\n replace=next_token_text))\n\n # Process an island and save its subtree.\n children.append(build_hdd_tree(input_stream=InputStream(content[interval[1]:interval[2]]),\n grammar_name=mapping[interval[0]][0],\n start_rule=mapping[interval[0]][1]))\n last_processed = interval[2]\n\n # Create simple HDDToken of the substring following the last subgroup if any.\n if last_processed < len(content):\n next_token_text = content[last_processed:]\n prefix = content[0:last_processed]\n children.append(HDDToken(name='',\n text=next_token_text,\n start=Position(node.start.line + content[0:last_processed].count('\\n'),\n len(prefix) - prefix.rfind('\\n')),\n end=Position(node.start.line + next_token_text.count('\\n'),\n len(next_token_text) - next_token_text.rfind('\\n')),\n replace=next_token_text))\n return children",
"def hierarchy_pos(G, root, width=1.5, vert_gap = 0.2, vert_loc = 0, xcenter = 0.5):\n def h_recur(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5,\n pos = None, parent = None, parsed = []):\n if(root not in parsed):\n parsed.append(root)\n if pos == None:\n pos = {root:(xcenter,vert_loc)}\n else:\n pos[root] = (xcenter, vert_loc)\n neighbors = G.neighbors(root)\n if parent != None:\n neighbors.remove(parent)\n if len(neighbors)!=0:\n dx = width/len(neighbors)\n nextx = xcenter - width/2 - dx/2\n for neighbor in neighbors:\n nextx += dx\n pos = h_recur(G,neighbor, width = dx, vert_gap = vert_gap,\n vert_loc = vert_loc-vert_gap,\n xcenter=nextx, pos=pos,\n parent = root, parsed = parsed)\n return pos\n return h_recur(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5)",
"def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()",
"def get_span(tree):\n if tree[2][0]=='X':\n return tree[2][2]\n elif type(tree[2][0])==list:\n return tree[2][0][2]\n else:\n print 'Error in get_span'\n return None",
"def _build_tree(self, index):\n\n children = []\n to_string = '({0}/{1}'.format(self.tokens[index], self.labels[index])\n\n for i in range(1, len(self.tokens)):\n\n if i not in self._visited and self.heads[i] == index:\n self._visited.append(i)\n child_tree = {}\n c, s = self._build_tree(i)\n child_tree[(self.spans[i][0], self.spans[i][1], self.tokens[i], self.labels[i])] = c\n children.append(child_tree)\n to_string += ' {0}'.format(s)\n\n if len(children) > 0:\n to_string += ')'\n return children, to_string\n else:\n return children, to_string[1:]",
"def improve_tree(tree, freq_dict):\n # todo",
"def tree2tok(a_tree, a_tree_idx, a_root_idx, a_tk_start=0):\n # set of terminals corresponding to the given node\n iroot = a_tree.nodes[a_root_idx]\n tkset = set()\n if iroot[WORD] is not None:\n tkset.add((a_tk_start + iroot[WORD][0], iroot[WORD][1]))\n tr2tk = {(a_tree_idx, a_root_idx): (a_tree, tkset)}\n for ch_idcs in iroot[DEPS].itervalues():\n for ch_idx in ch_idcs:\n t2t = tree2tok(a_tree, a_tree_idx, ch_idx, a_tk_start)\n tr2tk.update(t2t)\n tkset.update(t2t[(a_tree_idx, ch_idx)][-1])\n return tr2tk",
"def findSameSubtrees(self):\n\n collapsable = {}\n\n for i in range(0, len(list(self.nodes))):\n for j in range(i + 1, len(list(self.nodes))):\n # Be careful, non-zero based indexing here\n if self.isSameTree(self.nodes[i + 1], self.nodes[j + 1]):\n # Note time complexity of isSameTree\n collapsable[self.nodes[i + 1]] = self.nodes[j + 1]\n\n return collapsable",
"def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values",
"def construct_parent_map(\n events: Sequence[SnubaTransaction],\n ) -> Dict[str, List[SnubaTransaction]]:\n parent_map: Dict[str, List[SnubaTransaction]] = defaultdict(list)\n for item in events:\n if not is_root(item):\n parent_map[item[\"trace.parent_span\"]].append(item)\n return parent_map",
"def instr_tree():\n return OrderedDict({'self': instr_dict(),\n 'measure': {'self': measure_dict()}})",
"def _get_bag_of_pos_with_dependency(words, index):\n pos_list = []\n\n def _get_governor(_index, name):\n governor_list = []\n if int(words[_index].governor) == 0:\n # case _index word has no governer\n return -1, governor_list\n governor_index = _index + (int(words[_index].governor) - int(words[_index].index))\n if governor_index < len(words):\n governor = words[governor_index]\n governor_list.append(_get_word_feature(governor) + '_' + name)\n else:\n governor_list.append(NONE_DEPENDENCY + '_' + name)\n return governor_index, governor_list\n\n def _get_children(_index, name):\n children = []\n child_list = []\n roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]\n start_index = 0\n end_index = len(words) - 1\n for i, w in roots:\n if i <= _index:\n start_index = i\n else:\n end_index = i - 1\n break\n for i, w in enumerate(words[start_index:end_index + 1]):\n if int(w.governor) == int(words[_index].index):\n children.append(start_index + i)\n child_list.append(_get_word_feature(w) + '_' + name)\n return children, child_list\n\n # add governor\n governor_index, governor_list = _get_governor(index, 'governor')\n if 0 <= governor_index < len(words):\n # case index word has a governer\n pos_list.extend(governor_list)\n if int(words[governor_index].governor) != 0:\n # case _index word has a governer\n # add ancestor\n _, ancestor_list = _get_governor(governor_index, 'ancestor')\n pos_list.extend(ancestor_list)\n\n # add sibling\n siblings, sibling_list = _get_children(governor_index, 'sibling')\n i_index = siblings.index(index)\n del sibling_list[i_index]\n del siblings[i_index]\n pos_list.extend(sibling_list)\n\n # add sibling list\n for i in siblings:\n sibling_children, sibling_child_list = _get_children(i, 'sibling_child')\n pos_list.extend(sibling_child_list)\n\n # add child\n children, child_list = _get_children(index, 'child')\n pos_list.extend(child_list)\n for i in children:\n grandchildren, grandchild_list = _get_children(i, 'grandchild')\n 
pos_list.extend(grandchild_list)\n return pos_list",
"def get_word_structure(word, roots_data_frame, suffixes_data_frame):\n\n result = find_roots(word, roots_data_frame['Root word regex'])\n\n root_starts = [x[0] for x in result]\n root_ends = [x[1] for x in result]\n root_indices = [x[2] for x in result]\n root_words = roots_data_frame.iloc[root_indices, 0].values\n root_meanings = roots_data_frame.iloc[root_indices, 1].values\n\n word_fragments = []\n prev_end = 0\n for start, end, meaning in zip(root_starts, root_ends, root_meanings):\n diff = start - prev_end\n if diff > 0:\n word_end = prev_end + diff\n word_fragments.append( ('none', word[prev_end:word_end], '', prev_end, word_end) )\n word_fragments.append( ('styled', word[start:end], meaning, start, end) )\n prev_end = end\n\n word_end = len(word)\n if prev_end < word_end:\n # Find suffix\n result = find_suffix(word[prev_end:word_end], suffixes_data_frame['Suffix regex'])\n word_fragments_suffix = []\n if len(result) > 0:\n suffix_start, suffix_end, suffix_regex_i = result[0]\n suffix_starts = [x[0] for x in result]\n suffix_ends = [x[1] for x in result]\n suffix_indices = [x[2] for x in result]\n suffix_words = suffixes_data_frame.iloc[suffix_indices, 0].as_matrix()\n suffix_meanings = suffixes_data_frame.iloc[suffix_indices, 1].as_matrix()\n for start, end, meaning in zip(suffix_starts, suffix_ends, suffix_meanings):\n start += prev_end\n end += prev_end\n word_fragments_suffix.append(\n ('styled', word[start:end], meaning, start, end) )\n word_end = start # Move word end boundary to suffix start\n\n if prev_end < word_end:\n word_fragments.append( ('none', word[prev_end:word_end], '', prev_end, word_end) )\n\n word_fragments.extend(word_fragments_suffix)\n\n # pprint(word_fragments)\n return word_fragments",
"def _build_tree(self, X, y, label, feature_names, depth, sample_weights=None):\n mytree = dict()\n # YOUR CODE HERE\n # TODO: Use `_choose_best_feature` to find the best feature to split the X. Then use `_split_dataset` to\n # get subtrees.\n # Hint: You may find `np.unique` is useful.\n # begin answer\n #1. no feature 2. all lables are the same 3. depth exceed 4. X is too small\n if len(feature_names)==0 or len(np.unique(y))==1 or depth >= self.max_depth or len(X) <= self.min_samples_leaf: \n return self._leaf_calculation(y, label, sample_weights)\n best_feature_idx, best_feature_val=self._choose_best_feature(X, y, label, sample_weights)\n best_feature_name = feature_names[best_feature_idx]\n feature_names=feature_names[:]\n feature_names.remove(best_feature_name)\n mytree={best_feature_name:{}}\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights = self._split_dataset(X, y, label, best_feature_idx, best_feature_val, sample_weights)\n mytree[best_feature_name][(best_feature_val, True)]=self._build_tree(sub1_X, sub1_y, label1, feature_names, depth+1, sub1_sample_weights)\n mytree[best_feature_name][(best_feature_val, False)]=self._build_tree(sub2_X, sub2_y, label2, feature_names, depth+1, sub2_sample_weights)\n # end answer\n return mytree",
"def make_tree(self, strings):\n # start from tree root\n root = self.tree()\n # mark if letter is the end of string or just its part\n root['end'] = set()\n root['part'] = set(self.s_options_total)\n for idx, string in enumerate(strings):\n node = root\n for letter in string:\n new_node = node[letter]\n new_node.setdefault('end', set())\n new_node.setdefault('part', set()).add(idx)\n node = new_node\n node['end'].add(idx)\n node['part'].remove(idx)\n\n return root",
"def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self"
] | [
"0.591011",
"0.5883264",
"0.5746557",
"0.5689263",
"0.5616009",
"0.54649633",
"0.5443807",
"0.5409442",
"0.53955895",
"0.53509617",
"0.53248715",
"0.5319948",
"0.53106767",
"0.52917504",
"0.52781814",
"0.52756643",
"0.52704436",
"0.5250692",
"0.52246916",
"0.52226436",
"0.52190006",
"0.52181065",
"0.5206816",
"0.5204727",
"0.5200763",
"0.5191813",
"0.5181947",
"0.51686984",
"0.51561564",
"0.5147198"
] | 0.75793076 | 0 |
Return True if "span" is consistent with the word alignment on language "lan", else return False. | def _consistentWithWA_(self, span, lan):
# Orient the alignment matrix so rows index the words of language `lan`:
# for the source side use self.waMatrix as-is; for the target side use
# its transpose (built element-wise below).
if lan == 'src':
wordAlign = self.waMatrix
else:
wordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))]
# Collect every column (other-language position) aligned to any word in
# the span [span[0], span[1]).
pos1 = [j for i in xrange(span[0], span[1]) for j in xrange(len(wordAlign[i])) if wordAlign[i][j] == 1]
# A span with no alignment links is trivially consistent.
if pos1 == []: return True
# The span is consistent iff no column inside the projected range
# [min(pos1), max(pos1)] is also linked to a row outside the span.
for i in xrange(span[0], span[1]):
for j in xrange(min(pos1), max(pos1) + 1):
# Column j must have no links in rows before the span start
# (wordAlign[:span[0]]) nor in rows at/after the span end.
if sum([wordAlign[row][j] for row in xrange(len(wordAlign[:span[0]]))]) == 0 and \
sum([wordAlign[row][j] for row in xrange(span[1], len(wordAlign))]) == 0:
continue
else:
# An outside row links into the projected range: inconsistent.
return False
#print >> debug_log, 'consistent:', span
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find(self) -> bool:\n alignments = []\n for sw_idx in range(len(self.sw)):\n for nu_idx in range(len(self.nu)):\n alignments.append(Alignment(self.nu, self.sw, nu_idx, sw_idx, self.orig_nu))\n alignment = max(alignments, key=lambda align: align.score)\n if alignment.score > 0:\n self.alignment = alignment\n return True\n return False",
"def isAligned(self):\n return (\n abs(self.desired_distance - self.vision.getDistance())\n <= self.DISTANCE_TOLERANCE\n ) and (abs(self.vision.getHeading()) <= self.HEADING_TOLERANCE)",
"def check_alignment(name):\n translate_alignment = {\n 'middle': 'center',\n 'center-left': 'left',\n 'center-right': 'right',\n 'bottom-left': 'left',\n 'bottom-right': 'right',\n 'bottom-center': 'center',\n }\n\n if name in translate_alignment:\n return translate_alignment[name]\n else:\n return None",
"def is_ambiguous_align(tags, multi_align_tag):\n for t in tags:\n if t[0] == multi_align_tag:\n return True\n return False",
"def _scanSpan_(self, span, lan):\n\t\t#pdb.set_trace()\n\t\tif lan == 'src':\n\t\t\twordAlign = self.waMatrix\n\t\telse:\n\t\t\twordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))] \n\t\t\t\n\t\totherSpan = [MAX, MIN]\n\t\tfor i in xrange(span[0], span[1]):\n\t\t\tfor j in xrange(len(wordAlign[i])):\n\t\t\t\tif wordAlign[i][j] == 1:\n\t\t\t\t\tif j < otherSpan[0]:\n\t\t\t\t\t\totherSpan[0] = j\n\t\t\t\t\tif j+1 > otherSpan[1]:\n\t\t\t\t\t\totherSpan[1] = j+1\n\n\t\tif otherSpan[0] == MAX or otherSpan[1] == MIN:\n\t\t\treturn []\n\n\t\t# relax span to include not-aligned words\n\t\totherSpanList = []\n\t\tfor j in xrange(otherSpan[0]-1, -1, -1):\n\t\t\tif sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:\n\t\t\t\totherSpanList.append((j, otherSpan[1]))\n\t\t\telse:\n\t\t\t\tbreak\n\t\tfor j in xrange(otherSpan[1], len(wordAlign[0])):\n\t\t\tif sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:\n\t\t\t\totherSpanList.append((otherSpan[0], j+1))\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\totherSpanList.append(tuple(otherSpan))\n\t\treturn otherSpanList",
"def is_span_valid(self)->bool:\n if self.get_start_offset() < 0 or self.get_end_offset() < 0:\n logger.error(\"Start and end of position of the fragment must be non-negative: %d, %d\"\n %(self.get_start_offset(), self.get_end_offset()))\n return False\n if self.get_start_offset() >= self.get_end_offset():\n logger.error(\"End position of the fragment must be greater than the starting one: start=%d, end=%d\"%(self.get_start_offset(), self.get_end_offset()))\n return False\n return True",
"def group_expects(self, word: str, update: bool = True) -> bool:\n expected = False\n if self.last_word is None:\n expected = True\n elif (\n self.last_word in self.lang.UNITS\n and self.grp_val < 10\n or self.last_word in self.lang.STENS\n and self.grp_val < 20\n ):\n expected = word in self.lang.HUNDRED\n elif self.last_word in self.lang.MTENS:\n expected = (\n word in self.lang.UNITS\n or word in self.lang.STENS\n and self.last_word in self.lang.MTENS_WSTENS\n )\n elif self.last_word in self.lang.HUNDRED:\n expected = word not in self.lang.HUNDRED\n\n if update:\n self.last_word = word\n return expected",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def assert_alignment(align: str) -> None:\n assert isinstance(align, str), f'alignment \"{align}\" must be a string'\n assert align in (ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT), \\\n f'incorrect alignment value \"{align}\"'",
"def fl_is_inside_lalign(align):\n _fl_is_inside_lalign = library.cfuncproto(\n library.load_so_libforms(), \"fl_is_inside_lalign\", \\\n cty.c_int, [cty.c_int],\n \"\"\"int fl_is_inside_lalign(int align) \"\"\")\n library.check_if_flinitialized()\n i_align = library.convert_to_intc(align)\n library.keep_elem_refs(align, i_align)\n retval = _fl_is_inside_lalign(i_align)\n return retval",
"def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result",
"def isaligned(a: np.ndarray, alignment: int) -> bool:\n return (a.ctypes.data % alignment) == 0",
"def verify_page_alignment(toc):\n if len({len(toc_entry[2]) for toc_entry in toc}) != 1:\n return False\n return True",
"def check_span_indexes(row, print_mismatch=False):\n\n span1 = \"\"\n span2 = \"\"\n signal = \"\"\n\n try:\n for arg in row[\"idx\"][\"span1\"]:\n span1 += row[\"context\"][arg[0]:arg[1]] + \" \"\n\n for arg in row[\"idx\"][\"span2\"]:\n span2 += row[\"context\"][arg[0]:arg[1]] + \" \"\n\n for sig in row[\"idx\"][\"signal\"]:\n signal += row[\"context\"][sig[0]:sig[1]] + \" \"\n\n flags = {'s1': False, 's2': False, 'sig': False, 'context': False}\n if span1.strip() != (\" \".join(row[\"span1\"])).strip():\n if print_mismatch:\n print(\"span1: [{}]\\n[{}]\".format(span1, (\" \".join(row[\"span1\"])).strip()))\n flags[\"s1\"] = True\n if span2.strip() != (\" \".join(row[\"span2\"])).strip():\n if print_mismatch:\n print(\"span2: [{}]\\n[{}]\".format(span2, (\" \".join(row[\"span2\"])).strip()))\n flags[\"s2\"] = True\n if signal.strip() != (\" \".join(row[\"signal\"])).strip():\n if print_mismatch:\n print(\"signal: [{}]\\n[{}]\".format(signal, (\" \".join(row[\"signal\"])).strip()))\n flags[\"sig\"] = True\n if str(row[\"context\"]) == \"nan\":\n flags[\"context\"] = True\n if any(a for a in flags.values()):\n if print_mismatch:\n print(\"context: [{}] \\n========\".format(row[\"context\"]))\n return False\n except Exception as e:\n return False\n return True",
"def is_abecedarian(word):\n pass",
"def other_alignments_with_same_score(all_alignments, cur_alignment_idx,\n cur_alignment_score):\n if len(all_alignments) <= 1:\n return False\n\n for i, a0 in enumerate(all_alignments):\n if i > 0 and a0.score < cur_alignment_score:\n break\n if i == cur_alignment_idx:\n continue\n elif a0.score == cur_alignment_score:\n return True\n\n return False",
"def verify(self, word):\n if len(word) < 2:\n return (True, word)\n\n if word.lower() in self.replacement_words.keys():\n return (True, self.replacement_words[word.lower()])\n\n if word.lower() in self.word_list:\n return (True, word)\n\n if word.lower() in self.ignored_words:\n return (True, word)\n\n return (False, word)",
"def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False",
"def is_abecedarian(w):\n\treturn list(w) == sorted(list(w))",
"def fl_is_center_lalign(align):\n _fl_is_center_lalign = library.cfuncproto(\n library.load_so_libforms(), \"fl_is_center_lalign\", \\\n cty.c_int, [cty.c_int],\n \"\"\"int fl_is_center_lalign(int align) \"\"\")\n library.check_if_flinitialized()\n i_align = library.convert_to_intc(align)\n library.keep_elem_refs(align, i_align)\n retval = _fl_is_center_lalign(i_align)\n return retval",
"def is_translation_unit(self):\r\n return conf.lib.clang_isTranslationUnit(self)",
"def is_compatible(*, structure, symmetry):\n analyzer = mg.symmetry.analyzer.SpacegroupAnalyzer(structure)\n valid_sym_ops = analyzer.get_symmetry_operations(cartesian=False)\n for sym_op in valid_sym_ops:\n if (\n np.\n allclose(sym_op.translation_vector, symmetry.translation_vector)\n and np.allclose(sym_op.rotation_matrix, symmetry.rotation_matrix)\n ):\n return True\n return False",
"def is_word_common(self, word):\n if word in self.stopwords:\n return True\n if re.match(r'[a-zA-Z]+[a-zA-Z]$', word):\n word = self.lemmatizer.lemmatize(word, pos='n')\n synset = wn.synsets(word)\n if len(synset) > 0:\n return True\n else:\n return False\n return False",
"def is_google_format(self, docstring):\n\n google_section_names = self._google_preprocessor.get_section_names()\n for section_name in google_section_names:\n if section_name in docstring:\n return True\n return False",
"def is_lexical(word_i, word_j):\n if word_i.isalpha() and word_j.isalpha():\n return True\n return False",
"def has_layout(lang, name):\n kn = _get_keyboard_names()\n return kn.has_layout(lang, name)",
"def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True",
"def is_aligned(self):\n\n return self._bits == 0",
"def is_marked(self):\n\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n for c in self.case_text:\n if c['pos0'] <= pos0 <= c['pos1']:\n return True\n if c['pos0'] <= pos1 <= c['pos1']:\n return True\n return False",
"def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)"
] | [
"0.63992625",
"0.6315273",
"0.60882205",
"0.58788294",
"0.58561766",
"0.56875026",
"0.5668905",
"0.56449294",
"0.56140864",
"0.5543863",
"0.5537767",
"0.5527828",
"0.54822993",
"0.54472035",
"0.5431362",
"0.54153144",
"0.5383697",
"0.5379699",
"0.53690237",
"0.5358199",
"0.5351267",
"0.5346621",
"0.5336489",
"0.532729",
"0.5321575",
"0.5316157",
"0.5314994",
"0.529734",
"0.52848256",
"0.5283243"
] | 0.7869109 | 0 |
Return a 2tuple, which is a span of the other language, representing the corresponding span of the "span" in "lan". | def _scanSpan_(self, span, lan):
#pdb.set_trace()
if lan == 'src':
wordAlign = self.waMatrix
else:
wordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))]
otherSpan = [MAX, MIN]
for i in xrange(span[0], span[1]):
for j in xrange(len(wordAlign[i])):
if wordAlign[i][j] == 1:
if j < otherSpan[0]:
otherSpan[0] = j
if j+1 > otherSpan[1]:
otherSpan[1] = j+1
if otherSpan[0] == MAX or otherSpan[1] == MIN:
return []
# relax span to include not-aligned words
otherSpanList = []
for j in xrange(otherSpan[0]-1, -1, -1):
if sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:
otherSpanList.append((j, otherSpan[1]))
else:
break
for j in xrange(otherSpan[1], len(wordAlign[0])):
if sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:
otherSpanList.append((otherSpan[0], j+1))
else:
break
otherSpanList.append(tuple(otherSpan))
return otherSpanList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getspaninfo(lnode, rnode):\n try:\n eduspan = (lnode.eduspan[0], rnode.eduspan[1])\n except TypeError:\n print lnode.prop, rnode.prop\n print lnode.nucspan, rnode.nucspan\n return eduspan",
"def spanToTuple(self, span):\n\t\ttry:\n\t\t\tspanBegin, spanEnd = span.split(',')\n\t\texcept ValueError:\n\t\t\tnumSpans = len(span.split(';'))\n\t\t\tspans = [subSpan.split(',') for subSpan in span.split(';')]\n\t\t\tspanTuple = [(int(spanBegin), int(spanEnd)) for spanBegin, spanEnd in spans]\n\t\telse:\n\t\t\tnumSpans = 1\n\t\t\tspanTuple = [(int(spanBegin), int(spanEnd))]\n\t\treturn numSpans, spanTuple",
"def get_span(self, i, j):\n assert j > i\n return self.span_v[i][j - i - 1]",
"def get_current_span():\n return cv_span_context.get(), cv_span_parent.get()",
"def parsed_span(self, start, end, include_boundaries=False):\n start_sent, start_tok = start\n end_sent, end_tok = end\n\n if start_sent == end_sent:\n span = self.conll[start_sent][start_tok:end_tok]\n\n else:\n\n span = self.conll[start_sent][start_tok:]\n\n if include_boundaries:\n span.append([-1, 'EOS'])\n\n for sent in range(start_sent + 1, end_sent):\n span.extend(self.conll[sent])\n\n if include_boundaries:\n span.append([-1, 'EOS'])\n\n span.extend(self.conll[end_sent][:end_tok])\n\n return span",
"def get_span(self,key):\n\t\tif self.wordspans.has_key(key):\n\t\t\t# span of this word already computed\n\t\t\treturn self.wordspans[key]\n\t\telif not self.deps.has_key(key):\n\t\t\t#The word has no dependents\n\t\t\tself.wordspans[key] = (key-1,key)\n\t\t\treturn (key -1,key)\n\t\telif self.deps.has_key(key):\n\t\t\t# make a list with its dependents\n\t\t\tdeplist = [(key-1,key)]\n\t\t\tfor item in self.deps[key]:\n\t\t\t\tdeplist.append(self.get_span(item[0]))\n\t\t\tself.wordspans[key] = (min(min(deplist)),max(max(deplist)))\n\t\t\treturn self.wordspans[key]",
"def _add_constituents_spanning(self, span, constituents, tokens):\n ...",
"def get_shortening_and_split_point(self):\n\n def get_shortening_type():\n \"\"\"Vrati Type of Lexical Shortening alebo None.\"\"\"\n if self.len_fn(self.sw) == self.get_length(): # self.len_fn je obycajne len\n return 'FSW'\n elif self.alignment.sw_idx == 0:\n return 'RS'\n elif self.alignment.sw_idx + len(self.alignment.generate()) == len(self.sw):\n return 'LS'\n\n shortening = get_shortening_type()\n\n if shortening not in ('RS', 'LS'):\n return shortening, None\n\n split_interface = {\n 'RS': (self.alignment.score - 1, self.alignment.score),\n 'LS': (self.alignment.sw_idx - 1, self.alignment.sw_idx)\n }[shortening]\n\n # ohodnotit rozhranie\n # ci je slabikove\n if split_interface in get_interfaces(self.sw_str):\n return shortening, 'syllable'\n\n types = [Phones.get_type(self.sw[idx]) for idx in split_interface]\n\n if types == ['c', 'v']:\n return shortening, 'onset-nucleus' # consonant - vowel\n elif types == ['v', 'c']:\n return shortening, 'nucleus-coda' # vowel - consonant\n else:\n return shortening, None",
"def get_span(tree):\n if tree[2][0]=='X':\n return tree[2][2]\n elif type(tree[2][0])==list:\n return tree[2][0][2]\n else:\n print 'Error in get_span'\n return None",
"def _extractSubtreeSpan_(self, tree, wordRulesFlag):\n\t\tspanDict = {}\n\t\tsnt = tree.leaves()\n\t\t#pdb.set_trace()\n\t\tfor subtree in tree.subtrees():\n\t\t\tif not wordRulesFlag and subtree.height() <= 2:\n\t\t\t\tcontinue\n\t\t\tspan = Frame.treeposition2offsetPosition([subtree.treeposition()], tree)[0] \n\t\t\tspanDict[(span[0], span[1])] = subtree.treeposition()\n\t\treturn spanDict",
"def span(self):\r\n return self._start, self._end",
"def get_comp_spanrels(self):",
"def span(self):\n return self.right - self.left",
"def parse_streetdir(self):\n \n first = self.words[self.index]['word']\n if self.index + 1 < self.length:\n second = self.words[self.index+1]['word']\n else:\n second = None\n \n if first in ['northwest', 'northeast', 'southwest', 'southeast']:\n return first, 1 \n elif first == 'nw':\n return \"northwest\", 1\n elif first == 'ne':\n return \"northeast\", 1\n elif first == 'sw':\n return \"southwest\", 1\n elif first == 'se':\n return \"southeast\", 1\n \n if first in ['n', 'north']:\n if second in ['w', 'west']:\n return \"northwest\", 2\n elif second in ['e', 'east']:\n return \"northeast\", 2\n else:\n return \"north\", 1\n elif first in ['s', 'south']:\n if second in ['w', 'west']:\n return \"southwest\", 2\n elif second in ['e', 'east']:\n return \"southeast\", 2\n else:\n return \"south\", 1\n elif first in ['e', 'east']:\n return \"east\", 1\n elif first in ['w', 'west']:\n return \"west\", 1\n \n return None,0",
"def find_multiword_span(self, gold_words, system_words, gi, si):\n\n # Initialize multiword_span_end characters index.\n if gold_words[gi].is_multiword:\n multiword_span_end = gold_words[gi].span.end\n if (\n not system_words[si].is_multiword\n and system_words[si].span.start < gold_words[gi].span.start\n ):\n si += 1\n else: # if system_words[si].is_multiword\n multiword_span_end = system_words[si].span.end\n if (\n not gold_words[gi].is_multiword\n and gold_words[gi].span.start < system_words[si].span.start\n ):\n gi += 1\n gs, ss = gi, si\n\n # Find the end of the multiword span (so both gi and si are pointing\n # to the word following the multiword span end).\n while not self.beyond_end(\n gold_words, gi, multiword_span_end\n ) or not self.beyond_end(system_words, si, multiword_span_end):\n if gi < len(gold_words) and (\n si >= len(system_words)\n or gold_words[gi].span.start <= system_words[si].span.start\n ):\n multiword_span_end = self.extend_end(gold_words[gi], multiword_span_end)\n gi += 1\n else:\n multiword_span_end = self.extend_end(system_words[si], multiword_span_end)\n si += 1\n return gs, ss, gi, si",
"def _get_l2_label(self):\n return self.__l2_label",
"def span(self):\n return self.interval.span",
"def _extract_spans(self, tag_sequence: List[int]) -> Set[Tuple[Tuple[int, int], str]]:\n spans = set()\n span_start = 0\n span_end = 0\n active_conll_tag = None\n for index, integer_tag in enumerate(tag_sequence):\n # Actual BIO tag.\n string_tag = self._bio_vocabulary[integer_tag]\n bio_tag = string_tag[0]\n conll_tag = string_tag[2:]\n if bio_tag == \"O\" or conll_tag ==\"V\":\n # The span has ended.\n if active_conll_tag:\n spans.add(((span_start, span_end), active_conll_tag))\n active_conll_tag = None\n # We don't care about tags we are\n # told to ignore, so we do nothing.\n continue\n elif bio_tag == \"U\":\n # The U tag is used to indicate a span of length 1,\n # so if there's an active tag we end it, and then\n # we add a \"length 0\" tag.\n if active_conll_tag:\n spans.add(((span_start, span_end), active_conll_tag))\n spans.add(((index, index), conll_tag))\n active_conll_tag = None\n elif bio_tag == \"B\":\n # We are entering a new span; reset indices\n # and active tag to new span.\n if active_conll_tag:\n spans.add(((span_start, span_end), active_conll_tag))\n active_conll_tag = conll_tag\n span_start = index\n span_end = index\n elif bio_tag == \"I\" and conll_tag == active_conll_tag:\n # We're inside a span.\n span_end += 1\n else:\n # This is the case the bio label is an \"I\", but either:\n # 1) the span hasn't started - i.e. an ill formed span.\n # 2) The span is an I tag for a different conll annotation.\n # We'll process the previous span if it exists, but also\n # include this span. This is important, because otherwise,\n # a model may get a perfect F1 score whilst still including\n # false positive ill-formed spans.\n if active_conll_tag:\n spans.add(((span_start, span_end), active_conll_tag))\n active_conll_tag = conll_tag\n span_start = index\n span_end = index\n # Last token might have been a part of a valid span.\n if active_conll_tag:\n spans.add(((span_start, span_end), active_conll_tag))\n return spans",
"def parse(sentence,label_sentence,sign):\n span = []\n start = None\n for index, word in enumerate(sentence):\n if word==B_token:\n start = index\n elif word==S_token:\n # if ''.join(label_sentence[index:index+1]) in kb_set: ## 在数据库中发现实体名\n # span.append((index, index+1))\n # start = None\n # else:\n # start = None\n span.append((index, index + 1))\n start = None\n elif word == E_token and start is not None:\n end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n span.append((start, end + 1))\n start = None\n # elif word==E_token and start is not None:\n # end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n # 相邻两entity可以合并则合并\n if len(span) <= 1 or sign == 'label':\n return span\n new_span = []\n for i in range(len(span)-1):\n if span[i][1]==span[i+1][0] and ''.join(label_sentence[span[i][0]:span[i+1][1]]) in kb_set:\n new_span.append((span[i][0], span[i+1][1]))\n if i == len(span)-2:\n return new_span\n else:\n new_span.append((span[i][0], span[i][1]))\n new_span.append((span[-1][0], span[-1][1]))\n return new_span",
"def parse(sentence,label_sentence,sign):\n span = []\n start = None\n for index, word in enumerate(sentence):\n if word==B_token:\n start = index\n elif word==S_token:\n # if ''.join(label_sentence[index:index+1]) in kb_set: ## 在数据库中发现实体名\n # span.append((index, index+1))\n # start = None\n # else:\n # start = None\n span.append((index, index + 1))\n start = None\n elif word==E_token and start is not None:\n end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n span.append((start, end + 1))\n start = None\n # 相邻两entity可以合并则合并\n if len(span) <= 1 or sign == 'label':\n return span\n new_span = []\n for i in range(len(span)-1):\n if span[i][1]==span[i+1][0] and ''.join(label_sentence[span[i][0]:span[i+1][1]]) in kb_set:\n new_span.append((span[i][0], span[i+1][1]))\n if i == len(span)-2:\n return new_span\n else:\n new_span.append((span[i][0], span[i][1]))\n new_span.append((span[-1][0], span[-1][1]))\n return new_span",
"def lookup(conn, language_code, graphic, phonetic, restrictions):\n c = conn.cursor()\n entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))\n return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)",
"def to_spans(l_ids, voc):\n spans = {}\n current_lbl = None\n current_start = None\n for i, l_id in enumerate(l_ids):\n l = voc[l_id]\n\n if l[0] == 'B': \n # Beginning of a named entity: B-something.\n if current_lbl:\n # If we're working on an entity, close it.\n spans[current_start] = (current_lbl, i)\n # Create a new entity that starts here.\n current_lbl = l[2:]\n current_start = i\n elif l[0] == 'I':\n # Continuation of an entity: I-something.\n if current_lbl:\n # If we have an open entity, but its label does not\n # correspond to the predicted I-tag, then we close\n # the open entity and create a new one.\n if current_lbl != l[2:]:\n spans[current_start] = (current_lbl, i)\n current_lbl = l[2:]\n current_start = i\n else:\n # If we don't have an open entity but predict an I tag,\n # we create a new entity starting here even though we're\n # not following the format strictly.\n current_lbl = l[2:]\n current_start = i\n else:\n # Outside: O.\n if current_lbl:\n # If we have an open entity, we close it.\n spans[current_start] = (current_lbl, i)\n current_lbl = None\n current_start = None\n if current_lbl != None:\n spans[current_start] = (current_lbl, i+1)\n return spans",
"def find_span(input_text: str, pattern: Any,\n prefix_len: int) -> Tuple[int, int]:\n match = pattern.search(input_text)\n span_start = match.start() + prefix_len + 1\n # We want inclusive spans, hence -2 instead of -1\n span_end = match.end() - 2\n return (span_start, span_end)",
"def tags_to_spans(tags):\n spans = set()\n span_start = 0\n span_end = 0\n active_conll_tag = None\n for index, string_tag in enumerate(tags):\n # Actual BIO tag.\n bio_tag = string_tag[0]\n assert bio_tag in [\"B\", \"I\", \"O\"], \"Invalid Tag\"\n conll_tag = string_tag[2:]\n if bio_tag == \"O\":\n # The span has ended.\n if active_conll_tag:\n spans.add((active_conll_tag, (span_start, span_end)))\n active_conll_tag = None\n # We don't care about tags we are\n # told to ignore, so we do nothing.\n continue\n elif bio_tag == \"B\":\n # We are entering a new span; reset indices and active tag to new span.\n if active_conll_tag:\n spans.add((active_conll_tag, (span_start, span_end)))\n active_conll_tag = conll_tag\n span_start = index\n span_end = index\n elif bio_tag == \"I\" and conll_tag == active_conll_tag:\n # We're inside a span.\n span_end += 1\n else:\n # This is the case the bio label is an \"I\", but either:\n # 1) the span hasn't started - i.e. an ill formed span.\n # 2) We have IOB1 tagging scheme.\n # We'll process the previous span if it exists, but also include this\n # span. This is important, because otherwise, a model may get a perfect\n # F1 score whilst still including false positive ill-formed spans.\n if active_conll_tag:\n spans.add((active_conll_tag, (span_start, span_end)))\n active_conll_tag = conll_tag\n span_start = index\n span_end = index\n # Last token might have been a part of a valid span.\n if active_conll_tag:\n spans.add((active_conll_tag, (span_start, span_end)))\n # Return sorted list of spans\n return sorted(list(spans), key=lambda x: x[1][0])",
"def char_span_to_token_span(self, token_offsets, character_span):\n\t\t# We have token offsets into the passage from the tokenizer; we _should_ be able to just find\n\t\t# the tokens that have the same offsets as our span.\n\t\terror = False\n\t\tstart_index = 0\n\t\twhile start_index < len(token_offsets) and token_offsets[start_index][0] < character_span[0]:\n\t\t\tstart_index += 1\n\t\t# start_index should now be pointing at the span start index.\n\t\tif token_offsets[start_index][0] > character_span[0]:\n\t\t\t# In this case, a tokenization or labeling issue made us go too far - the character span\n\t\t\t# we're looking for actually starts in the previous token. We'll back up one.\n\t\t\tpass\n\t\t\t# print(\"Bad labelling or tokenization - start offset doesn't match\")\n\t\t\tstart_index -= 1\n\t\tif token_offsets[start_index][0] != character_span[0]:\n\t\t\terror = True\n\t\tend_index = start_index\n\t\twhile end_index < len(token_offsets) and token_offsets[end_index][1] < character_span[1]:\n\t\t\tend_index += 1\n\t\tif end_index == start_index and token_offsets[end_index][1] > character_span[1]:\n\t\t\t# Looks like there was a token that should have been split, like \"1854-1855\", where the\n\t\t\t# answer is \"1854\". We can't do much in this case, except keep the answer as the whole\n\t\t\t# token.\n\t\t\tpass\n\t\t\t# print(\"Bad tokenization - end offset doesn't match\")\n\t\telif token_offsets[end_index][1] > character_span[1]:\n\t\t\t# This is a case where the given answer span is more than one token, and the last token is\n\t\t\t# cut off for some reason, like \"split with Luckett and Rober\", when the original passage\n\t\t\t# said \"split with Luckett and Roberson\". 
In this case, we'll just keep the end index\n\t\t\t# where it is, and assume the intent was to mark the whole token.\n\t\t\tpass\n\t\t\t# print(\"Bad labelling or tokenization - end offset doesn't match\")\n\t\tif token_offsets[end_index][1] != character_span[1]:\n\t\t\terror = True\n\t\treturn (start_index, end_index), error",
"def get_spans(self, tokens_hightlight):\n spans, nb_tokens = [], len(tokens_hightlight)\n cur_start_idx, cur_bool_val = 0, tokens_hightlight[0]\n for idx in range(nb_tokens):\n if idx == nb_tokens - 1:\n if tokens_hightlight[idx] != cur_bool_val:\n spans.append((cur_start_idx, nb_tokens - 2, cur_bool_val))\n spans.append((nb_tokens - 1, nb_tokens - 1, tokens_hightlight[idx]))\n else:\n spans.append((cur_start_idx, nb_tokens - 1, cur_bool_val))\n else:\n if tokens_hightlight[idx] != cur_bool_val:\n spans.append((cur_start_idx, idx - 1, cur_bool_val))\n cur_start_idx, cur_bool_val = idx, tokens_hightlight[idx]\n return spans",
"def get_bispans(symbol: Span):\n if not isinstance(symbol, Span):\n raise ValueError('I need a span, got %s of type %s' % (symbol, type(symbol)))\n s, start2, end2 = symbol.obj() # this unwraps the target or length annotation\n _, start1, end1 = s.obj() # this unwraps the source annotation\n return (start1, end1), (start2, end2)",
"def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:\n\n # Create a new document (to avoid conflicting annotations)\n doc2 = self.create_new_doc(doc)\n # And run the model\n for _, proc in self.model.pipeline:\n doc2 = proc(doc2)\n # Add the annotation\n for ent in doc2.ents:\n yield ent.start, ent.end, ent.label_",
"def merge(span1, span2, t=None):\n if span1['text'] == \"\":\n return span2\n if span2['text'] == \"\":\n return span1\n\n\n # Putting the text together\n if span1['text'][-1] != \" \" and span2['text'][0] != \" \" and span2['bbox'][0] - span1['bbox'][2] > 1:\n # This means there won't be a space character in between the spans\n text = span1['text'] + \" \" + span2['text']\n else:\n text = span1[\"text\"] + span2[\"text\"]\n\n if 'html' in span1.keys():\n prev_html = span1['html']\n else:\n prev_html = span1['text']\n if t == \"2+\":\n html = prev_html + \"<sup>\" + span2['text'] + \"</sup>\"\n elif t == \"2-\":\n html = prev_html + \"<sub>\" + span2['text'] + \"</sub>\"\n elif t == \"1+\":\n html = \"<sup>\" + prev_html + \"</sup>\" + span2['text']\n elif t == \"1-\":\n html = \"<sub>\" + prev_html + \"</sub>\" + span2['text']\n\n # calculating the y coordinates of the new bbox\n if (span1[\"bbox\"][3] - span1[\"bbox\"][1]) > (span2[\"bbox\"][3] - span2[\"bbox\"][1]):\n bbox1 = span1[\"bbox\"][1]\n bbox3 = span1[\"bbox\"][3]\n else:\n bbox3 = span2[\"bbox\"][3]\n bbox1 = span2[\"bbox\"][1]\n\n # if span1['text'] == \"\":\n # return {\"bbox\": span1['bbox'], \"font\": span2['font'], \"size\": span2['size'], \"text\": span2['text'], \"html\":span2['text']}\n # text = span1[\"text\"] + span2[\"text\"]\n\n # Determining the font size of new span based of length of text\n if len(span1['text']) > len(span2['text']):\n size = span1['size']\n else:\n size = span2['size']\n\n # Getting new font type of new span based on being italic or bold and length of text\n if not (span1['font'][-6:].lower() == 'italic' or span1['font'][-4:].lower() == 'bold' or\n span2['font'][-6:].lower() == 'italic' or span2['font'][-4:].lower() == 'bold'):\n if len(span1['text']) > len(span2['text']):\n type_font = span1['font']\n else:\n type_font = span2['font']\n else:\n # print(span2)\n # print()\n if span1['font'][-6:].lower() == 'italic' or span1['font'][-4:].lower() == 'bold':\n type_font = 
span1['font']\n else:\n type_font = span2['font']\n if t is None and 'html' not in span1.keys():\n merged = {\"bbox\": [span1['bbox'][0], bbox1, span2['bbox'][2], bbox3], \"font\": type_font, \"size\": size,\n \"text\": text}\n elif t is None and 'html' in span1.keys():\n merged = {\"bbox\": [span1['bbox'][0], bbox1, span2['bbox'][2], bbox3], \"font\": type_font, \"size\": size,\n \"text\": text, \"html\": span1['html']+span2['text']}\n else:\n merged = {\"bbox\": [span1['bbox'][0], bbox1, span2['bbox'][2], bbox3], \"font\": type_font, \"size\": size,\n \"text\": text, \"html\": html}\n # print(merged)\n return merged",
"def lang(self, on, lang_name):\n tag = 'span'\n if lang_name != self.request.current_lang:\n # Enclose text in span using lang attributes\n if on:\n attr = self._langAttr(lang=lang_name)\n return self._open(tag, attr=attr)\n return self._close(tag)\n\n # Direction did not change, no need for span\n return ''"
] | [
"0.5928464",
"0.57783765",
"0.55528563",
"0.54318804",
"0.5424503",
"0.53724504",
"0.53511155",
"0.53242594",
"0.53223926",
"0.5250803",
"0.5224226",
"0.5191113",
"0.51670516",
"0.51624835",
"0.5142095",
"0.51099247",
"0.5045781",
"0.5033951",
"0.49898773",
"0.4979068",
"0.4977251",
"0.4972414",
"0.49586305",
"0.49549744",
"0.4949182",
"0.49176732",
"0.49102423",
"0.48985484",
"0.48940527",
"0.4892441"
] | 0.6735803 | 0 |
For each frame in this sentence, pick one subtree alignment from it. | def subtreeAlign(self, subtreeAlignFunc):
for frame in self.frameList:
frame.subtreeAlign(subtreeAlignFunc, self.srcTree, self.tgtTree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAlignment([alignment])\r\n\r\n return msa",
"def _assignAlignment(self, aln):\n self.sequence = None\n for i in range(self.nChildren()):\n self.children[i]._assignAlignment(aln)\n for seq in aln.seqs:\n if seq.name == self.label:\n self.sequence = seq\n break",
"def subtreeAlign(self, subtreeAlignFunc, srcTr, tgtTr):\n\t\tself.subtreeAlignment_treepos = subtreeAlignFunc(self, srcTr, tgtTr)\n\t\tself.subtreeAlignment_waMatrixPos = [self.treeposition2waMatrixPosition(suba[0], suba[1], srcTr, tgtTr) \\\n\t\t\t\tfor suba in self.subtreeAlignment_treepos]",
"def getAlignment(self):\n # Code to complete - generated by traceback through matrix to generate aligned pairs\n \n # find the position of the max_value\n max_value = self.getMaxAlignmentScore()\n max_pos = tuple(numpy.argwhere(self.matrix == max_value)[-1])\n x_pos = max_pos[0]; y_pos = max_pos[1]\n\n # array that holds the tuples\n path = list()\n\n # now find the path to the 0\n \n while self.matrix[x_pos][y_pos] != 0:\n \n # if diagonal is a match take that as priority\n if self.string1[x_pos - 1] == self.string2[y_pos - 1]:\n path.append((x_pos - 1, y_pos - 1))\n x_pos -=1; y_pos -= 1\n continue\n\n # finds the best horizontal alignment\n bestX = 0; bestY = y_pos - 1\n for i in range(x_pos - 1):\n if self.matrix[i][y_pos - 1] >= self.matrix[bestX][bestY]:\n bestX = i\n \n # finds best vertical alignment\n bestX_vertical = x_pos - 1; bestY_vertical = 0\n for i in range(y_pos - 1):\n if self.matrix[x_pos - 1][i] >= self.matrix[bestX_vertical][bestY_vertical]:\n bestY_vertical = i\n \n # if diagonal not satisfied, see which is better\n # the horizontal of vertical alignment.\n if self.matrix[bestX][bestY] < self.matrix[bestX_vertical][bestY_vertical]:\n path.append((bestX_vertical, bestY_vertical))\n x_pos = bestX_vertical; y_pos = bestY_vertical\n else:\n path.append((bestX, bestY))\n x_pos = bestX; y_pos = bestY\n\n return path[::-1] # reversed because we want origin to highest element.",
"def _subtree_first_position(self, p):\n walk = p\n while self.left(walk) is not None:\n walk = self.left(walk) # keep walking left\n return walk",
"def expand(self):\n indices = tf.where(self.path).numpy()\n indices = indices[np.argsort(indices[:, 0] + indices[:, 1])]\n start = indices[0, :2]\n end = indices[-1, :2]\n\n # Summary matches in terms of exact matches and, if available, substitution\n # scores.\n a_x, a_y = [], []\n summary = []\n for t, (i, j, s) in enumerate(indices):\n summary.append(self._position_to_char(i, j, s))\n if s == 0: # Match.\n a_x.append(self.left[i])\n a_y.append(self.right[j])\n else: # Gap\n i_prev, j_prev = indices[t - 1][:2]\n if (i - i_prev) == 1 and (j - j_prev) == 0: # Gap in Y.\n a_x.append(self.left[i])\n a_y.append('-')\n elif (i - i_prev) == 0 and (j - j_prev) == 1: # Gap in X.\n a_x.append('-')\n a_y.append(self.right[j])\n else: # Incorrectly formatted alignment.\n raise ValueError('Alignment is inconsistent.')\n\n self.start = start\n self.end = end\n self.left_match = ''.join(a_x)\n self.right_match = ''.join(a_y)\n self.matches = ''.join(summary)",
"def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n #score = max([alignment_matrix[row][col] for row in range(len_x + 1) for col in range(len_y+1)])\n\n max_score = -1\n max_positions = []\n for row in range(len(seq_x)+1):\n for col in range(len(seq_y)+1):\n if alignment_matrix[row][col] == max_score:\n max_positions.append((row,col))\n if alignment_matrix[row][col] > max_score:\n max_score = alignment_matrix[row][col]\n max_positions = [(row, col)]\n max_row, max_col = random.choice(max_positions)\n\n #print max_score, max_row, max_col\n\n len_x = max_row\n len_y = max_col\n\n while alignment_matrix[len_x][len_y] > 0:\n #print len_x, len_y\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n #while len_x > 0:\n # align_x = seq_x[len_x-1] + align_x\n # align_y = \"-\" + align_y\n # len_x -= 1\n\n #while len_y > 0:\n # align_x = \"-\" + align_x\n # align_y = seq_y[len_y-1] + align_y\n # len_y -= 1\n\n return (max_score, align_x, align_y)",
"def draw_tre_and_aln(aln_region, tre_str, og, target_sp):\n #site, aa, score=ps_site\n\n # get leaves from tree string\n tstax = getleaf(tre_str)\n\n ts = TreeStyle()\n ts.margin_left = 5\n ts.margin_right = 30\n ts.margin_top = 20\n ts.tree_width = 50\n\n aln_region_str = aln_region.format(\"fasta\")\n\n t = PhyloTree(tre_str, alignment=aln_region_str, alg_format=\"fasta\")\n\n # interfact\n def _set_style(t):\n # input an t with alignment, add label to it\n \"\"\"\n info = TextFace(\"{}\\nCodon:{}\\nScore:{}\".format(og,site,score), fsize=8, fgcolor='black', ftype='Arial')\n info.margin_top = 10\n info.margin_right = 20\n info.margin_left = 5\n t.add_face(info, column=0, position=\"branch-bottom\")\n #t.add_face(TextFace(\"Codon:{}\".format(site)),column=0,position=\"branch-bottom\")\n \"\"\"\n ## label the longbranch\n nstyle = NodeStyle()\n # red line\n #nstyle[\"bgcolor\"] = \"DarkSeaGreen\"\n #nstyle[\"bgcolor\"] = \"LightSalmon\"\n nstyle[\"hz_line_type\"] = 0\n #nstyle[\"hz_line_color\"] = \"#ff0000\"\n for tst in tstax:\n tsnode = t.get_leaves_by_name(name=tst)[0]\n tsnode.set_style(nstyle)\n # add #1 to target species\n if target_sp in tsnode.name:\n tsnode.name = tsnode.name + \"_#1\"\n return t\n t = _set_style(t)\n ## add AA alignment\n def _trans_aln(aln_region):\n # input an coding aln\n aa_aln_str = \"\"\n for seq_obj in aln_region:\n dna = Seq(str(seq_obj.seq))\n aa_s = dna.translate(gap=\"-\")\n aa_aln_str += '>{}\\n{}\\n'.format(seq_obj.id, aa_s)\n #print(aa_aln_str)\n return aa_aln_str\n\n aa_aln_region_str = _trans_aln(aln_region)\n t_aa = PhyloTree(tre_str, alignment=aa_aln_region_str, alg_format=\"fasta\")\n t_aa = _set_style(t_aa)\n\n return t, t_aa, ts",
"def fill_parents(self):\n for words in self.matched_words:\n words.gold_parent = (\n words.gold_word.parent if words.gold_word.parent is not None else 0\n )\n\n words.system_parent_gold_aligned = (\n self.matched_words_map.get(words.system_word.parent, None)\n if words.system_word.parent is not None\n else 0\n )",
"def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk",
"def do_global_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = global_setup(len(seq1), len(seq2), penalty)\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max value of them all\n scoring[i].append(max([xgap, ygap, match]))\n\n # Perform traceback\n alignment = traceback(scoring, seq1, seq2, penalty, matrix)\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring",
"def _forwardParsimony(self, aln):\n if self.sequence == None: # no sequence has been assigned\n if self.nChildren() == 0: # no children, so terminal, cannot propagate scores\n raise RuntimeError(\"No sequence assigned to leaf node:\", self.label)\n scores = [None for _ in range(self.nChildren())]\n for i in range(self.nChildren()):\n scores[i] = self.children[i]._forwardParsimony(aln)\n # for each position in the alignment,\n # introduce (initially zero) score for each symbol in alphabet\n self.seqscores = [[0 for _ in aln.alphabet] for col in range(aln.alignlen)]\n # for each position in the alignment,\n # allocate a position to put the each child symbol from which each current node symbol score was determined\n self.backptr = [[[None for _ in aln.alphabet] for _ in range(aln.alignlen)] for _ in range(self.nChildren())]\n for col in range(aln.alignlen):\n for i in range(self.nChildren()):\n # left child will contribute first\n for a_parent in range(len(aln.alphabet)):\n best_score = +9999999\n best_symb = 0\n for a in range(len(aln.alphabet)):\n score = (scores[i][col][a] + (\n 1 if a != a_parent else 0)) # if we want to weight scores, this would need to change\n if score < best_score:\n best_symb = a\n best_score = score\n self.seqscores[col][a_parent] += best_score\n self.backptr[i][col][a_parent] = best_symb\n else:\n self.seqscores = [[0 if a == sym else 999999 for a in aln.alphabet] for sym in\n self.sequence] # if we want to weight scores, this would need to change\n return self.seqscores",
"def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n score = alignment_matrix[len_x][len_y]\n\n while len_x > 0 and len_y > 0:\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n while len_x > 0:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n\n while len_y > 0:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n return (score, align_x, align_y)",
"def aligned_set(bitext):\n aligned = []\n for d in bitext:\n f_sent = d[\"fr\"] ## foreign sentence\n e_sent = d[\"en\"] ## English sentence\n fr = f_sent.split()\n en = e_sent.split()\n aligned.append(AlignedSent(fr,en))\n return aligned",
"def _extractFrames_(self, wordRulesFlag):\n\t\t#pdb.set_trace()\n\t\tsimplifyTree(self.srcTree)\n\t\tsimplifyTree(self.tgtTree)\n\n\t\tsDic, tDic = {}, {}\n\t\tframeSet = set() \n\t\tfor sSubtr in self.srcTree.subtrees():\n\t\t\tif sSubtr.height() <= 2:\n\t\t\t\tcontinue\n\n\t\t\tsTreePos = sSubtr.treeposition()\n\t\t\tsOffset = treePosition2offset(sTreePos, self.srcTree)\n\t\t\tsDic[sTreePos] = scanBlock(sOffset, 'src', self.waMatrix)\n\n\t\t\tfor tSubtr in self.tgtTree.subtrees():\n\t\t\t\tif tSubtr.height() <= 2:\n\t\t\t\t\tcontinue\n\n\t\t\t\ttTreePos = tSubtr.treeposition()\n\t\t\t\tif tTreePos not in tDic:\n\t\t\t\t\ttOffset = treePosition2offset(tTreePos, self.tgtTree)\n\t\t\t\t\ttDic[tTreePos] = scanBlock(tOffset, 'tgt', self.waMatrix)\n\t\t\t\telse:\n\t\t\t\t\ttOffset = tDic[tTreePos]\n\n\t\t\t\t#print >> debug_log, self.srcTree[sTreePos]\n\t\t\t\t#print >> debug_log, self.tgtTree[tTreePos]\n\t\t\t\tif sDic[sTreePos] == tDic[tTreePos] and -1 not in sDic[sTreePos] and -1 not in tDic[tTreePos]:\n\t\t\t\t\t#print >> debug_log, 'paird up!!'\n\t\t\t\t\tframeSet.add(Frame([sTreePos], [tTreePos], self.srcTree, self.tgtTree))\n\t\t\t\t#print >> debug_log\n\n\t\t'''\n\t\tsrcSubtreeSpanDict = self._extractSubtreeSpan_(self.srcTree, wordRulesFlag)\n\t\ttgtSubtreeSpanDict = self._extractSubtreeSpan_(self.tgtTree, wordRulesFlag)\n\n\t\tframeSet = set() \n\t\tfor span in srcSubtreeSpanDict:\n\t\t\tif not self._consistentWithWA_(span, 'src'):\n\t\t\t\tcontinue\n\t\t\ttgtSpanList = self._scanSpan_(span, 'src')\n\t\t\tfor tgtSpan in tgtSpanList:\n\t\t\t\tif tgtSpan in tgtSubtreeSpanDict:\n\t\t\t\t\t#print >> debug_log, span, tgtSpan\n\t\t\t\t\tframeSet.add(Frame([srcSubtreeSpanDict[span]], [tgtSubtreeSpanDict[tgtSpan]], self.srcTree, self.tgtTree))\n\n\t\t#print >> debug_log, '\\n'\n\t\tfor span in tgtSubtreeSpanDict:\n\t\t\tif not self._consistentWithWA_(span, 'tgt'):\n\t\t\t\tcontinue\n\t\t\tsrcSpanList = self._scanSpan_(span, 'tgt')\n\t\t\tfor srcSpan in 
srcSpanList:\n\t\t\t\tif srcSpan in srcSubtreeSpanDict:\n\t\t\t\t\t#print >> debug_log, srcSpan, span\n\t\t\t\t\tframeSet.add(Frame([srcSubtreeSpanDict[srcSpan]], [tgtSubtreeSpanDict[span]], self.srcTree, self.tgtTree))\n\t\t'''\n\t\t#pdb.set_trace()\n\t\tframeList = self._mergeFrames_(frameSet)\n\t\t#print >> debug_log, len(frameList)\n\t\treturn frameList",
"def do_local_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score (including 0)\n scoring[i].append(max([xgap, ygap, match, 0]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for i in range(len(scoring)):\n for j in range(len(scoring[i])):\n if scoring[i][j] > max_score:\n max_i, max_j, max_score = i, j, scoring[i][j]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, local=True\n )\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring",
"def compute_global_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of start position as bottom-right corner of matrix\n x_pos = len(seq_x)\n y_pos = len(seq_y)\n\n #initialization of variables\n result_seq_x = ''\n result_seq_y = ''\n score = alignment_matrix[x_pos][y_pos]\n\n #start in bottom right corner of matrix and go upwards till we reach left or upper edge\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 or y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]] and x_pos > 0 and y_pos > 0:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)",
"def align_seqs(self, chain_selection) -> Tuple[str, str]:\n chain = self.chains[chain_selection]\n alignments = pairwise2.align.globalxs(chain[f'{self.wanted_label}_sequence'],\n chain[f'{self.owned_label}_sequence'],\n -1, # open\n -0.1 # extend\n )\n al = alignments[0]\n chain[f'{self.wanted_label}_aln_sequence'] = al[0]\n chain[f'{self.owned_label}_aln_sequence'] = al[1]\n return al[0], al[1]",
"def do_semiglobal_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score\n scoring[i].append(max([xgap, ygap, match]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for j in range(len(scoring[-1])): # find max low road\n if scoring[-1][j] >= max_score:\n max_i, max_j, max_score = -1, j, scoring[-1][j]\n\n for i in range(len(scoring)): # find max high road (priority)\n if scoring[i][-1] >= max_score:\n max_i, max_j, max_score = i, -1, scoring[i][-1]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, semi=True\n )\n\n # add the endgaps for seq1\n if max_i == -1 and max_j != len(scoring[-1]):\n for j in range(max_j + 1, len(scoring[-1])):\n alignment[0][0] += '-'\n alignment[1][0] += ' '\n alignment[2][0] += seq2[j]\n\n # add the endgaps for seq2\n if max_j == -1 and max_i != len(scoring):\n for i in range(max_i + 1, len(scoring)):\n alignment[0][0] += seq1[i]\n alignment[1][0] += ' '\n alignment[2][0] += '-'\n\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring",
"def _get_alignment(self, width):\n # we know the alignment is appropriate\n # if we can divide the width by the\n # alignment cleanly\n # valid alignments are 1,2,4 and 8\n # put 4 first, since it's the default\n alignments = [4, 8, 2, 1]\n for alignment in alignments:\n if width % alignment == 0:\n return alignment",
"def global_alignment(first_seq, second_seq, match_penalty_value, mismatch_penalty_value, gap_penalty_value):\n alignment_matrix = initiate_matrix(first_seq, second_seq, gap_penalty_value)\n path_matrix = np.zeros((alignment_matrix.shape[0], alignment_matrix.shape[1], 3), dtype=str)\n scores = []\n \"\"\" Second step is to apply get the max score method, then assign it to the current cell. \"\"\"\n for i in range(1, alignment_matrix.shape[0]):\n for j in range(1, alignment_matrix.shape[1]):\n row_score = alignment_matrix[i, j - 1] + gap_penalty_value\n column_score = alignment_matrix[i - 1, j] + gap_penalty_value\n if second_seq[i - 1] == first_seq[j - 1]:\n diagonal_score = alignment_matrix[i - 1, j - 1] + match_penalty_value\n else:\n diagonal_score = alignment_matrix[i - 1, j - 1] + mismatch_penalty_value\n scores.append(row_score)\n scores.append(column_score)\n scores.append(diagonal_score)\n alignment_matrix[i, j], ex_cell = get_max_score(scores)\n scores.clear()\n for I in range(ex_cell.size):\n if ex_cell[0][I] == 0:\n path_matrix[i, j, 1] = \"S\"\n elif ex_cell[0][I] == 1:\n path_matrix[i, j, 1] = \"F\"\n elif ex_cell[0][I] == 2:\n path_matrix[i, j, 1] = \"D\"\n\n max_score = alignment_matrix[i, j]\n \"\"\" Third step is to trace back.\"\"\"\n f, s = trace_back(path_matrix, first_seq, second_seq, match_penalty_value, mismatch_penalty_value,\n gap_penalty_value)\n \"\"\"Last step is to check the max score with the aligned sequences score. \"\"\"\n new_s, check = check_alignment_score(f, s, max_score, match_penalty_value, mismatch_penalty_value,\n gap_penalty_value)\n return f, s, new_s, check",
"def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of variables\n x_pos = -1\n y_pos = -1\n result_seq_x = ''\n result_seq_y = ''\n score = 0\n\n #determine start position in alignment_matrix as position with maximum value \n for row in range(len(seq_x) + 1):\n for col in range(len(seq_y) + 1):\n if alignment_matrix[row][col] > score:\n score = alignment_matrix[row][col]\n x_pos = row\n y_pos = col\n\n #start in start position and go upwards till we reach first entry with value 0\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 and y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n if current_value == 0:\n break\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)",
"def sample(self, sentence_pair):\n sampled_alignments, best_alignment = super().sample(sentence_pair)\n return self.prune(sampled_alignments), best_alignment",
"def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()",
"def find(self) -> bool:\n alignments = []\n for sw_idx in range(len(self.sw)):\n for nu_idx in range(len(self.nu)):\n alignments.append(Alignment(self.nu, self.sw, nu_idx, sw_idx, self.orig_nu))\n alignment = max(alignments, key=lambda align: align.score)\n if alignment.score > 0:\n self.alignment = alignment\n return True\n return False",
"def __init__(self, srcTree, tgtTree, wordAlignment, alignFunc, ruleExFlag, wordRulesFlag, extensiveRulesFlag, fractionalCountFlag, phraseRulesFlag, s2t, verbose):\n\t\tself.srcTree = srcTree\n\t\tself.srcWordList = [word.lower() for word in srcTree.leaves()]\n\t\tself.tgtTree = tgtTree\n\t\tself.basicGlueRuleTopLabels, self.basicGlueRuleLabels = extractLabels(self.tgtTree)\n\t\tself.tgtWordList = [word.lower() for word in tgtTree.leaves()]\n\t\tself.waMatrix = self._makeWaMatrix_(wordAlignment, len(srcTree.leaves()), len(tgtTree.leaves()))\n\n\t\tself.frameList = self._extractFrames_(wordRulesFlag)\n\t\tself.subtreeAlign(alignFunc)\n\n\t\tif verbose:\n\t\t\tprint >> debug_log\n\t\t\t#print >> debug_log, \"SntFrame got the word alignment matrix:\"\n\t\t\t#print >> debug_log, '\\n'.join([' '.join([str(d) for d in row]) for row in self.waMatrix])\n\t\t\t#print >> debug_log\n\t\t\tprint >> debug_log, \"SntFrame's frameList: (\" + str(len(self.frameList)) + \" lists)\" \n\t\t\tfor frame in self.frameList:\n\t\t\t\tprint frame.subtreeAlignment_waMatrixPos\n\t\t\tprint >> debug_log\n\t\t\t#print >> debug_log, 'src tree:'\n\t\t\t#print >> debug_log, self.srcTree\n\t\t\t#print >> debug_log, 'tgt tree:'\n\t\t\t#print >> debug_log, self.tgtTree\n\n\t\tself.ruleList = []\n\t\tif s2t: self.glueRuleList = []\n\t\tif ruleExFlag:\n\t\t\ttmpSubaList = [frame.subtreeAlignment_waMatrixPos for frame in self.frameList]\n\n\t\t\tif verbose:\n\t\t\t\tprint >> debug_log\n\t\t\t\tprint >> debug_log, \"SntFrame's all suba given align_func\", alignFunc.__name__, \": (this one should be the same with the above frameList)\"\n\t\t\t\tprint >> debug_log, tmpSubaList\n\t\t\t\tprint >> debug_log\n\t\t\t\tprint >> debug_log, \"rules in the Bead:\"\n\n\t\t\ttmpBead = Bead(self.srcTree, self.tgtTree, self.waMatrix, tmpSubaList, wordRulesFlag, extensiveRulesFlag, phraseRulesFlag, s2t, verbose)\n\n\t\t\tif verbose:\n\t\t\t\tprint >> debug_log\n\t\t\t\tprint >> debug_log, \"corresponding bead info: 
(this one should be a sorted version of the above frameList, sorted by the area of each square, biggest to smallest)\"\n\t\t\t\tprint >> debug_log, tmpBead\n\t\t\t\tprint >> debug_log\n\n\t\t\tself.ruleList = [rule.mosesFormatRule() for rule in self.consolidateRules(tmpBead.ruleList, fractionalCountFlag) if rule.count > 0]\n\t\t\tif s2t: self.glueRuleList = [rule.mosesFormatRule() for rule in tmpBead.glueRuleList]\n\n\t\tif verbose:\n\t\t\tprint >> debug_log, \"SntFrame got the following rules: (\" + str(len(self.ruleList)) + \")\"\n\t\t\tfor rule in self.ruleList:\n\t\t\t\tprint >> debug_log, ''.join(rule[0]).encode('utf-8'),\n\t\t\tprint >> debug_log\n\t\t\t\n\t\t\tif s2t:\n\t\t\t\tprint >> debug_log, \"SntFrame got the following glue rules: (\" + str(len(self.glueRuleList)) + \")\"\n\t\t\t\tfor rule in self.glueRuleList:\n\t\t\t\t\tprint >> debug_log, ''.join(rule[0]).encode('utf-8'),\n\t\t\t\tprint >> debug_log",
"def _backwardParsimony(self, aln, seq=None):\n if self.sequence == None: # no sequence has been assigned\n childbuf = [[] for _ in range(self.nChildren())]\n if self.nChildren() == 0: # no children, so terminal, cannot propagate scores\n raise RuntimeError(\"No sequence assigned to leaf node:\", self.label)\n if seq == None: # Only root can do this, no parents to consider, so we pick the lowest scoring symbol\n currbuf = []\n for col in range(aln.alignlen):\n min_score = 999999\n min_symb = None\n child_symb = [None for _ in range(self.nChildren())]\n for a_parent in range(len(aln.alphabet)):\n if self.seqscores[col][a_parent] < min_score:\n min_score = self.seqscores[col][a_parent]\n min_symb = a_parent\n for i in range(self.nChildren()):\n child_symb[i] = self.backptr[i][col][a_parent]\n currbuf.append(aln.alphabet[min_symb])\n for i in range(self.nChildren()):\n childbuf[i].append(aln.alphabet[child_symb[i]])\n self.sequence = sequence.Sequence(currbuf, aln.alphabet, self.label, gappy=True)\n else: # Non-root, but not leaf\n self.sequence = seq\n col = 0\n for sym_parent in self.sequence:\n a_parent = aln.alphabet.index(sym_parent)\n child_symb = [None for _ in range(self.nChildren())]\n for i in range(self.nChildren()):\n child_symb[i] = self.backptr[i][col][a_parent]\n childbuf.append(aln.alphabet[child_symb[i]])\n col += 1\n for i in range(self.nChildren()):\n self.children[i]._backwardParsimony(aln, sequence.Sequence(childbuf[i], aln.alphabet, self.label, gappy=True))\n return self.sequence",
"def lookup_alignment_frag(self, name):\n raise NotImplementedError()",
"def lookup_align_to_frag(self, name):\n # TODO Assert that the align-to frag is the same as the last 3 predicted atoms\n # assert result == self.predicted[-3:]\n raise NotImplementedError()",
"def parsimony(self):\n self.root._forwardParsimony(self.aln) # setup and compute scores for all nodes\n self.root._backwardParsimony(self.aln) # use scores to determine sequences\n return self.root.getSequence() # return the sequence found at the root"
] | [
"0.61281717",
"0.5682574",
"0.5621451",
"0.5610291",
"0.5484573",
"0.5340691",
"0.53166825",
"0.52982336",
"0.52693313",
"0.52550215",
"0.5244625",
"0.51851237",
"0.51479876",
"0.5139947",
"0.5126392",
"0.50898707",
"0.50732404",
"0.5068625",
"0.50669676",
"0.50468457",
"0.5036851",
"0.50360197",
"0.50148314",
"0.501021",
"0.5009426",
"0.49784443",
"0.4946028",
"0.49003053",
"0.4891263",
"0.48647904"
] | 0.623596 | 0 |
Calculates the length of the tuples inside a list | def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:
return [(i, len(i)) for i in lst] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lsize( lst ):\n return sum( [ x[1] for x in lst ] )",
"def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n",
"def get_list_length(self):\n n = 0\n l = self\n while l.is_block():\n n+=1\n l = l.field(1)\n return n",
"def get_length(rows):\n if rows and (type(rows) is list or type(rows) is tuple):\n return [len(rows), *get_length(rows[0])]\n return []",
"def get_length(rows):\n if rows and (type(rows) is list or type(rows) is tuple):\n return [len(rows), *get_length(rows[0])]\n return []",
"def listLengths(input_list):\r\n return [len(item) for item in input_list]",
"def same_len(count, a:tuple):\n if len(a[0]) == len(a[1]):\n return count + 1\n return count",
"def count(some_list):\n return len(some_list)",
"def len_list(self) -> int:\n return 1",
"def deep_len(lst):\n s=0\n for x in lst:\n if type(x) == list:\n s = s + deep_len(x)\n\n\n else:\n s=s+1\n return s",
"def get_list_length(self):\r\n return len(self.ps)",
"def lengths(self):\n return tuple(self.__lengths)",
"def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)",
"def _getLenRecur(self, el):\n retval = 0\n if isinstance(el,list) or isinstance(el,tuple):\n retval = 1 + self._getLenRecur(el[0])\n return retval",
"def size(A):\n\treturn (len(A[0]),len(A))",
"def numdim(l):\n if not isinstance(l, (list, tuple)):\n return 0\n if not isinstance(l[-1], (list, tuple)):\n return 1\n else:\n return 1 + numdim(l[-1])",
"def _get_item_lengths(self) -> List[int]:\n return [len(x[0]) for x in self.data]",
"def count(x):\n return sum(len(y) for y in x)",
"def _chunk_length(piece: tuple[str, list[int, int]]):\n return piece[1][1]",
"def multiListSliceCount(lol):\n count = 1\n for i in range(0, len(lol)):\n count *= len(lol[i])\n #print \"multiListSliceCount of:%s is:%d\" % (lol, count)\n return count",
"def __len__(self):\n return len(self[0]) + len(self[1])",
"def aln_length(self) -> int:\n return sum([l for l, _ in self])",
"def getLength(self):\r\n return len(self.list)",
"def length(memoryManager, paramsList):\n handleEmpty(paramsList, \"cannot get length of\")\n head = paramsList[0]\n\n if not validateList(head):\n raise Exception('Tried to get length of non-list')\n # if type(head) == float:\n # return [1.0]\n\n return [float(len(head))]",
"def __len__(self) -> int:\n return len(self._list)",
"def __len__(self):\n return len(self.lst)",
"def length(self):\n return self.list.length",
"def length(self):\n return self.list.length",
"def length(self):\n # TODO: Count number of items\n # print(\"self\", self.list)\n # print(\"type\", type(self.list))\n return len(self.list)",
"def __len__(self):\n return len(self.pairs)"
] | [
"0.7522205",
"0.7252191",
"0.7017774",
"0.69745976",
"0.69745976",
"0.6790512",
"0.6776123",
"0.67288065",
"0.6696",
"0.66074085",
"0.6599637",
"0.6568948",
"0.65648407",
"0.6536912",
"0.64823395",
"0.6475998",
"0.6471026",
"0.6456544",
"0.6444813",
"0.6433039",
"0.64150035",
"0.64063495",
"0.6404006",
"0.63959277",
"0.6371812",
"0.6353985",
"0.6342619",
"0.6342619",
"0.6338351",
"0.6336257"
] | 0.797556 | 0 |
returns json data for requested digg endpoint | def get_json(endpoint):
url = ''.join([
'http://services.digg.com',
endpoint,
'?appkey=%s' % APPKEY,
'&type;=json',
])
return urllib2.urlopen(url).read() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)",
"def get_json(self):\n url = 'http://lkd.to/api/' + self.user\n response = requests.get(url)\n return response.json()",
"def getDengueInfo(request):\n return JsonResponse(DengueAPI().returnGeoJson(), safe=False)",
"async def json(request):\n requester = request.headers.get('X-FORWARDED-FOR', None)\n print(\"Serving JSON requested by\", requester)\n try:\n component = request.match_info['component']\n except:\n component = None\n json_data = await data.get_data(component=component)\n return web.json_response(json_data)",
"def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json",
"def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json",
"def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well",
"def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])",
"def get():\n return jsonify({'doctor': 'Doctor API'}), 200",
"def view_get():\n\n return jsonify(get_dict(\"url\", \"args\", \"headers\", \"origin\"))",
"def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json",
"def get_gs_as_json(self, fname):\n return requests.get(f\"{self.gs_base_url}/{fname}\").json()",
"def get(self, url_or_path):\n return self.request.get(url_or_path).json()",
"def ng_get(self, request, *args, **kwargs):\r\n return self.build_json_response(self.get_object())",
"def fetchStateData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getdrgdata() \n return {'data':result}",
"def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)",
"def discover_json() -> Response:\n\n device_id = int(uid[:8], 16) # Hex string to int\n valid_id = device_id + _device_id_checksum(device_id)\n\n data = {\n \"FriendlyName\": locast_service.city,\n \"Manufacturer\": \"locast2dvr\",\n \"ModelNumber\": config.device_model,\n \"FirmwareName\": config.device_firmware,\n \"TunerCount\": config.tuner_count,\n \"FirmwareVersion\": config.device_version,\n \"DeviceID\": hex(valid_id)[2:],\n \"DeviceAuth\": \"locast2dvr\",\n \"BaseURL\": f\"http://{host_and_port}\",\n \"LineupURL\": f\"http://{host_and_port}/lineup.json\"\n }\n return jsonify(data)",
"def epg() -> Response:\n return jsonify(locast_service.get_stations())",
"def json_api_call(url):\n response = requests.get(url)\n return response.json()",
"def Access_URL(url): \n r = requests.get(url) \n json = r.json() \n return json",
"def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200",
"def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data",
"def getJson(self,url):\n r = req.get(str(url),\"GET\")\n jsonResponse = json.loads(r.text)\n return jsonResponse",
"def _get(self, path, params=None):\n return self._api.get_json(path, headers={\"Hawkular-Tenant\": self.tenant_id}, params=params)",
"def get_data(self):\n return self.data.to_json()",
"def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()"
] | [
"0.63296825",
"0.632292",
"0.6256466",
"0.61851496",
"0.6035162",
"0.6035162",
"0.6023629",
"0.6001173",
"0.5972762",
"0.59639215",
"0.59509265",
"0.5947116",
"0.58656496",
"0.58101904",
"0.57769",
"0.5770559",
"0.5740533",
"0.5736846",
"0.5727516",
"0.5722231",
"0.5714382",
"0.5714382",
"0.5714382",
"0.5714382",
"0.5710231",
"0.57091826",
"0.56954324",
"0.56891435",
"0.56765705",
"0.5673823"
] | 0.7030758 | 0 |
Exit when the Python version is too low. | def check_python_version():
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_python_version():\n version = sys.version.split()[0]\n if version < \"2.6\" or version >= \"3\":\n raise CuckooStartupError(\"You are running an incompatible version of Python, please use 2.6 or 2.7\")",
"def _check_python_version(min_version):\n if sys.version_info < min_version:\n raise RuntimeError(\"Scikit-lr requires Python {0} or later. \"\n \"The current Python version is {1} installed \"\n \"in {2}.\".format(python_version(), min_version,\n sys.executable))",
"def check_pyversion() -> None:\n pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))\n if not pyversion >= 3.6:\n text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.\nFAIL: use 3.6 <= python version < 3.8\nFAIL: exiting cmh_test.py'''\n print(ColorText(text).fail())\n exit()\n if not pyversion < 3.8:\n print(ColorText(\"FAIL: python 3.8 has issues with the ipyparallel engine returns.\").fail())\n print(ColorText(\"FAIL: use 3.6 <= python version < 3.8\").fail())\n print(ColorText(\"FAIL: exiting cmh_test.py\").fail())\n exit()",
"def check_version():\n if sys.version_info[0:3] == PYTHON_VERSION_INFO[0:3]:\n return\n\n sys.exit(\n ansi.error() + ' your virtual env points to the wrong python version. '\n 'This is likely because you used a python installer that clobbered '\n 'the system installation, which breaks virtualenv creation. '\n 'To fix, check this symlink, and delete the installation of python '\n 'that it is brokenly pointing to, then delete the virtual env itself '\n 'and rerun lore install: ' + os.linesep + os.linesep + BIN_PYTHON +\n os.linesep\n )",
"def _check_version () -> None:\n py_version_info: typing.Tuple = sys.version_info[:2]\n\n if py_version_info < MIN_PY_VERSION:\n error_msg = \"This version of pytextrank requires Python {} or later ({} detected)\\n\"\n raise RuntimeError(error_msg.format(_versify(MIN_PY_VERSION), _versify(py_version_info)))",
"def good_py_version() -> bool:\n return sys.version_info.major >= 3 and sys.version_info.minor >= 6",
"def _check_python_version(self):\n python_exe = tools.which(\"python\")\n if not python_exe:\n msg = (\"Python must be available in PATH \"\n \"in order to build v8\")\n raise ConanInvalidConfiguration(msg)\n # In any case, check its actual version for compatibility\n from six import StringIO # Python 2 and 3 compatible\n version_buf = StringIO()\n cmd_v = \"{} --version\".format(python_exe)\n self.run(cmd_v, output=version_buf)\n p = re.compile(r'Python (\\d+\\.\\d+\\.\\d+)')\n verstr = p.match(version_buf.getvalue().strip()).group(1)\n if verstr.endswith('+'):\n verstr = verstr[:-1]\n version = tools.Version(verstr)\n # >= 2.7.5 & < 3\n py2_min = \"2.7.5\"\n py2_max = \"3.0.0\"\n py3_min = \"3.8.0\"\n if (version >= py2_min) and (version < py2_max):\n msg = (\"Found valid Python 2 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n elif version >= py3_min:\n msg = (\"Found valid Python 3 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n else:\n msg = (\"Found Python in path, but with invalid version {}\"\n \" (v8 requires >= {} and < \"\n \"{} or >= {})\".format(verstr, py2_min, py2_max, py3_min))\n raise ConanInvalidConfiguration(msg)",
"def check_python():\n out_info(\"Installed Python: {0} {1}\".format(PY_VERSION[0],\n PY_VERSION[1]))\n if not (PY_VERSION[0].split(\".\")[0] == \"3\"\n and PY_VERSION[0].split(\".\")[1] in (\"3\", \"4\", \"5\", \"6\")\n and PY_VERSION[1] == \"64bit\"):\n out_error(\"Please run this script with Python version 3.3, 3.4, 3.5 or 3.6 \"\n \"64bit and try again.\")\n exit(1)",
"def validate_required_python_version_running(minimal_required_version: str) -> None:\n try:\n parts = minimal_required_version.split(\".\")\n min_py_version = 1000000*int(parts[0]) + 1000*(int(parts[1]) if len(parts) > 1 else 0) + (int(parts[2]) if len(parts) > 2 else 0)\n running_py_version = 1000000*sys.version_info.major + 1000*sys.version_info.minor + sys.version_info.micro\n if running_py_version < min_py_version:\n raise RuntimeError(\"\")\n except:\n raise RuntimeError(f\"Kqlmagic requires python >= {Constants.MINIMAL_PYTHON_VERSION_REQUIRED}, you use python {sys.version}\")",
"def min_python_version(version: str) -> None:\n\n if isinstance(version, str):\n ver = tuple(int(x) for x in version.split('.'))\n elif isinstance(version, tuple):\n ver = version\n else:\n raise UnsupportedPythonVersionException('The python version you are currently using is not supported!')\n\n if not sys.version_info >= ver:\n pytest.exit(f'For the test to be executed properly you have to use python of {ver} of higher!')",
"def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()",
"def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)",
"def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro",
"def test_python_version(container, python_next_version=\"3.10\"):\n LOGGER.info(f\"Checking that python version is lower than {python_next_version}\")\n c = container.run(\n tty=True,\n command=[\"start.sh\"],\n )\n cmd = c.exec_run(\"python --version\")\n output = cmd.output.decode(\"utf-8\")\n assert \"ERROR\" not in output\n assert \"WARNING\" not in output\n actual_python_version = version.parse(output.split()[1])\n assert actual_python_version < version.parse(\n python_next_version\n ), f\"Python version shall be lower than {python_next_version}\"",
"def test_python_after_38():\n import sys\n assert sys.version_info >= (3, 8)",
"def verify_python(self, app):\n output = self.tools[app].app_context.check_output(\n [\n f\"python{app.python_version_tag}\",\n \"-c\",\n (\n \"import sys; \"\n \"print(f'{sys.version_info.major}.{sys.version_info.minor}')\"\n ),\n ]\n )\n # Update the python version tag with the *actual* python version.\n app.python_version_tag = output.split(\"\\n\")[0]\n target_python_version = tuple(int(v) for v in app.python_version_tag.split(\".\"))\n\n if target_python_version < self.briefcase_required_python_version:\n briefcase_min_version = \".\".join(\n str(v) for v in self.briefcase_required_python_version\n )\n raise BriefcaseCommandError(\n f\"The system python3 version provided by {app.target_image} \"\n f\"is {app.python_version_tag}; Briefcase requires a \"\n f\"minimum Python3 version of {briefcase_min_version}.\"\n )\n elif target_python_version != (\n self.tools.sys.version_info.major,\n self.tools.sys.version_info.minor,\n ):\n self.logger.warning(\n f\"\"\"\n*************************************************************************\n** WARNING: Python version mismatch! **\n*************************************************************************\n\n The system python3 provided by {app.target_image} is {app.python_version_tag}.\n This is not the same as your local system ({self.python_version_tag}).\n\n Ensure you have tested for Python version compatibility before\n releasing this app.\n\n*************************************************************************\n\"\"\"\n )",
"def test_python_supported_version(self):\r\n min_acceptable_version = (2, 7, 0)\r\n min_unacceptable_version = (3, 0, 0)\r\n\r\n command = 'python --version'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n\r\n version_str_matches = re.findall('Python\\s+(\\S+)\\s*', stdout.strip())\r\n self.assertEqual(len(version_str_matches), 1,\r\n \"Could not determine the Python version in '%s'.\" %\r\n stdout)\r\n version_string = version_str_matches[0]\r\n\r\n try:\r\n if version_string[-1] == '+':\r\n version_string = version_string[:-1]\r\n version = tuple(map(int, version_string.split('.')))\r\n if len(version) == 2:\r\n version = (version[0], version[1], 0)\r\n pass_test = (version >= min_acceptable_version and\r\n version < min_unacceptable_version)\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported Python version. Must be >= %s and < %s, \"\r\n \"but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, min_unacceptable_version)),\r\n version_string))",
"def test_2x_only_python_version_deploy():\n pass",
"def test_python_version():\n assert sys.version_info.major == 3",
"def py_versiontest(c):\n pass",
"def warn_on_old_or_unsupported_python_version():\n\n if PY26:\n import scalyr_agent.scalyr_logging\n\n scalyr_agent.scalyr_logging.getLogger(__name__).warn(PYTHON26_EOL_WARNING)",
"def verify_system_python(self):\n system_python_bin = Path(\"/usr/bin/python3\").resolve()\n system_version = system_python_bin.name.split(\".\")\n if system_version[0] != \"python3\" or len(system_version) == 1:\n raise BriefcaseCommandError(\"Can't determine the system python version\")\n\n if system_version[1] != str(self.tools.sys.version_info.minor):\n raise BriefcaseCommandError(\n f\"The version of Python being used to run Briefcase ({self.python_version_tag}) \"\n f\"is not the system python3 (3.{system_version[1]}).\"\n )",
"def validate_python_version(cls, values: Dict) -> Dict:\n if sys.version_info < (3, 9):\n raise ValueError(\n \"This tool relies on Python 3.9 or higher \"\n \"(as it uses new functionality in the `ast` module, \"\n f\"you have Python version: {sys.version}\"\n )\n return values",
"def test_3x_only_python_versions_deploy():\n pass",
"def initialize():\n _check_python_version()",
"def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))",
"def python_version():\n return _sys_version()[1]",
"def azure_pipelines_broken():\n return (sys.version_info.major, sys.version_info.minor) in ((2, 7), (3, 7))",
"def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. \\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.6.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)",
"def python_version():\n _require_environment()\n print 'Python version on virtualenv %s: %s' % (env.project['virtualenv'], _get_python_version())"
] | [
"0.8062994",
"0.7595006",
"0.74613327",
"0.73725754",
"0.7319089",
"0.7166508",
"0.7116363",
"0.7087189",
"0.69772106",
"0.69696325",
"0.68991804",
"0.682904",
"0.6719451",
"0.6688046",
"0.66628706",
"0.665088",
"0.6538358",
"0.64709836",
"0.6450609",
"0.6290435",
"0.6257957",
"0.6222585",
"0.6181354",
"0.6154716",
"0.6083187",
"0.6067994",
"0.6048401",
"0.60387635",
"0.5956945",
"0.59546185"
] | 0.81366074 | 1 |
Packet waiting & service loop | def run(self):
waiting_packet = None
while True:
if waiting_packet is not None:
packet = waiting_packet
waiting_packet = None
else:
packet = yield self.buffer.get()
self.channel.add_sender(self)
yield self.env.timeout(packet.size/self.service_rate)
self.channel.remove_sender(self)
packet.output_timestamp= env.now
if self.destination is None:
self.packet_list.append(packet)
if (not self.collision):
if self.destination is not None:
self.destination.put(packet)
self.channel.packet_list.append(packet)
else:
if self.debug:
print("Packet %d is discarded. Reason: Collision"
% (packet.id))
self.packets_drop += 1
waiting_packet = packet
self.collision = False
yield self.env.timeout(self.random_delay()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_service_loop(self):\n\t\n\tprint \"Attempting to receive file\", self.file_read, \"from\", self.ip, \"at port\", self.port, \".\" \n\trecv_data = None\n\tnum_retransmits = 0\n\t#Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step. \n\t#Limit number of retransmits to 60 so as not to enter infinite loop.\n\twhile(num_retransmits < 60):\n\t num_retransmits += 1\n\t self.send_open_request()\n\n\t input_socket = [self.client_socket]\n\t inputready,outputready,exceptready = select.select(input_socket,[],[], 1)\n\t #if timer expires without input becoming ready, empty list is returned. So go to next iteration of loop (retransmit)\n\t if (inputready == []):\n\t\tcontinue\n\t else:\n\t\ttry:\n\t\t recv_data = self.client_socket.recv(self.buffer_)\n\t\texcept Exception as exception_:\n\t\t print(\"Wrong port number or IP address provided, or server is not available at the moment.\")\n\t\t sys.exit()\n\t\tprint(\"Received a packet.\")\n\t\t\n\t\t#Generate a random number between 0 and 1 with uniform distribution to simulate packet loss.\n\t\tif (random.uniform(0,1) < self.p):\n\t\t recv_data = None\n\t\t print(\"Packet dropped randomly to simulate packet losses\")\n\t\t continue\n\t\t\n\t\tbit_signature = recv_data[0:4]\n\t\tresponse_type = recv_data[4:8]\n\t\trecv_payload = recv_data[8:]\n\n\t\t#Check that bit signature is valid (packet is from our network)\n\t\tif bit_signature != \"\\x00\\x00\\x00\\r\": \n\t\t recv_invalid_response(recv_data, \"bit_signature\")\n\t\t continue\n\t\telse:\n\t\t #We have only ever sent a open_request, so the only viable response at this point is an open_response. \n\t\t #If this field contains anything else, it is an invalid packet. 
Retransmit request.\n\t\t if response_type != \"\\x00\\x00\\x00\\x08\": \n\t\t\tself.recv_invalid_response(recv_data, \"response_type\")\n\t\t\tcontinue\t\t\n\t\t else:\n\t\t\t#Bit signature and response type fields are both valid.\n\t\t\tprint(\"Received open response from server...\")\n\t\t\tself.recv_open_response(recv_payload)\n\t\t\tbreak\n\t\n\tif (num_retransmits >= 60):\n\t print (\"Exceeded number of retransmissions allowed. Exiting program.\")\n\t sys.exit()\t\n\treturn",
"def _hpoll(self, pc):\n while True:\n mtype, mdata1, mdata2=self.gMsg()\n if mtype==\"msg\":\n self.processMsg(mdata1, mdata2)\n continue\n if mtype is None:\n break",
"def _listener(self):\n while self.thread_run.is_set():\n if self._serial_bytes_available() >= len(self.MAGIC_HEADER) and \\\n self._check_for_start():\n report, retval, payload = self._receive_packet()\n if report >= 0:\n self.queue.put((report, retval, payload))\n if self.verbose:\n self.log(\"Put report {} on queue\".format(report))",
"def read_service_loop(self):\n\t\n\t#Increment start_position each time packet sent, send a read request packet for each new position.\n\t#Expect to receive a read_response packet for each time read request sent.\n\trecv_data = None\n\tprint(\"Sending request to server to read and receive file...\")\n\tstart_position = 0\n\twhile(self.eof == False):\n\t print(\"Reading from byte \" + str(start_position))\t \n\t num_retransmits = 0 \n\t #Loop for retransmissions of the same start position\n\t while(num_retransmits < 60):\n\t\tnum_retransmits = num_retransmits + 1\n\t\tself.send_read_request(start_position)\n\t\tinput_socket = [self.client_socket]\n\t\tinputready,outputready,exceptready = select.select(input_socket,[],[], 1)\t\t\n\t\tif (inputready == []):\n\t\t continue\t\t\n\t\telse:\n\t\t recv_data = self.client_socket.recv(self.buffer_)\t\t \n\t\t if (random.uniform(0,1) < self.p):\n\t\t\trecv_data = None\n\t\t\tprint(\"Packet dropped randomly to simulate packet losses\")\n\t\t\tcontinue\t\t \n\t\t bit_signature = recv_data[0:4]\n\t\t response_type = recv_data[4:8]\n\t\t recv_payload = recv_data[8:]\t \n\t\t if bit_signature != \"\\x00\\x00\\x00\\r\":\n\t\t\tself.recv_invalid_response(recv_data, \"bit_signature\")\n\t\t\tcontinue\n\t\t else:\n\t\t\tif response_type == \"\\x00\\x00\\x00\\x02\":\n\t\t\t #Packet is valid, proceed to recv_read_response to append this bit of file received into local_filename\n\t\t\t self.file_append = open(self.local_filename, 'r+b')\n\t\t\t self.recv_read_response(recv_payload)\n\t\t\t break\n\t\t\telse:\n\t\t\t self.recv_invalid_response(recv_data, \"response_type\")\n\t\t\t continue\n\t\t\n\t start_position = start_position + self.NUM_BYTES_TO_READ\t\t\n\t if (num_retransmits >= 60):\n\t\tprint (\"Exceeded number of retransmissions allowed. Exiting program.\")\n\t\tsys.exit()\t \t\t\n\treturn",
"def run(self):\n self.stopped = False\n # receives incoming 'host up' requests\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n serverSocket.bind(('255.255.255.255', Globals.BROADCAST_PORT))\n \n # wait for UDP broadcast, send TCP ACK\n while 1:\n \n # open a socket and listen for a message\n value,address = serverSocket.recvfrom(256)\n host,port = address\n \n # this actually prevents a seg fault ;( for some reason\n if self.stopped:\n return\n \n if value == 'host up':\n \n sendSocket = socket.socket(socket.AF_INET, \n socket.SOCK_STREAM, 0)\n sendSocket.connect((host, Globals.ACK_PORT))\n sendSocket.send('host up ack')\n sendSocket.close()\n sendSocket = None\n self._addHost(host)\n \n elif value.find('host down') == 0:\n self._removeHost(host)\n \n elif value.find('add group') == 0:\n self._postEvent(value)\n \n elif value.find('remove group') == 0:\n self._postEvent(value)\n \n elif value.find('group beat') == 0:\n self._postEvent(value)\n \n serverSocket.close()",
"def _ir_recv_daemon(self):\n while True:\n if (time.ticks_us()-self._prev_time) > self.waittime and self.pulse_buffer != []:\n dec = self.decode_buff()\n if self.callback:\n self.callback(dec)",
"def run(self): #CHANGED FROM run to start\n\n # Open socket for communication\n self.udpSock.bind(self.addr)\n # Receive communication until stopped\n while not self.close.isSet():\n data = self.udpSock.recv(self.buffer)\n self.lock.acquire()\n self.processData(data)\n self.lock.release()\n\n\n # Close socket\n self.udpSock.close()",
"def run(self):\n\t\twhile not self.quit:\n\t\t\ttry:\n\t\t\t\tself.pcap.loop(0, self.__packetHandler)\n\t\t\texcept SystemExit:\t# raised by '__packetHandler' to force quit\n\t\t\t\tpass\n\t\t\t\t# is there a direct (simpler?) way to force return from waiting 'self.pcap.loop' ?!?\n\t\t\texcept:\t\t\t# generic error\n\t\t\t\t#warnings.warn( \"%s %s\" % sys.exc_info()[0:2] )\n\t\t\t\t#print \"\\n\".join(inspect.getframeinfo(sys.exc_info()[2]).code_context)\n\t\t\t\t#print \"\".join([ 'File \"%s\", line %i, in %s (%s)\\n%s\\n' % (f[1:4]+(\"\\n\".join(f[4]),)) for f in inspect.getinnerframes(sys.exc_info()[2]) ])\n\t\t\t\twarnings.warn( \"\\n\".join(PyLib.tb_info()[2]) )\n\t\t\t\tsys.exc_clear()",
"def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")",
"def wait_for_simulation():\n MGMT_PORT = 54322\n SIMULATION_END_CMD_RC = 80\n SDP_HEADER_FORMAT = '<HBBBBHHIiii'\n SDP_HEADER_SIZE = struct.calcsize(SDP_HEADER_FORMAT)\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(1) \n sock.bind(('0.0.0.0', MGMT_PORT)) # Bind all addresses on given port\n \n while True:\n try:\n data = sock.recv(1024) \n unpack = struct.unpack(SDP_HEADER_FORMAT, data[:SDP_HEADER_SIZE])\n command = unpack[7] \n if command == SIMULATION_END_CMD_RC: break\n except socket.timeout:\n pass",
"def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield self.env.timeout(self.adist)\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist, self.packets_sent, src=self.id, flow_id=self.flow_id)\n self.out.put(p)",
"def run(self):\n macId, sensorId = getRequest(self.socket)\n try:\n while True:\n self.condition.acquire()\n while True:\n data = self.lastmessage.readData(macId, sensorId)\n self.socket.send(str(data) + '\\n')\n #self.socket.send(\"done\\n\")\n self.condition.wait(5)\n except socket.error, e:\n print \"Catching broken pipe\"\n self.condition.release()\n self.socket.close()\n\n\n \"\"\"\n macId, sensorId = getRequest(self.socket)\n print macId + \"|\" +sensorId\n self.condition.acquire()\n self.condition.wait()\n data = self.lastmessage.readData(macId, sensorId)\n self.condition.release()\n print str(data)\n self.socket.send(str(data) + '\\n')\n self.socket.send(\"done\\n\")\n self.socket.close()\n\"\"\"",
"def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)",
"def run(self):\n while True:\n msg = self.recv()",
"def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)",
"def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()",
"def run(self):\n try:\n while True:\n in_buff = self.stream.read_in_buf()\n for message in in_buff:\n packet = PacketFactory.parse_buffer(message)\n self.handle_packet(packet)\n self.stream.clear_in_buff()\n self.handle_user_interface_buffer()\n self.stream.send_out_buf_messages(self.reunion_mode == ReunionMode.FAILED)\n time.sleep(2)\n except KeyboardInterrupt:\n log('KeyboardInterrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)",
"def _run(self):\n\n while self._thread_alive_event.is_set():\n reported_events = self._poll.poll(self.POLL_TIMEOUT)\n\n for fd_event_pair in reported_events:\n fd, event = fd_event_pair\n\n if event & select.POLLIN or event & select.POLLPRI:\n self._recv(fd)\n\n elif event & select.POLLERR:\n self.logger.error(\"Error condition of some sort\")\n self._thread_alive_event.clear()\n break\n\n elif event & select.POLLNVAL:\n self.logger.error(\"Invalid request: descriptor not open\")\n self._thread_alive_event.clear()\n break",
"def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()",
"def RecvAndSleep(s):\n s.recv(RECV_BYTES)\n time.sleep(SLEEP_SEC)",
"def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)",
"def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(2048)\r\n print(\"Response \", self.response)\r\n except socket.error as exc:\r\n print (\"Receive Thread caught exception socket.error : %s\" % exc)",
"def run(self):\n\n lastLevel = 0\n\n MSG_SIZ = 12\n\n while self.go:\n\n buf = self.sock.recv(MSG_SIZ)\n\n while self.go and len(buf) < MSG_SIZ:\n buf += self.sock.recv(MSG_SIZ-len(buf))\n\n if self.go:\n seq, flags, tick, level = (struct.unpack('HHII', buf))\n\n if flags == 0:\n changed = level ^ lastLevel\n lastLevel = level\n for cb in self.callbacks:\n if cb.bit & changed:\n newLevel = 0\n if cb.bit & level:\n newLevel = 1\n if (cb.edge ^ newLevel):\n cb.func(cb.gpio, newLevel, tick)\n else:\n gpio = flags & 31\n for cb in self.callbacks:\n if cb.gpio == gpio:\n cb.func(cb.gpio, TIMEOUT, tick)\n\n self.sock.close()",
"def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)",
"def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(3000)\r\n except socket.error as exc:\r\n print (f\"Caught exception socket.error: {exc}\")",
"def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)",
"def process_AResponse(self) :\n while (1):\n str = self.recv(self.sock)\n if (len(str) > 0):\n response = amazon_pb2.AResponses()\n response.ParseFromString(str)\n print(response)\n # handle import new stock\n for arrive in response.arrived:\n things = arrive.things\n for thing in things:\n products = Whstock.objects.filter(pid = thing.id)\n if len(products) != 0:\n products[0].count = products[0].count + thing.count\n products[0].save()\n else :\n #need to specify world id\n whstock = Whstock()\n whstock.hid = arrive.whnum\n whstock.pid = thing.id\n whstock.dsc = thing.description\n whstock.count = thing.count\n whstock.save()\n # handle pack ready response\n #when ready send AU command to let UPS truck pickup,\n #use another thread for wait for UPS response\n #when receive response send ALoad command\n #when reveived loaded for Sim send AU command and let flag = 1;\n # tell UPS packages is ready and ask for trucks (provide destinaiton address)\n # tell warehouse to load when UPS trucks ready\n for currReady in response.ready:\n #save current state\n trans = Transaction.objects.get(ship_id = currReady)\n trans.ready = True\n trans.save()\n #connect to UPS\n ups_handler = threading.Thread(target=self.process_Uresponse, args=(trans,))\n ups_handler.start()\n self.AUCommand(trans, 0)\n print(\"first msg for UPS sent(to pickup)\")\n ups_handler.join()\n\n #load info from sim\n for load in response.loaded:\n #save current state\n trans = Transaction.objects.get(ship_id = load)\n trans.loaded = True\n trans.save()\n #connect to UPS\n self.AUCommand(trans, 1)\n print(\"second msg for UPS sent(get load success from sim world)\")",
"def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n #self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r",
"def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break",
"def run(self):\n while True:\n if self.timeout - time() <= 0:\n if self.state in [State.CANDIDATE, State.FOLLOWER]:\n self.start_election()\n\n elif self.state is State.LEADER:\n self.send_appends()\n self.commit_entries()\n\n else:\n self.unknown_state()\n\n self.handle_message()"
] | [
"0.6621941",
"0.6615155",
"0.65084773",
"0.64677125",
"0.6462943",
"0.6385985",
"0.63236433",
"0.632002",
"0.63187605",
"0.6293183",
"0.6292364",
"0.6276911",
"0.62755835",
"0.6266912",
"0.6254571",
"0.62525535",
"0.6221155",
"0.6208616",
"0.6160476",
"0.61516273",
"0.61443305",
"0.6103573",
"0.609657",
"0.60899204",
"0.6070794",
"0.6067228",
"0.60598606",
"0.60592",
"0.60283595",
"0.60253406"
] | 0.6794869 | 0 |
Build arguments for the Rally task. | def build_task_args(self, test_name):
task_args = {'service_list': [test_name]}
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.name)
task_args['flavor_alt_name'] = str(self.flavor_alt.name)
task_args['glance_image_location'] = str(self.filename)
task_args['glance_image_format'] = str(self.image_format)
task_args['tmpl_dir'] = str(self.template_dir)
task_args['sup_dir'] = str(self.support_dir)
task_args['users_amount'] = self.users_amount
task_args['tenants_amount'] = self.tenants_amount
task_args['use_existing_users'] = False
task_args['iterations'] = self.iterations_amount
task_args['concurrency'] = self.concurrency
task_args['smoke'] = self.smoke
task_args['volume_version'] = self.volume_version
task_args['volume_service_type'] = self.volume_service_type
task_args['block_migration'] = env.get("BLOCK_MIGRATION").lower()
task_args['username'] = self.username
if self.ext_net:
task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
if self.network:
task_args['netid'] = str(self.network.id)
else:
LOGGER.warning(
'No tenant network created. '
'Trying EXTERNAL_NETWORK as a fallback')
if env.get("EXTERNAL_NETWORK"):
network = self.cloud.get_network(env.get("EXTERNAL_NETWORK"))
task_args['netid'] = str(network.id) if network else ''
else:
task_args['netid'] = ''
return task_args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_task_args(self, test_name):\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args",
"def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )",
"def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args",
"def CreateArgs(run_task_request, args):\n if getattr(args, \"ARGS\", None):\n args_ref = dataplex_api.FetchExecutionSpecArgs(args.ARGS)\n if len(args_ref) > 0:\n return run_task_request.ArgsValue(\n additionalProperties=[\n run_task_request.ArgsValue.AdditionalProperty(\n key=key, value=value\n )\n for key, value in sorted(args_ref.items())\n ]\n )\n return None",
"def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args",
"def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)",
"def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')",
"def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser",
"def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)",
"def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args",
"def _get_task_args():\n task_name = FLAGS.task\n task_args = collections.OrderedDict()\n\n if task_name in TASK_FLAGS:\n task_flag_list = TASK_FLAGS[task_name]\n task_flag_dict = utils_impl.lookup_flag_values(task_flag_list)\n task_flag_prefix = TASK_FLAG_PREFIXES[task_name]\n for (key, value) in task_flag_dict.items():\n if key.startswith(task_flag_prefix):\n key = key[len(task_flag_prefix):].lstrip('_-')\n task_args[key] = value\n return task_args",
"def build_task(module_name, args=[], kwargs={}, module_attrs={}):\n kwargs = copy.deepcopy(kwargs) # Copy to avoid argument passed by reference issue\n if args:\n kwargs[\"_raw_params\"] = \" \".join(args)\n\n task_data = {\n \"action\": {\n \"module\": module_name,\n \"args\": kwargs\n },\n }\n if module_attrs:\n task_data.update(module_attrs)\n\n return task_data",
"def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional",
"def make_args(port, n, t, population, test=None, value=0, failure=None, tx_rate=0, loglevel=logging.INFO, output=None,\n broadcast=True, fan_out=10, profile=None, validate=False, ignore_promoter=False):\n res = [str(port), str(n), str(t), str(population)]\n\n if test is not None:\n res.append('--test')\n res.append(test)\n\n res.append('--value')\n res.append(str(value))\n\n if failure is not None:\n res.append('--failure')\n res.append(failure)\n\n res.append('--tx-rate')\n res.append(str(tx_rate))\n\n if loglevel == logging.DEBUG:\n res.append('--debug')\n elif loglevel == logging.INFO:\n res.append('-v')\n\n # None represents stdout\n if output is not None:\n res.append('-o')\n res.append(output)\n\n if broadcast:\n res.append('--broadcast')\n\n res.append('--fan-out')\n res.append(str(fan_out))\n\n if profile:\n res.append('--profile')\n res.append(profile)\n\n if validate:\n res.append('--validate')\n\n if ignore_promoter:\n res.append('--ignore-promoter')\n\n return res",
"def build_args():\n parser = argparse.ArgumentParser(description='Validates, edits, or creates a 22 XML file')\n subparsers = parser.add_subparsers(help='sub-command help')\n \n add_branch_parser(subparsers)\n add_edit_parser(subparsers)\n add_finalize_parser(subparsers)\n add_grade_parser(subparsers)\n add_new_parser(subparsers)\n add_validate_parser(subparsers)\n add_validate_document_parser(subparsers)\n \n return parser.parse_args()",
"def cmake_args(self):\n args = [\n self.define(\"CMAKE_C_COMPILER\", self.spec[\"mpi\"].mpicc),\n self.define(\"BUILD_SHARED_LIBS\", True),\n self.define(\"BUILD_TESTING\", self.run_tests),\n ]\n return args",
"def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)",
"def get_arguments():\n parser = argparse.ArgumentParser(description=\"TODO\")\n \n parser.add_argument('config_filepath', \n action='store', \n type=str, \n help='Path to configuration file containing paths of third parties libraries, projects, data directories, etc. See README for more information.')\n\n parser.add_argument('-C', '--config_cases', \n action='store',\n type=str,\n dest=\"config_cases\",\n help='Path to configuration file containing cases. The default one is stored at dask_io_experiments/experiment_5/cases.json',\n default=\"./dask_io_experiments/experiment_5/cases.json\")\n\n return parser.parse_args()",
"def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]",
"def get_tasks_args(parser):\n group = parser.add_argument_group(title='data_analyzing')\n\n group.add_argument('--analyzing-task', type=str, required=True,\n default=None,\n choices=['map',\n 'reduce'],\n help='What type of analyzing task to perform.')\n group.add_argument('--analyzing-data-type', type=str, required=True,\n default=None,\n choices=['BERT',\n 'GPT'],\n help='What type of data.')\n group.add_argument('--analyzing-metric', type=str, nargs='+', default=[],\n help='What kinds of metrics to analyze.')\n group.add_argument('--analyzing-num-workers', type=int, default=1,\n help='Number of workers. Each worker could be a single CPU node.')\n group.add_argument('--analyzing-worker-id', type=int, default=0,\n help='Worker id of current node.')\n group.add_argument('--analyzing-num-threads', type=int, default=1,\n help='Number of threads for each worker.')\n group.add_argument('--analyzing-num-threads-reduce', type=int, default=1,\n help='Number of threads for each worker.')\n group.add_argument('--analyzing-specific-threads', type=int, nargs='+', default=[],\n help='Which specific threads to run. Helpful when there are specific thread failed in previous run.')\n return parser",
"def full_args():\n return setup_args()",
"def getArgs():\n ProgDesc = (\"Creates a route type csv file of the type whose contents can be \"\n \"copied and pasted in to EMIT to change the proportions of \"\n \"different vehicle categories with a particular route type.\")\n ANPRDesc = (\"The ANPR file should be a csv file created using fleetSplitFromANPR.\")\n parser = argparse.ArgumentParser(description=ProgDesc)\n parser.add_argument('anprfile', type=str,\n help=\"The ANPR file to be processed. \"+ANPRDesc)\n parser.add_argument('basefile', type=str,\n help=(\"A file containing the base route type proportions. \"\n \"This should be created by clicking 'copy' on the \"\n \"route type window of EMIT, pasteing the results in \"\n \"to a spreadsheet, and saving as a csv file.\"))\n parser.add_argument('--saveloc', metavar='save location',\n type=str, nargs='?', default='Auto',\n help=\"Path where the outpt csv file should be saved.\")\n\n\n args = parser.parse_args()\n return args",
"def add_args(parser):\n # fmt: off\n TranslationTask.add_args(parser)\n parser.add_argument('--langs', required=True, metavar='LANG',\n help='comma-separated list of monolingual language, for example, \"en,de,fr\"'\n 'be careful these langs are what you used for pretraining (the same order),'\n 'not for finetuning.'\n 'you should always add all pretraining language idx during finetuning.')\n parser.add_argument('--multilang-sampling-alpha', type=float, default=0.7,\n help='sub sampling factor')\n parser.add_argument('--common_eos', type=str,\n help='common eos symbol for all languages')\n parser.add_argument('--placeholder', type=int, default=0,\n help='number of placeholder in dictionaries')\n parser.add_argument('--gt-langs', type=str,\n help=\"languages used in generation finetuning, separated wiht -, for example, 'en-fr-de'\")\n\n # fmt: on",
"def get_arguments():\n parser = argparse.ArgumentParser(description='Generates Terraform code from datadog monitors ID numbers')\n parser.add_argument(\"--input\", \"-i\", type=str, required=False, default=\"monitors.json\", help=\"Input JSON filename that contains Monitors ID numbers. i.e monitors.json\")\n parser.add_argument(\"--output\", \"-o\", type=str, required=False, default=\"monitors.tf\", help=\"Output Terraform filename. i.e monitors.tf\")\n parser.add_argument(\"--mode\", \"-m\", type=str, required=False, choices=[\"w\",\"a\"], default=\"w\", help=\"Create new Terraform file or Append to existing one.\")\n parser.add_argument(\"--all\", action=\"store_true\", help=\"Create Terraform files per group.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose output for the script.\")\n\n return parser.parse_args()",
"def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args",
"def render_build_args(options, ns):\n build_args = options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args",
"def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args",
"def __add_common_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"--model\", help=\"name of the model to use. Use query --get-models to get a list of valid names.\")\n parser.add_argument(\"--grid-type\", help=\"type of the grid to use.\")\n parser.add_argument(\"--level-type\", help=\"type of the vertical level to use.\")\n parser.add_argument(\"--init-time\", help=f\"initialization time to use. \"\n \"Integers are interpreted as hours since model start, dates formatted as \"\n f\"{__DATE_FORMAT.replace('%Y', 'YYYY').replace('%m', 'MM').replace('%d', 'DD').replace('%H', 'HH').replace('%M', 'MM')} are interpreted as absolute start dates.\")\n parser.add_argument(\"--variable\", nargs=\"+\", help=\"name of the variable to use. Use query --get-vars to get a list of valid names.\")\n parser.add_argument(\"--levels\", nargs=\"+\", type=int, help=\"levels to use.\")\n parser.add_argument(\"--lead-time\", nargs=\"+\", type=int, help=\"lead times to use in hours.\")",
"def get_unparsed_args():\n # Threads are set to 1 so that running tests doesn't completely drain\n # computing power, although it slows down the tests.\n # Specify output folder, else it will create folder in working directory\n # where the test module is run from.\n unparsed_args = [\"run.py\", PIPELINE, DB,\n \"-t\", str(1),\n \"-o\", str(test_folder),\n \"-b\", str(2)]\n return unparsed_args",
"def depfile_args(self, dep_file):\n return [\"-Wp,-MMD,%s\" % (dep_file)]"
] | [
"0.6594043",
"0.65554553",
"0.61109805",
"0.6043353",
"0.59985673",
"0.59900707",
"0.5975896",
"0.59682924",
"0.59195334",
"0.5870925",
"0.5840858",
"0.5762598",
"0.5727708",
"0.5724328",
"0.5706142",
"0.56642747",
"0.5651907",
"0.5580692",
"0.55498743",
"0.5525899",
"0.5478298",
"0.5473373",
"0.54666096",
"0.54624516",
"0.5456463",
"0.5447745",
"0.5444449",
"0.544391",
"0.5433601",
"0.5428535"
] | 0.69316447 | 0 |
Returns deployment id for active Rally deployment | def get_verifier_deployment_id():
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
with subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) as proc:
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid.decode("utf-8") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deployment_id(self) -> str:\n return pulumi.get(self, \"deployment_id\")",
"def get_verifier_deployment_id():\n cmd = (\"rally deployment list | awk '/\" +\n getattr(config.CONF, 'rally_deployment_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n deployment_uuid = proc.stdout.readline().rstrip()\n return deployment_uuid",
"def deployment_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"deployment_id\")",
"def DeploymentId(self) -> _n_0_t_0:",
"def deployment_uuid(self):\n return self._deployment_uuid",
"def custom_voice_deployment_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_voice_deployment_id\")",
"def elastic_cloud_deployment_id(self) -> str:\n return pulumi.get(self, \"elastic_cloud_deployment_id\")",
"def _create_deployment(self) -> Optional[str]:\n LOG.debug(\"%sTrying to create a deployment through client\", self.log_prefix)\n response_dep = cast(\n Dict, self._api_client.create_deployment(restApiId=self._api_physical_id, description=\"Created by SAM Sync\")\n )\n new_dep_id = response_dep.get(\"id\")\n LOG.debug(\"%sCreate Deployment Result: %s\", self.log_prefix, response_dep)\n return new_dep_id",
"def deployed_index_id(self) -> Optional[str]:\n return pulumi.get(self, \"deployed_index_id\")",
"def get_deployment_name(replicaset):\n api_response = api.read_namespaced_replica_set_status(replicaset,\n namespace)\n deployment_name = read_name(api_response)\n return deployment_name",
"def add_deployment(self, dep):\n res = self.client.Management_API.Create_Deployment(\n Authorization=\"foo\", deployment=dep\n ).result()\n adapter = res[1]\n loc = adapter.headers.get(\"Location\", None)\n depid = os.path.basename(loc)\n\n self.log.debug(\"added new deployment with ID: %s\", depid)\n return depid",
"def handle_get_deployment(project_id, deployment_id):\n return jsonify(get_deployment(uuid=deployment_id, project_id=project_id))",
"def resolve_deployment(self, args, context, info):\n params = {\n 'id': self.deployment_id,\n }\n return DeploymentLoader.get().load(params)[0]",
"def write_deployment(campaign_key, deployment_data):\n\n deployment = Deployment(**deployment_data)\n deployment.campaign_id = campaign_key\n deployment.save()\n\n return deployment.id",
"def create_rally_deployment(environ=None):\n # set the architecture to default\n pod_arch = env.get(\"POD_ARCH\")\n arch_filter = ['aarch64']\n\n if pod_arch and pod_arch in arch_filter:\n LOGGER.info(\"Apply aarch64 specific to rally config...\")\n with open(RALLY_AARCH64_PATCH_PATH, \"r\") as pfile:\n rally_patch_conf = pfile.read()\n\n for line in fileinput.input(RALLY_CONF_PATH):\n print(line, end=' ')\n if \"cirros|testvm\" in line:\n print(rally_patch_conf)\n\n LOGGER.info(\"Creating Rally environment...\")\n\n try:\n cmd = ['rally', 'deployment', 'destroy',\n '--deployment',\n str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError:\n pass\n\n cmd = ['rally', 'deployment', 'create', '--fromenv',\n '--name', str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd, env=environ)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n\n cmd = ['rally', 'deployment', 'check']\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n return get_verifier_deployment_id()",
"def create_rally_deployment(environ=None):\n # pylint: disable=unexpected-keyword-arg\n # set the architecture to default\n pod_arch = env.get(\"POD_ARCH\")\n arch_filter = ['aarch64']\n\n if pod_arch and pod_arch in arch_filter:\n LOGGER.info(\"Apply aarch64 specific to rally config...\")\n with open(\n RallyBase.rally_aar4_patch_path, \"r\",\n encoding='utf-8') as pfile:\n rally_patch_conf = pfile.read()\n\n for line in fileinput.input(RallyBase.rally_conf_path):\n print(line, end=' ')\n if \"cirros|testvm\" in line:\n print(rally_patch_conf)\n\n LOGGER.info(\"Creating Rally environment...\")\n try:\n cmd = ['rally', 'deployment', 'destroy',\n '--deployment',\n str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n except subprocess.CalledProcessError:\n pass\n\n cmd = ['rally', 'deployment', 'create', '--fromenv',\n '--name', str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd, env=environ)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n cmd = ['rally', 'deployment', 'check']\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n return RallyBase.get_verifier_deployment_id()",
"def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")",
"def get_task_id(tag):\n cmd = [\"rally\", \"task\", \"list\", \"--tag\", tag, \"--uuids-only\"]\n output = subprocess.check_output(cmd).decode(\"utf-8\").rstrip()\n LOGGER.info(\"%s: %s\", \" \".join(cmd), output)\n return output",
"def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id",
"def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])",
"def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")",
"def app_installation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_installation_id\")",
"def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")",
"def single_tenant_app_id(self):\n if \"singleTenantAppId\" in self._prop_dict:\n return self._prop_dict[\"singleTenantAppId\"]\n else:\n return None",
"def getRunningId(self):\n return( int(self.id.split('.')[2]) )",
"def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")",
"def app_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_resource_id\")",
"def runtime_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"runtime_id\")",
"def get_deployment_output(account_name: Optional[pulumi.Input[str]] = None,\n deployment_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDeploymentResult]:\n ...",
"def active_directory_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"active_directory_id\")"
] | [
"0.8321824",
"0.8090205",
"0.7915562",
"0.7642716",
"0.7500653",
"0.6717956",
"0.6637013",
"0.6469506",
"0.6452627",
"0.6147499",
"0.6131181",
"0.60924846",
"0.60842574",
"0.5925855",
"0.5919809",
"0.5907035",
"0.57978237",
"0.579003",
"0.5778056",
"0.5776697",
"0.5770048",
"0.57566124",
"0.56790286",
"0.5663984",
"0.55985475",
"0.55907273",
"0.5567179",
"0.5563139",
"0.54650825",
"0.5462228"
] | 0.81415844 | 1 |
Create new rally deployment | def create_rally_deployment(environ=None):
# pylint: disable=unexpected-keyword-arg
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(
RallyBase.rally_aar4_patch_path, "r",
encoding='utf-8') as pfile:
rally_patch_conf = pfile.read()
for line in fileinput.input(RallyBase.rally_conf_path):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
return RallyBase.get_verifier_deployment_id() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_rally_deployment(environ=None):\n # set the architecture to default\n pod_arch = env.get(\"POD_ARCH\")\n arch_filter = ['aarch64']\n\n if pod_arch and pod_arch in arch_filter:\n LOGGER.info(\"Apply aarch64 specific to rally config...\")\n with open(RALLY_AARCH64_PATCH_PATH, \"r\") as pfile:\n rally_patch_conf = pfile.read()\n\n for line in fileinput.input(RALLY_CONF_PATH):\n print(line, end=' ')\n if \"cirros|testvm\" in line:\n print(rally_patch_conf)\n\n LOGGER.info(\"Creating Rally environment...\")\n\n try:\n cmd = ['rally', 'deployment', 'destroy',\n '--deployment',\n str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError:\n pass\n\n cmd = ['rally', 'deployment', 'create', '--fromenv',\n '--name', str(getattr(config.CONF, 'rally_deployment_name'))]\n output = subprocess.check_output(cmd, env=environ)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n\n cmd = ['rally', 'deployment', 'check']\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n return get_verifier_deployment_id()",
"def create_and_run_deployment(\n project_id: int = Form(...),\n model_id: Text = Form(...),\n version: Text = Form(...),\n model_uri: Text = Form(...),\n type: Text = Form(...) # pylint: disable=redefined-builtin\n) -> JSONResponse:\n\n deploy_manager = DeployManager()\n deployment_id = deploy_manager.create_deployment(\n project_id, model_id, version, model_uri, type\n )\n return JSONResponse({'deployment_id': str(deployment_id)}, HTTPStatus.ACCEPTED)",
"def test_create_deployment(self):\n pass",
"def test_create_deployment_entire(self):\n pass",
"def create_deployment(StackId=None, AppId=None, InstanceIds=None, LayerIds=None, Command=None, Comment=None, CustomJson=None):\n pass",
"def create_deployment(self, ApiId: str, Description: str = None, StageName: str = None) -> Dict:\n pass",
"def deploy():",
"def _create_deployment(self) -> Optional[str]:\n LOG.debug(\"%sTrying to create a deployment through client\", self.log_prefix)\n response_dep = cast(\n Dict, self._api_client.create_deployment(restApiId=self._api_physical_id, description=\"Created by SAM Sync\")\n )\n new_dep_id = response_dep.get(\"id\")\n LOG.debug(\"%sCreate Deployment Result: %s\", self.log_prefix, response_dep)\n return new_dep_id",
"def create(self, adt=None, url=None, params=None, dryrun=False):\n if self._id_exists():\n abort(400, \"The application ID already exists\")\n elif self.engine.app_list:\n abort(400, \"Multiple applications are not supported\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, dryrun)\n try:\n self.engine.launch(tpl, adaps, self.app_id, dryrun)\n except Exception as error:\n abort(500, f\"Error while deploying: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully deployed\"}",
"def deploy_app(device_id, app_id, app_version):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/applications\"}\n versions = esapp.App(kargs).get_app_version_by_id(app_id)\n\n kargs.update({\"url_path\": \"/tasks\"})\n if not app_version in versions:\n sys.exit(\"Fail: app_version \\\"%s\\\" not found, available list:%s\" \\\n %(str(app_version), str(jsn.dumps(versions))))\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.create_app_task(device_id, app_version, app_id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n sys.exit(\"Fail: error response\")\n\n try:\n click.echo(\"Success to create a task id: %s\" %(str(dict_resp[\"task_id\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(1)",
"def new_deployment(request, recipe, **_kwargs):\n return create_view(\n request, _(\"Deployment of recipe '%s'\") % recipe, DeploymentForm, recipe=recipe\n )",
"def _create_deployment(self) -> aws.apigateway.Stage:\n deployment = aws.apigateway.Deployment(\n f\"{self.rest_api._name}-deployment\",\n rest_api=self.rest_api.id,\n # TODO: Still want to have a triggers function\n opts=pulumi.ResourceOptions(\n parent=self, depends_on=[p.lambda_integration for p in self.proxies]\n ),\n )\n\n stage = aws.apigateway.Stage(\n f\"{self.rest_api._name}-prod-stage\",\n deployment=deployment.id,\n rest_api=self.rest_api.id,\n stage_name=\"prod\",\n opts=pulumi.ResourceOptions(parent=self),\n )\n\n return stage",
"def deploy(self):\n\n netlify_cli = getattr(settings, \"NETLIFY_PATH\", None)\n if not netlify_cli:\n raise CommandError(\"NETLIFY_PATH is not defined in settings\")\n\n deployment = Deployment()\n deployment.save()\n\n command = [netlify_cli, \"deploy\"]\n command.append(\"--dir={}\".format(settings.BUILD_DIR))\n command.append(\"--prod\")\n command.append('--message=\"Wagtail Deployment #{}\"'.format(deployment.pk))\n\n site_id = getattr(settings, \"NETLIFY_SITE_ID\", None)\n if site_id:\n command.append(\"--site={}\".format(site_id))\n\n auth_token = getattr(settings, \"NETLIFY_API_TOKEN\", None)\n if auth_token:\n command.append(\"--auth={}\".format(auth_token))\n\n subprocess.call(command)",
"def start_deployment(cloud_connector, description):\n logging.getLogger(\"root\").info(\"Preparing the connector\")\n cloud_connector.prepare()\n logging.getLogger(\"root\").info(\"Starting new deployment\")\n deployment = Deployment()\n deployment.cloud_connector = cloud_connector\n deployment.configure(description)\n logging.getLogger(\"root\").info(\"Launching deployment\")\n deployment.launch()\n logging.getLogger(\"root\").info(\"Executing deployment scripts\")\n while deployment.has_more_steps():\n deployment.execute_script()\n return deployment",
"def create_deployment(deployment_id, blueprint_id, environment):\n environment.add_cleanup(\n environment.cfy.deployments.delete,\n kwargs={\n 'deployment_id': deployment_id,\n },\n )\n environment.cfy.deployments.create(\n blueprint_id=blueprint_id,\n deployment_id=deployment_id,\n skip_plugins_validation=True,\n )",
"def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package",
"def new_app(project_name,app_name ):\n from flask_create_app.core.commands.cmd_newapp import create_new_app\n proj_dir = os.getcwd()\n create_new_app(app_name, proj_dir,project_name)",
"def test_release_deployment_run(self):\n pass",
"def handle_post_deployments(project_id):\n kwargs = request.get_json(force=True)\n kwargs = {to_snake_case(k): v for k, v in kwargs.items()}\n deployment = create_deployment(project_id=project_id, **kwargs)\n return jsonify(deployment)",
"def add_deployment(self, dep):\n res = self.client.Management_API.Create_Deployment(\n Authorization=\"foo\", deployment=dep\n ).result()\n adapter = res[1]\n loc = adapter.headers.get(\"Location\", None)\n depid = os.path.basename(loc)\n\n self.log.debug(\"added new deployment with ID: %s\", depid)\n return depid",
"def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")",
"def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')",
"def cmd_apps__create(args):\n \n if args.name is None:\n args.name = os.path.basename(os.getcwd())\n\n url = remote.create_project(args.name)\n \n if in_git_repo():\n if get_push_url('tinyserv') is None:\n git(None, 'remote', 'add', 'tinyserv', url)\n print \"Added remote 'tinyserv'.\"\n else:\n print \"This repository is already configured for app '%s'.\" % \\\n _get_current_project_name()\n \n print \"Remote repository URL is %s.\" % url",
"def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()",
"def create_releases(name):\n data = get_object(name)\n if not data:\n return None\n\n data = copy.deepcopy(data)\n\n # pop out the works\n works = data.pop('work_version', None)\n\n # create a dictionary of the object parameters with release as the key\n dd = {}\n dd[data.get('release', 'DR15')] = data\n\n if works:\n # add any other work objects found\n for work in works:\n work_object = create_work_version(work, data)\n dd[f\"WORK-{work_object['version_info']['number']}\"] = work_object\n\n # expand the path envvars\n for k, v in dd.items():\n release = 'SDSSWORK' if 'WORK' in k else k\n tree.replant_tree(release.lower())\n dd[k]['path'] = os.path.expandvars(v.get('path', ''))\n\n return dd",
"def create_application(name=None, description=None):\n pass",
"def test_publish_deployment_run(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()",
"def deploy():\n build()\n copy()\n install()"
] | [
"0.79588085",
"0.69769126",
"0.6959189",
"0.68106496",
"0.68078554",
"0.63774866",
"0.6288516",
"0.62842375",
"0.62308085",
"0.6210832",
"0.61876476",
"0.6130477",
"0.609062",
"0.6017506",
"0.6001469",
"0.60007924",
"0.5995541",
"0.59840333",
"0.59733856",
"0.59307474",
"0.58960307",
"0.58759785",
"0.58631265",
"0.5823121",
"0.581426",
"0.58134925",
"0.58055997",
"0.5800085",
"0.57708156",
"0.5744333"
] | 0.78494895 | 1 |
Set keystone_default_role in rally.conf | def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if not rconfig.has_section('openstack'):
rconfig.add_section('openstack')
rconfig.set(
'openstack', 'keystone_default_role', env.get("NEW_USER_ROLE"))
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_default_role(self, rally_conf='/etc/rally/rally.conf'):\n role = self.get_default_role(self.cloud)\n if not role:\n return\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n if not rconfig.has_section('openstack'):\n rconfig.add_section('openstack')\n rconfig.set('openstack', 'swift_operator_role', role.name)\n with open(rally_conf, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def _users_assign_default_role(course_id):\r\n enrollments = CourseEnrollment.objects.filter(course_id=course_id)\r\n for enrollment in enrollments:\r\n assign_default_role(course_id, enrollment.user)",
"def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(username=usr,\n password=pwd,\n auth_url=url)\n keystone.authenticate()\n except k_exceptions.Unauthorized:\n pass\n else:\n self.fail('Step 1 failed: Default credentials '\n 'for keystone on master node were not changed')",
"def assign_default_role(course_id, user):\r\n role, __ = Role.objects.get_or_create(course_id=course_id, name=\"Student\")\r\n user.roles.add(role)",
"def set_default_org(self, name):\n org = self.get_org(name)\n self.unset_default_org()\n org.config[\"default\"] = True\n org.save()\n if org.created:\n sfdx(\n sarge.shell_format(\n \"force:config:set defaultusername={}\", org.sfdx_alias\n )\n )",
"def set_default_account(web3):\n web3.eth.defaultAccount = web3.eth.accounts[0]",
"def _validate_default_role(self, default_role):\n if default_role is '':\n raise UserException(\"default_role cannot be empty.\")\n elif default_role not in Employee.allowed_roles():\n raise UserException(\"Allowed values for default_role are %s\" %\n str(Employee.allowed_roles()))",
"def test_002_check_default_openstack_credential_usage(self):\n cluster_data = {\n 'password': self.config.identity.admin_password,\n 'username': self.config.identity.admin_username}\n\n default_data = {\n 'password': 'admin',\n 'username': 'admin'}\n\n self.verify_response_body_not_equal(\n exp_content=default_data,\n act_content=cluster_data,\n msg='Default credentials values are used. '\n 'We kindly recommend that you changed all defaults.',\n failed_step='1')",
"def test_replace_cluster_role(self):\n pass",
"def unset_default_org(self):\n for org in self.list_orgs():\n org_config = self.get_org(org)\n if org_config.default:\n del org_config.config[\"default\"]\n org_config.save()\n sfdx(\"force:config:set defaultusername=\")",
"def getEveryoneRole(store):\n return store.findOrCreate(Role, externalID=u'Everyone')",
"def default_user(self):\n self.user = self.create_user(create_token=True)\n return",
"def test_300_keystone_default_config(self):\n u.log.debug('Checking keystone config file...')\n unit = self.keystone_sentry\n conf = '/etc/keystone/keystone.conf'\n ks_ci_rel = unit.relation('identity-service',\n 'cinder:identity-service')\n my_ks_rel = self.pxc_sentry.relation('shared-db',\n 'keystone:shared-db')\n db_uri = \"mysql://{}:{}@{}/{}\".format('keystone',\n my_ks_rel['password'],\n my_ks_rel['db_host'],\n 'keystone')\n expected = {\n 'DEFAULT': {\n 'debug': 'False',\n 'admin_token': ks_ci_rel['admin_token'],\n 'use_syslog': 'False',\n 'log_config_append': '/etc/keystone/logging.conf',\n 'public_endpoint': u.valid_url, # get specific\n 'admin_endpoint': u.valid_url, # get specific\n },\n 'extra_headers': {\n 'Distribution': 'Ubuntu'\n },\n 'database': {\n 'connection': db_uri,\n 'idle_timeout': '200'\n }\n }\n\n if self._get_openstack_release() < self.trusty_mitaka:\n expected['DEFAULT']['verbose'] = 'False'\n expected['DEFAULT']['log_config'] = \\\n expected['DEFAULT']['log_config_append']\n del expected['DEFAULT']['log_config_append']\n\n if self._get_openstack_release() >= self.trusty_kilo and \\\n self._get_openstack_release() < self.trusty_mitaka:\n # Kilo and Liberty\n expected['eventlet_server'] = {\n 'admin_bind_host': '0.0.0.0',\n 'public_bind_host': '0.0.0.0',\n 'admin_port': '35347',\n 'public_port': '4990',\n }\n elif self._get_openstack_release() <= self.trusty_icehouse:\n # Juno and earlier\n expected['DEFAULT'].update({\n 'admin_port': '35347',\n 'public_port': '4990',\n 'bind_host': '0.0.0.0',\n })\n\n for section, pairs in expected.iteritems():\n ret = u.validate_config_data(unit, conf, section, pairs)\n if ret:\n message = \"keystone config error: {}\".format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)",
"def set_task_role(self, task_role):\n if not isinstance(task_role, AbstractModelReference):\n raise ConfigException(\"A default task role was supplied that isn't some kind of model reference: %s\" %\n str(task_role))\n self.default_task_role = task_role",
"def get_lambda_default_assume_role_doc(self, extra_services):\n role_doc = {\n \"Version\":\"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\":\"Allow\",\n \"Principal\":{\n \"Service\": LAMBDA_DEFAULT_ASSUME_ROLES + (extra_services or [])\n },\n \"Action\":[\"sts:AssumeRole\"]\n }\n ]\n }\n\n return role_doc",
"def set_as_default (self):\n\t\ttry:\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\texcept ConfigParser.NoSectionError:\n\t\t\tself.setup_config()\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\tself.config.write(open(self.configFile, 'w'))",
"def default():\n return DefaultTcsMoltenSalt.default()",
"def test_create_config_roots(self):\n with self.override_role():\n self._create_config_root()",
"def assign_default_role_on_enrollment(sender, instance, **kwargs):\r\n # The code below would remove all forum Roles from a user when they unenroll\r\n # from a course. Concerns were raised that it should apply only to students,\r\n # or that even the history of student roles is important for research\r\n # purposes. Since this was new functionality being added in this release,\r\n # I'm just going to comment it out for now and let the forums team deal with\r\n # implementing the right behavior.\r\n #\r\n # # We've unenrolled the student, so remove all roles for this course\r\n # if not instance.is_active:\r\n # course_roles = list(Role.objects.filter(course_id=instance.course_id))\r\n # instance.user.roles.remove(*course_roles)\r\n # return\r\n\r\n # We've enrolled the student, so make sure they have the Student role\r\n assign_default_role(instance.course_id, instance.user)",
"def default_acl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_acl\")",
"def default_acl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_acl\")",
"def setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)",
"def set_default_bundle(bundle):\n data = {\n \"default_bundle\": bundle\n }\n response = houston.put(\"/zipline/config\", data=data)\n houston.raise_for_status_with_json(response)\n return response.json()",
"def test_patch_cluster_role(self):\n pass",
"def acctLogin(self):\n self.acctObj.email = \"[email protected]\"\n self.password = \"default\"\n self._displayName = \"defaultUser\"\n return True",
"def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")",
"def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()",
"def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()",
"def test_set_project_default_virtualization_realm(self):\n pass",
"def test_replace_cluster_role_binding(self):\n pass"
] | [
"0.7335462",
"0.602011",
"0.59378004",
"0.5808055",
"0.57735175",
"0.5590387",
"0.55819136",
"0.5538477",
"0.5494703",
"0.549238",
"0.5442465",
"0.54219705",
"0.5356283",
"0.53251183",
"0.5314031",
"0.52759516",
"0.5272258",
"0.5216909",
"0.5208145",
"0.51750976",
"0.51750976",
"0.5145487",
"0.5134059",
"0.51334566",
"0.5118728",
"0.5114812",
"0.50976336",
"0.50927454",
"0.5080943",
"0.50800496"
] | 0.8199875 | 0 |
Get task id from command rally result. | def get_task_id(tag):
cmd = ["rally", "task", "list", "--tag", tag, "--uuids-only"]
output = subprocess.check_output(cmd).decode("utf-8").rstrip()
LOGGER.info("%s: %s", " ".join(cmd), output)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_task_result(task_id: TaskId):",
"def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")",
"def get_run_id_from_result(model_result):\n if 'ml_flow' not in model_result:\n return None\n\n with model_result['ml_flow'].open('r') as f:\n return yaml.load(f).get('run_id')",
"def result_id(self) -> str:\n return self._result_id",
"def get_task_uuid(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskUuid', self.handle)",
"def _get_id(results, index):\n return results[index]['_id']",
"def _extract_id_from_batch_response(r, name='id'):\n names = name + 's'\n if names in r:\n # soon-to-be deprecated batch reponse\n if 'errors' in r and r['errors']:\n raise GeneralException(r['errors'][0]['desc'])\n id = r[names][0]\n else:\n # new-style simplified api response\n id = r[name]\n return int(id)",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._mpis.task_id",
"def find_test_id(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['actionResult']['testsRef']['id']['_value']\n _logger.debug('Using test id %s', result)\n return result",
"def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')",
"def _get_task_id_from_xmodule_args(xmodule_instance_args):\r\n return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID",
"def _retrieve_task_id(job_name, res_id, job_dict):\n if job_dict:\n workers = list(job_dict.keys())\n for worker in workers:\n for job in job_dict[worker]:\n if 'name' in job:\n if job['name'] == job_name:\n if res_id in job['args']:\n return job['id']\n elif 'request' in job:\n scheduled_job = job['request']\n if 'name' in scheduled_job:\n if scheduled_job['name'] == job_name:\n if res_id in scheduled_job['args']:\n return scheduled_job['id']\n\n return None",
"def getRID(self):\n return self._result.uuid",
"def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()",
"def find_log_id(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['actionResult']['logRef']['id']['_value']\n _logger.debug('Using log id %s', result)\n return result",
"def build_result_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"build_result_id\")",
"def src_task_id(self):\n return struct.unpack('<H', self.pkt.payload[4:6])[0]",
"def get_task_id(self, position):\n task_id = self.stn.get_task_id(position)\n if task_id:\n return task_id\n else:\n raise TaskNotFound",
"def get_spotify_id_from_result(result):\n return result['metadata']['music'][0]['external_metadata']['spotify']['track']['id']",
"def parse_res_id(response):\n pass",
"def get_run_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['run_id']\n\t\texcept:\n\t\t\treturn None",
"def _retrieve_job_id(job_name, res_id):\n active_jobs = celery_inspector.active()\n job_id = _retrieve_task_id(job_name, res_id, active_jobs)\n if not job_id:\n reserved_jobs = celery_inspector.reserved()\n job_id = _retrieve_task_id(job_name, res_id, reserved_jobs)\n if not job_id:\n scheduled_jobs = celery_inspector.scheduled()\n job_id = _retrieve_task_id(job_name, res_id, scheduled_jobs)\n return job_id",
"def xcom_result(task_or_id):\n if isinstance(task_or_id, str):\n return \"{{ti.xcom_pull(task_ids='\" + task_or_id + \"')}}\"\n elif isinstance(task_or_id, airflow.operators.BaseOperator):\n return \"{{ti.xcom_pull(task_ids='\" + task_or_id.task_id + \"')}}\"\n else:\n raise TypeError(\"Expected str or BaseOperator, but got {}\".format(task_or_id.__class__.__name__))",
"def taskid(name):\n return \"(select id from event_type where name = '{}')\".format(name)",
"def taskid(self):\n raise NotImplementedError('Must be implemented by subclass.')",
"def dst_task_id(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]"
] | [
"0.70485806",
"0.6791863",
"0.6777043",
"0.6657056",
"0.6641306",
"0.65473753",
"0.6410299",
"0.6408232",
"0.6408232",
"0.6408232",
"0.6408232",
"0.63412124",
"0.62609065",
"0.62482697",
"0.6221775",
"0.6181935",
"0.610406",
"0.6102988",
"0.60917234",
"0.60808253",
"0.6067667",
"0.6033168",
"0.60237324",
"0.5996889",
"0.59916896",
"0.5972463",
"0.5935219",
"0.5919659",
"0.58743656",
"0.5869251"
] | 0.7335865 | 0 |
Determine if migration is supported. | def _migration_supported(self):
if self.compute_cnt > 1:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_migration(self, migration: str) -> bool:\n pass",
"def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False",
"def is_migrated(self) -> bool:\n return pulumi.get(self, \"is_migrated\")",
"def needs_migrations(self):\n # TODO(majklk): also check models etc.\n if len(self.widgets) > 0:\n return True\n return False",
"def _needs_migrations(self, table: str, current_level: int) -> bool:\n sql_dir = self.migrations / table\n if not sql_dir.exists():\n if current_level > 0: # Where did the previous migrations go?\n raise MigrationException(f\"{table} already has {current_level}, but directory is missing\")\n else: # No migrations? That's ok\n return False\n\n # TODO return paths to migrations that need to be applied\n # (would avoid listing files)\n for migration in sorted(sql_dir.iterdir()):\n level = int(migration.name.split('_')[0])\n if level > current_level: # Not yet applied\n return True # Need to apply it later\n return False # Up to date!",
"def conditional_neutron_migration():\n if CompareOpenStackReleases(os_release('neutron-server')) <= 'icehouse':\n log('Not running neutron database migration as migrations are handled '\n 'by the neutron-server process.')\n return\n\n if not is_elected_leader(CLUSTER_RES):\n log('Not running neutron database migration, not leader')\n return\n\n allowed_units = relation_get('allowed_units')\n if not (allowed_units and local_unit() in allowed_units.split()):\n log('Not running neutron database migration, either no '\n 'allowed_units or this unit is not present')\n return\n\n migrate_neutron_database()",
"def check_supported_features(self):",
"def migration(self) -> Optional[pulumi.Input['PgPgUserConfigMigrationArgs']]:\n return pulumi.get(self, \"migration\")",
"def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n return db == self.db_name",
"def check_missing_migrations():\n from django.db.migrations.autodetector import MigrationAutodetector\n from django.db.migrations.loader import MigrationLoader\n from django.db.migrations.questioner import (\n NonInteractiveMigrationQuestioner as Questioner,\n )\n from django.db.migrations.state import ProjectState\n\n loader = MigrationLoader(None, ignore_no_migrations=True)\n conflicts = loader.detect_conflicts()\n if conflicts:\n raise Exception(\n \"Migration conflicts detected. Please fix your migrations.\"\n )\n questioner = Questioner(dry_run=True, specified_apps=None)\n autodetector = MigrationAutodetector(\n loader.project_state(),\n ProjectState.from_apps(apps),\n questioner,\n )\n changes = autodetector.changes(\n graph=loader.graph,\n trim_to_apps=None,\n convert_apps=None,\n migration_name=None,\n )\n if changes:\n raise Exception(\n \"Migration changes detected. \"\n \"Please update or add to the migration file as appropriate\"\n )\n print(\"Migration-checker detected no problems.\")",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == 'default':\n return True\n else:\n return False",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == 'delivery':\n return db == 'db1'\n return None",
"def validate(self, disk_info):\n return _('The configured disk driver does not support migration '\n 'or resize.')",
"async def check_migration(hass: core.HomeAssistant, entry: ConfigEntry) -> None:\n host = entry.data[CONF_HOST]\n\n # migrate CONF_USERNAME --> CONF_API_KEY\n if CONF_USERNAME in entry.data:\n LOGGER.info(\"Migrate %s to %s in schema\", CONF_USERNAME, CONF_API_KEY)\n data = dict(entry.data)\n data[CONF_API_KEY] = data.pop(CONF_USERNAME)\n hass.config_entries.async_update_entry(entry, data=data)\n\n conf_api_version = entry.data.get(CONF_API_VERSION, 1)\n if conf_api_version == 1:\n # a bridge might have upgraded firmware since last run so\n # we discover its capabilities at every startup\n websession = aiohttp_client.async_get_clientsession(hass)\n if await is_v2_bridge(host, websession):\n supported_api_version = 2\n else:\n supported_api_version = 1\n LOGGER.debug(\n \"Configured api version is %s and supported api version %s for bridge %s\",\n conf_api_version,\n supported_api_version,\n host,\n )\n\n # the call to `is_v2_bridge` returns (silently) False even on connection error\n # so if a migration is needed it will be done on next startup\n\n if conf_api_version == 1 and supported_api_version == 2:\n # run entity/device schema migration for v2\n await handle_v2_migration(hass, entry)\n\n # store api version in entry data\n if (\n CONF_API_VERSION not in entry.data\n or conf_api_version != supported_api_version\n ):\n data = dict(entry.data)\n data[CONF_API_VERSION] = supported_api_version\n hass.config_entries.async_update_entry(entry, data=data)",
"def migration():",
"def test_Migration_columns(self):\n migration = self.DBSession.query(Migration).filter_by().first()\n if self.engine.dialect.name == 'sqlite': # pragma: no cover\n # pysqlite driver always convert the strings collumns to unicode\n self.assertIsInstance(migration.version_num, unicode)\n else: # pragma: no cover\n self.assertIsInstance(migration.version_num, str)",
"async def check_migration(migration):\n try:\n count = await db.Migrations.count_documents({migration: True})\n except Exception as e:\n print(e)\n return count > 0",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if model_name in self.route_encuestas:\n return db == 'encuestas'\n elif model_name in self.route_uxxienc_resul:\n return db == 'uxxienc_resul'\n return None",
"def is_supported(self) -> bool:\n if self.builders and self.app.builder.name not in self.builders:\n return False\n if self.formats and self.app.builder.format not in self.formats:\n return False\n\n return True",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == 'eotrts_student':\n return db == 'eotrts_db'\n return None",
"def environment_needs_upgrade(self, db):\n\n return False",
"def allow_migrate(self, db, app_label, model=None, **hints):\n if app_label == 'data_collection':\n return db == 'guam_groundwater'\n return None",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\r\n if db == self.DB_NAME:\r\n return app_label == self.APP_LABEL\r\n elif app_label == self.APP_LABEL:\r\n return False\r\n return None",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == self.app_label:\n return db == self.db_name\n return None",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if 'target_db' in hints:\n return hints['target_db'] == self.db_entry\n if app_label in self.route_app_labels:\n return db == self.db_entry\n return None",
"def allow_migrate(self, db, app_label, model_name=None, **hints):\n if app_label == 'researcherquery':\n return db == 'safedb'\n return None",
"def check_migration_state(exclude: Optional[List[str]] = None):\n exclude = exclude or []\n try:\n connection = connections[DEFAULT_DB_ALIAS]\n loader = MigrationLoader(connection, ignore_no_migrations=True)\n\n graph = loader.graph\n targets = graph.leaf_nodes()\n\n plan = set()\n seen = set()\n # Generate the plan\n for target in targets:\n for migration in graph.forwards_plan(target):\n if migration not in seen:\n node = graph.node_map[migration]\n plan.add(node.key)\n seen.add(migration)\n\n # Apparently Django returns {} if no connection (which is a set not a dict).\n tmp = loader.applied_migrations\n applied_migrations = set(tmp if isinstance(tmp, set) else tmp.keys())\n\n except Exception as error:\n raise MigrationStateError(\n f\"Error when checking state of migrations conflicts:\\n{error}\"\n )\n\n if exclude:\n applied_migrations = set(\n [el for el in applied_migrations if el[0] not in exclude]\n )\n plan = set([el for el in plan if el[0] not in exclude])\n\n if applied_migrations != plan:\n data = {\n \"The DB is ahead of your tables by\": applied_migrations.difference(plan),\n \"Your tables are ahead of the DB by\": plan.difference(applied_migrations),\n }\n raise MigrationStateError(\n \"Applied migrations do not match local migration files\", data\n )",
"def ready(self):\n import django_better_migrations.migration_writer_patch # noqa",
"def supported():\n return os.path.isfile(OPENCOR)"
] | [
"0.7405025",
"0.643145",
"0.64205694",
"0.6287498",
"0.62607265",
"0.6124708",
"0.603821",
"0.5938896",
"0.58489233",
"0.58389837",
"0.583489",
"0.58281356",
"0.5824205",
"0.57566595",
"0.5745185",
"0.5720472",
"0.5694382",
"0.5677317",
"0.5673947",
"0.5661287",
"0.56218493",
"0.561374",
"0.5585906",
"0.55826217",
"0.5577622",
"0.5576696",
"0.5563387",
"0.5552436",
"0.55359155",
"0.55065936"
] | 0.81642824 | 0 |
Check if given needle is in the iterable haystack, using regex. | def in_iterable_re(needle, haystack):
# match without regex
if needle in haystack:
return True
for pattern in haystack:
# match if regex pattern is set and found in the needle
if pattern and re.search(pattern, needle) is not None:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lines_contain(haystack: Iterable[str], needle: str) -> Iterator[int]:\n pat = re.compile(rf\"^\\W*{re.escape(needle)}\\W*$\")\n yield from (idx for idx, line in enumerate(haystack) if pat.match(line))",
"def _match_regex_list(subject, expressions):\n for expr in expressions:\n if re.search(expr, subject):\n return True\n return False",
"def is_exact(needle, haystack, start, end, matchnot):\n return ((start >= 0 and end < len(haystack) and\n haystack[start:end] == needle) ^ matchnot)",
"def assertRegexIn(\n self,\n expected_pattern: str, iterable: typing.List[str],\n msg: typing.Optional[str] = None) -> None:\n if isinstance(expected_pattern, (str, bytes)):\n assert expected_pattern, \"expected_regex must not be empty.\"\n expected_regex = re.compile(expected_pattern)\n for text in iterable:\n if expected_regex.search(text):\n return\n standard_msg = \"Regex didn't match: %r not found in %r\" % (expected_pattern, iterable)\n # _formatMessage ensures the longMessage option is respected\n msg = self._formatMessage(msg, standard_msg)\n raise self.failureException(msg)",
"def regex_search_list(data, regex):\n # Create the data into a list if it isn't already\n if type(data) is not list:\n data = [data]\n for d in data:\n if re.search(regex, d):\n return True\n return False",
"def in_(needle, haystack, msg = None):\n return nose.tools.ok_(needle in haystack,\n msg = (msg if msg != None else \"'%s' not in '%s'\" % (needle, haystack))\n )",
"def __match_value(needle, haystack):\n\n if haystack in [ None, [], \"\", \"*\" ]:\n return True\n\n if isinstance(haystack, list):\n return needle in haystack\n else:\n return needle == haystack",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def find_needle_in_haystack(self, needle, haystack): \n r = [] \n L = len(needle) \n for i in range(len(haystack)): \n if haystack[i:i+L] == needle: \n r.append(i)\n return r",
"def wanted(have, want, regex=False):\n\n assert isinstance(have, basestring)\n\n if want == '__all__':\n return True\n\n if want == '__none__' or not want:\n return False\n\n if regex:\n if isinstance(want, basestring):\n if re.match(want, have):\n return True\n else:\n for item in want:\n if re.match(item, have):\n return True\n\n else:\n if isinstance(want, basestring):\n return True if want == have else False\n\n if have in want:\n return True\n\n return False",
"def issubstring(substring, string):\n return substring in string",
"def assertSubstringIn(self, substring, container, msg=None):\n result = any(substring in item for item in container)\n if not result:\n msg = self._formatMessage(\n msg, f'{substring} is not substring in {safe_repr(container)}'\n )\n self.fail(msg)",
"def exists(self, regex: str) -> bool:\n for _ in self.find(regex):\n return True\n return False",
"def contains(str_or_list, val_to_find):\n \n return (val_to_find in str_or_list)",
"def find(self, search):\n if type(search) == str:\n search = [search]\n\n for s in search:\n if self.text.lower().find(s.lower()) != -1:\n return True\n\n return False",
"def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False",
"def assertAnyIn(self, needles, haystack):\n if not any(x in haystack for x in needles):\n raise AssertionError('None of \\'{needles}\\' in \\'{haystack}\\''.format(\n needles=\",\".join(needles),\n haystack=haystack\n ))",
"def text_search(self, text, stuff_to_cop):\n if any(ext in text for ext in stuff_to_cop):\n return(True)\n else:\n return(False)",
"def __search(findwhat, content, ignorecase, regexp):\n\t\tfrom re import search, IGNORECASE\n\t\tif regexp:\n\t\t\tif ignorecase:\n\t\t\t\tflag = IGNORECASE\n\t\t\telse:\n\t\t\t\tflag = 0\n\t\t\tif search(findwhat, content, flag):\n\t\t\t\treturn True\n\t\telse:\n\t\t\tif ignorecase:\n\t\t\t\tcontent = content.lower()\n\t\t\t\tfindwhat = findwhat.lower()\n\t\t\t\t\n\t\t\tif content.find(findwhat) != -1:\n\t\t\t\treturn True\n\t\treturn False",
"def is_in(elt, seq):\n return any(x is elt for x in seq)",
"def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False",
"def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False",
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def find_str(self, find_exp, where):\n found = False\n for item in where:\n if find_exp in str(item):\n self.assertTrue(True)\n found = True\n break\n if not found:\n self.assertTrue(False)",
"def isSubsequence(x: str, y: str) -> bool:\n it = iter(y)\n return all(c in it for c in x)",
"def grep(self, needle):\n result = []\n for line in self.contents:\n if needle in line:\n result.append(line)\n if result:\n return result\n return False",
"def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False",
"def is_in(elt, seq):\n\treturn any(x is elt for x in seq)",
"def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring",
"def verify_subseq(seq, subseq):\n\n # https://stackoverflow.com/questions/24017363/how-to-test-if-one-string-is-a-subsequence-of-another\n\n it = iter(seq)\n return all(c in it for c in subseq)"
] | [
"0.6985944",
"0.6416562",
"0.6401106",
"0.63869286",
"0.6322223",
"0.631153",
"0.6275128",
"0.6272992",
"0.6135999",
"0.6135562",
"0.6092985",
"0.6087945",
"0.59895885",
"0.5989014",
"0.5948714",
"0.5944926",
"0.591324",
"0.5881836",
"0.5838605",
"0.58077693",
"0.5800304",
"0.57949275",
"0.5786069",
"0.57860684",
"0.57782364",
"0.57574016",
"0.57238907",
"0.57207966",
"0.5716169",
"0.5704981"
] | 0.85009944 | 0 |
Build arguments for the Rally task. | def build_task_args(self, test_name):
task_args = {}
if self.ext_net:
task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.name)
return task_args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_task_args(self, test_name):\n task_args = {'service_list': [test_name]}\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n task_args['flavor_alt_name'] = str(self.flavor_alt.name)\n task_args['glance_image_location'] = str(self.filename)\n task_args['glance_image_format'] = str(self.image_format)\n task_args['tmpl_dir'] = str(self.template_dir)\n task_args['sup_dir'] = str(self.support_dir)\n task_args['users_amount'] = self.users_amount\n task_args['tenants_amount'] = self.tenants_amount\n task_args['use_existing_users'] = False\n task_args['iterations'] = self.iterations_amount\n task_args['concurrency'] = self.concurrency\n task_args['smoke'] = self.smoke\n task_args['volume_version'] = self.volume_version\n task_args['volume_service_type'] = self.volume_service_type\n task_args['block_migration'] = env.get(\"BLOCK_MIGRATION\").lower()\n task_args['username'] = self.username\n\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n\n if self.network:\n task_args['netid'] = str(self.network.id)\n else:\n LOGGER.warning(\n 'No tenant network created. '\n 'Trying EXTERNAL_NETWORK as a fallback')\n if env.get(\"EXTERNAL_NETWORK\"):\n network = self.cloud.get_network(env.get(\"EXTERNAL_NETWORK\"))\n task_args['netid'] = str(network.id) if network else ''\n else:\n task_args['netid'] = ''\n\n return task_args",
"def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )",
"def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args",
"def CreateArgs(run_task_request, args):\n if getattr(args, \"ARGS\", None):\n args_ref = dataplex_api.FetchExecutionSpecArgs(args.ARGS)\n if len(args_ref) > 0:\n return run_task_request.ArgsValue(\n additionalProperties=[\n run_task_request.ArgsValue.AdditionalProperty(\n key=key, value=value\n )\n for key, value in sorted(args_ref.items())\n ]\n )\n return None",
"def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args",
"def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)",
"def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')",
"def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser",
"def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)",
"def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args",
"def _get_task_args():\n task_name = FLAGS.task\n task_args = collections.OrderedDict()\n\n if task_name in TASK_FLAGS:\n task_flag_list = TASK_FLAGS[task_name]\n task_flag_dict = utils_impl.lookup_flag_values(task_flag_list)\n task_flag_prefix = TASK_FLAG_PREFIXES[task_name]\n for (key, value) in task_flag_dict.items():\n if key.startswith(task_flag_prefix):\n key = key[len(task_flag_prefix):].lstrip('_-')\n task_args[key] = value\n return task_args",
"def build_task(module_name, args=[], kwargs={}, module_attrs={}):\n kwargs = copy.deepcopy(kwargs) # Copy to avoid argument passed by reference issue\n if args:\n kwargs[\"_raw_params\"] = \" \".join(args)\n\n task_data = {\n \"action\": {\n \"module\": module_name,\n \"args\": kwargs\n },\n }\n if module_attrs:\n task_data.update(module_attrs)\n\n return task_data",
"def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional",
"def make_args(port, n, t, population, test=None, value=0, failure=None, tx_rate=0, loglevel=logging.INFO, output=None,\n broadcast=True, fan_out=10, profile=None, validate=False, ignore_promoter=False):\n res = [str(port), str(n), str(t), str(population)]\n\n if test is not None:\n res.append('--test')\n res.append(test)\n\n res.append('--value')\n res.append(str(value))\n\n if failure is not None:\n res.append('--failure')\n res.append(failure)\n\n res.append('--tx-rate')\n res.append(str(tx_rate))\n\n if loglevel == logging.DEBUG:\n res.append('--debug')\n elif loglevel == logging.INFO:\n res.append('-v')\n\n # None represents stdout\n if output is not None:\n res.append('-o')\n res.append(output)\n\n if broadcast:\n res.append('--broadcast')\n\n res.append('--fan-out')\n res.append(str(fan_out))\n\n if profile:\n res.append('--profile')\n res.append(profile)\n\n if validate:\n res.append('--validate')\n\n if ignore_promoter:\n res.append('--ignore-promoter')\n\n return res",
"def build_args():\n parser = argparse.ArgumentParser(description='Validates, edits, or creates a 22 XML file')\n subparsers = parser.add_subparsers(help='sub-command help')\n \n add_branch_parser(subparsers)\n add_edit_parser(subparsers)\n add_finalize_parser(subparsers)\n add_grade_parser(subparsers)\n add_new_parser(subparsers)\n add_validate_parser(subparsers)\n add_validate_document_parser(subparsers)\n \n return parser.parse_args()",
"def cmake_args(self):\n args = [\n self.define(\"CMAKE_C_COMPILER\", self.spec[\"mpi\"].mpicc),\n self.define(\"BUILD_SHARED_LIBS\", True),\n self.define(\"BUILD_TESTING\", self.run_tests),\n ]\n return args",
"def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)",
"def get_arguments():\n parser = argparse.ArgumentParser(description=\"TODO\")\n \n parser.add_argument('config_filepath', \n action='store', \n type=str, \n help='Path to configuration file containing paths of third parties libraries, projects, data directories, etc. See README for more information.')\n\n parser.add_argument('-C', '--config_cases', \n action='store',\n type=str,\n dest=\"config_cases\",\n help='Path to configuration file containing cases. The default one is stored at dask_io_experiments/experiment_5/cases.json',\n default=\"./dask_io_experiments/experiment_5/cases.json\")\n\n return parser.parse_args()",
"def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]",
"def get_tasks_args(parser):\n group = parser.add_argument_group(title='data_analyzing')\n\n group.add_argument('--analyzing-task', type=str, required=True,\n default=None,\n choices=['map',\n 'reduce'],\n help='What type of analyzing task to perform.')\n group.add_argument('--analyzing-data-type', type=str, required=True,\n default=None,\n choices=['BERT',\n 'GPT'],\n help='What type of data.')\n group.add_argument('--analyzing-metric', type=str, nargs='+', default=[],\n help='What kinds of metrics to analyze.')\n group.add_argument('--analyzing-num-workers', type=int, default=1,\n help='Number of workers. Each worker could be a single CPU node.')\n group.add_argument('--analyzing-worker-id', type=int, default=0,\n help='Worker id of current node.')\n group.add_argument('--analyzing-num-threads', type=int, default=1,\n help='Number of threads for each worker.')\n group.add_argument('--analyzing-num-threads-reduce', type=int, default=1,\n help='Number of threads for each worker.')\n group.add_argument('--analyzing-specific-threads', type=int, nargs='+', default=[],\n help='Which specific threads to run. Helpful when there are specific thread failed in previous run.')\n return parser",
"def full_args():\n return setup_args()",
"def getArgs():\n ProgDesc = (\"Creates a route type csv file of the type whose contents can be \"\n \"copied and pasted in to EMIT to change the proportions of \"\n \"different vehicle categories with a particular route type.\")\n ANPRDesc = (\"The ANPR file should be a csv file created using fleetSplitFromANPR.\")\n parser = argparse.ArgumentParser(description=ProgDesc)\n parser.add_argument('anprfile', type=str,\n help=\"The ANPR file to be processed. \"+ANPRDesc)\n parser.add_argument('basefile', type=str,\n help=(\"A file containing the base route type proportions. \"\n \"This should be created by clicking 'copy' on the \"\n \"route type window of EMIT, pasteing the results in \"\n \"to a spreadsheet, and saving as a csv file.\"))\n parser.add_argument('--saveloc', metavar='save location',\n type=str, nargs='?', default='Auto',\n help=\"Path where the outpt csv file should be saved.\")\n\n\n args = parser.parse_args()\n return args",
"def add_args(parser):\n # fmt: off\n TranslationTask.add_args(parser)\n parser.add_argument('--langs', required=True, metavar='LANG',\n help='comma-separated list of monolingual language, for example, \"en,de,fr\"'\n 'be careful these langs are what you used for pretraining (the same order),'\n 'not for finetuning.'\n 'you should always add all pretraining language idx during finetuning.')\n parser.add_argument('--multilang-sampling-alpha', type=float, default=0.7,\n help='sub sampling factor')\n parser.add_argument('--common_eos', type=str,\n help='common eos symbol for all languages')\n parser.add_argument('--placeholder', type=int, default=0,\n help='number of placeholder in dictionaries')\n parser.add_argument('--gt-langs', type=str,\n help=\"languages used in generation finetuning, separated wiht -, for example, 'en-fr-de'\")\n\n # fmt: on",
"def get_arguments():\n parser = argparse.ArgumentParser(description='Generates Terraform code from datadog monitors ID numbers')\n parser.add_argument(\"--input\", \"-i\", type=str, required=False, default=\"monitors.json\", help=\"Input JSON filename that contains Monitors ID numbers. i.e monitors.json\")\n parser.add_argument(\"--output\", \"-o\", type=str, required=False, default=\"monitors.tf\", help=\"Output Terraform filename. i.e monitors.tf\")\n parser.add_argument(\"--mode\", \"-m\", type=str, required=False, choices=[\"w\",\"a\"], default=\"w\", help=\"Create new Terraform file or Append to existing one.\")\n parser.add_argument(\"--all\", action=\"store_true\", help=\"Create Terraform files per group.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose output for the script.\")\n\n return parser.parse_args()",
"def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args",
"def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args",
"def render_build_args(options, ns):\n build_args = options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args",
"def __add_common_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"--model\", help=\"name of the model to use. Use query --get-models to get a list of valid names.\")\n parser.add_argument(\"--grid-type\", help=\"type of the grid to use.\")\n parser.add_argument(\"--level-type\", help=\"type of the vertical level to use.\")\n parser.add_argument(\"--init-time\", help=f\"initialization time to use. \"\n \"Integers are interpreted as hours since model start, dates formatted as \"\n f\"{__DATE_FORMAT.replace('%Y', 'YYYY').replace('%m', 'MM').replace('%d', 'DD').replace('%H', 'HH').replace('%M', 'MM')} are interpreted as absolute start dates.\")\n parser.add_argument(\"--variable\", nargs=\"+\", help=\"name of the variable to use. Use query --get-vars to get a list of valid names.\")\n parser.add_argument(\"--levels\", nargs=\"+\", type=int, help=\"levels to use.\")\n parser.add_argument(\"--lead-time\", nargs=\"+\", type=int, help=\"lead times to use in hours.\")",
"def get_unparsed_args():\n # Threads are set to 1 so that running tests doesn't completely drain\n # computing power, although it slows down the tests.\n # Specify output folder, else it will create folder in working directory\n # where the test module is run from.\n unparsed_args = [\"run.py\", PIPELINE, DB,\n \"-t\", str(1),\n \"-o\", str(test_folder),\n \"-b\", str(2)]\n return unparsed_args",
"def depfile_args(self, dep_file):\n return [\"-Wp,-MMD,%s\" % (dep_file)]"
] | [
"0.6930395",
"0.6554711",
"0.61110663",
"0.6043896",
"0.60004854",
"0.5989032",
"0.5976932",
"0.5970796",
"0.59215456",
"0.58704364",
"0.58405006",
"0.57609105",
"0.5728019",
"0.5724147",
"0.5706747",
"0.566341",
"0.5654405",
"0.5582019",
"0.55506146",
"0.5526752",
"0.5480462",
"0.54769665",
"0.54685766",
"0.5465004",
"0.5457624",
"0.5447706",
"0.54459405",
"0.5445291",
"0.54355717",
"0.5428818"
] | 0.6591516 | 1 |
potentially excessively complicated but very pretty code that makes a topography inclusive map of the UK within a figure positon ('fig1'), given a dataset on an xy grid ('indata'), lon and lat arrays on the same grid ('datlons', 'datlats'), contour levels ('clevs'), a title ('mtitle'), and the correct units ('mtitle', 'munits'). Includes a configurable scale for colour coding contours to any given function. Can be asked to mask out oceans ('maskswitch'), but if so needs a maskmap 'miss.nc' | def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):
from mpl_toolkits import basemap as bm
import matplotlib.cm as cm
from mpl_toolkits.basemap import shiftgrid
from netCDF4 import Dataset
from matplotlib.colors import LightSource
import matplotlib.pyplot as plt
import numpy as np
import hillshade
import set_shade
import colour_map
if maskswitch==1:
# import missing data map for masking out of oceans
missdata = Dataset('/exports/work/geos_cxc/users/ahardin4/output/amibatch/afixa/miss.nc', 'r', format='NETCDF3_CLASSIC')
# create the figure and axes instances.
ax = fig1.add_axes([0.1,0.1,0.8,0.8])
m = bm.Basemap(llcrnrlon=-9.5,llcrnrlat=49.5,urcrnrlon=2.5,urcrnrlat=59,rsphere=(6378137.00,6356752.3142),\
resolution='f',area_thresh=1000.,projection='laea', lat_0=54.5,lon_0=-2.75,ax=ax)
m.drawcoastlines()
# read in etopo5 topography/bathymetry.
url = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'
etopodata = Dataset(url)
topoin = etopodata.variables['ROSE'][:]
lons = etopodata.variables['ETOPO05_X'][:]
lats = etopodata.variables['ETOPO05_Y'][:]
# shift data so lons go from -180 to 180 instead of 00 to 360.
topoin,lons = shiftgrid(180.,topoin,lons,start=False)
# transform coordinates
x,y=m(datlons[:,:],datlats[:,:])
# transform to nx x ny regularly spaced 5km native projection grid
nx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1
topodat = m.transform_scalar(topoin,lons,lats,nx,ny)
# create light source object for topography
ls = LightSource(azdeg = 0, altdeg = 2)
# use set_shade function (also available)
rgb = set_shade(topodat)
# plot image over map with imshow.
im = m.imshow(rgb)
# apply function to colormap pointers, can be any function at all, as long as
# 0 remains 0, 1 remains 1, and values increase from one to the other.
# x^4 is good for pseudo-log plots of rainfall:
#log_jet=cmap_xmap(lambda x: (x*x*x*x), cm.hsv)
#set to lambda x: x for no change:
log_jet=cmap_xmap(lambda x: (x), cm.jet)
#apply function to colormap if desired to make whole scale 'hotter' or 'colder'
#example makes colourmap significantly hotter by confining values to upper quarter:
#log_jet=cmap_map(lambda x: x/4+0.75, cm.gist_rainbow)
# mask out oceans, but not lakes. Useful when plotting or comparing against observed
if maskswitch==1:
missmap=missdata.variables['land_map']
missmap2=missdata.variables['land_map']
# cut from big mask to small mask if necessary
#smallmap=missmap[0,6:46,0:34]
smallmap=missmap[0,:,:]
smallmap2=missmap2[0,:,:]
# expand out by one to take into account interpolation
for i in range(1,39):
for j in range(1,33):
if smallmap[i,j] == 0.0:
smallmap2[i-1,j]=0.0
smallmap2[i,j-1]=0.0
smallmap2[i+1,j]=0.0
smallmap2[i,j+1]=0.0
# perform masking
indata=np.ma.masked_array(indata,mask=(smallmap2<-0.5))
print smallmap2[0,0], smallmap2[36,0], smallmap2[20,20]
#indata[indata<=0.1]=np.nan
# produce semi-transparent contour map
contourmap=m.contourf(x,y,indata,clevs,cmap=cm.get_cmap(log_jet,len(clevs)-1),extend='both',
alpha=0.5,origin='lower',rasterized=True)
# produce simple block plot
#contourmap=m.pcolor(x,y,indata,shading='interp',cmap=cm.get_cmap(log_jet,len(clevs)-1),
# alpha=0.5)
# place colour bar on right
cb = m.colorbar(contourmap,"right", size="5%", pad='3%')
# configure colour bar labeling
cl = plt.getp(cb.ax, 'ymajorticklabels')
contourmap=plt.setp(cl, fontsize=14)
# draw parallels and meridians so as not to clash with colour bar placement
# labels = [left,right,top,bottom]
m.drawparallels(np.arange(-70.,80,1.), labels=[1,0,0,1], fontsize=13)
m.drawmeridians(np.arange(351.,362.,2.),labels=[1,0,0,1], fontsize=13)
# configure title and units
cb.ax.set_xlabel(munits, fontsize=12)
contourmap=plt.title(mtitle, fontsize=14) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maplot_c(pdata, pdata1, title='', precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid\n import numpy as np\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/batch_518/atmos/item3236_monthly_mean/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'longitude0')\n lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/batch_518/atmos/item3236_monthly_mean/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'latitude0')\n plt.figure()\n\n pdata, lon1 = shiftgrid(180., pdata, lon, start=False)\n pdata1, lon = shiftgrid(180., pdata1, lon, start=False)\n meshlon, meshlat = np.meshgrid(lon, lat)\n\n m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90,\n llcrnrlon=-180, urcrnrlon=180, resolution='c')\n m.drawcoastlines()\n m.drawmapboundary()\n x, y = m(meshlon, meshlat)\n plot = m.contour(x, y, pdata, [4, 5, 6], colors='k')\n plt.clabel(plot, inline=1, fontsize=10)\n plot1 = m.contour(x, y, pdata1, [4, 5, 6], colors='r', linestyles='--')\n plt.clabel(plot, inline=1, fontsize=10)\n parallels = m.drawparallels(np.arange(-90., 91., 15.))\n meridians = m.drawmeridians(np.arange(-180., 181., 30))\n m.drawparallels(parallels, labels=[True, True, True, True])\n m.drawmeridians(meridians, labels=[True, True, True, True])\n plt.title(title, y=1.08)\n plt.show()",
"def maplot_cgt(pdata, colormax=1, mask='no', title='', precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic\n import numpy as np\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/batch_518/atmos/item3236_monthly_mean/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'longitude0')\n lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/batch_518/atmos/item3236_monthly_mean/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'latitude0')\n plt.figure()\n if mask == 'yes':\n lsm = ncread('/home/bakerh/Documents/DPhil/CPDN/\\\nWeather-at-Home_ancilmaker-master/lsm_n96_add.nc', 'lsm')[0, 0, :]\n pdata = np.ma.masked_array(pdata, mask=np.logical_not(lsm))\n pdata, lon = shiftgrid(250., pdata, lon, start=False)\n pdata, lon = addcyclic(pdata, lon)\n meshlon, meshlat = np.meshgrid(lon, lat)\n\n m = Basemap(projection='cyl', llcrnrlat=0, urcrnrlat=70,\n llcrnrlon=-110, urcrnrlon=250, resolution='c')\n m.drawcoastlines()\n m.drawmapboundary()\n x, y = m(meshlon, meshlat)\n mycmap2 = plt.cm.YlOrRd(np.arange(256))\n mycmap1 = plt.cm.Blues_r(np.arange(256))\n my_cmap = np.concatenate((mycmap1, mycmap2), axis=0)\n my_cmap[230:282, :] = 1\n if precip == 'yes':\n my_cmap = my_cmap[::-1]\n newcmap = mpl.colors.LinearSegmentedColormap.from_list(\"newjet\", my_cmap)\n ctrs = np.linspace(-colormax, colormax, 17)\n plot = m.contourf(x, y, pdata, ctrs,\n cmap=newcmap, vmin=np.min(ctrs), vmax=np.max(ctrs),\n extend='both')\n plt.colorbar(plot, orientation='horizontal',\n shrink=0.5, spacing='proportional', label='Difference (m)')\n parallels = m.drawparallels(np.arange(-90., 91., 15.))\n meridians = m.drawmeridians(np.arange(-180., 181., 30))\n m.drawparallels(parallels, labels=[True, True, True, True])\n m.drawmeridians(meridians, labels=[True, True, True, True])\n plt.title(title, y=1.08)\n plt.show()",
"def maplot(pdata, colormax=1, colormin=-999, mask='no', title='', precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic\n import numpy as np\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n if colormin == -999:\n colormin = -colormax\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'longitude0')\n lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'latitude0')\n #lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/Plus15-Future_LCO2/day/ua/item15201_daily_mean_a00b_2090-01_2100-12.nc', 'latitude1') \n plt.figure()\n if mask == 'yes':\n lsm = ncread('/home/bakerh/Documents/DPhil/CPDN/\\\nWeather-at-Home_ancilmaker-master/lsm_n96_add.nc', 'lsm')[0, 0, :]\n pdata = np.ma.masked_array(pdata, mask=np.logical_not(lsm))\n pdata, lon = shiftgrid(180., pdata, lon, start=False)\n pdata, lon = addcyclic(pdata, lon)\n meshlon, meshlat = np.meshgrid(lon, lat)\n\n m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90,\n llcrnrlon=-180, urcrnrlon=180, resolution='c')\n m.drawcoastlines()\n m.drawmapboundary()\n x, y = m(meshlon, meshlat)\n mycmap2 = plt.cm.YlOrRd(np.arange(256))\n mycmap1 = plt.cm.Blues_r(np.arange(256))\n my_cmap = np.concatenate((mycmap1, mycmap2), axis=0)\n my_cmap[230:282, :] = 1\n if precip == 'yes':\n my_cmap = my_cmap[::-1]\n newcmap = mpl.colors.LinearSegmentedColormap.from_list(\"newjet\", my_cmap)\n ctrs = np.linspace(colormin, colormax, 17)\n plot = m.contourf(x, y, pdata, ctrs,\n cmap=newcmap, vmin=np.min(ctrs), vmax=np.max(ctrs),\n extend='both')\n b = plt.colorbar(plot, orientation='horizontal',\n aspect=50, shrink=0.75, spacing='proportional')\n b.set_label(label=r'pr (mm day$^{-1}$)')\n parallels = m.drawparallels(np.arange(-90., 91., 15.))\n meridians = m.drawmeridians(np.arange(-180., 181., 30))\n 
m.drawparallels(parallels, labels=[True, True, True, True])\n m.drawmeridians(meridians, labels=[True, True, True, True])\n plt.title(title, y=1.08)\n plt.show()",
"def createMap(title_in, file_in, fig_file_in, N, vmin, vmax, lon_in,\n lat_in, sss_in, colors, label='SSS [PSS]'):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title_in)\n plt.figtext(1, 0, file_in, ha='right', va='bottom', fontsize=6)\n\n map = Basemap(projection='moll', resolution='l', lon_0=-50, ellps='WGS84', anchor='S')\n map.drawcoastlines(linewidth=0.01, antialiased=False)\n map.drawmapboundary(fill_color='white', linewidth=0.01)\n map.drawmeridians(np.arange(-180,181,60), labels=[0,0,0,0], linewidth=0.01, labelstyle=None)\n map.drawparallels(np.arange(-90,91,30), labels=[1,0,0,0], linewidth=0.01, labelstyle=None) \n map.fillcontinents(color='grey')\n\n ticks = np.linspace(vmin, vmax, N+1)\n \n lonout, z = map.shiftdata(lon_in, sss_in, lon_0=-50)\n lon, lat = np.meshgrid(lonout, lat_in)\n x, y = map(lon, lat)\n\n cmap = cm.get_cmap(colors, N)\n cmap.set_bad('1.0')\n cmap.set_under((0.0, 0.0, 0.25, 1.0))\n cmap.set_over((0.25, 0.0, 0.0, 1.0))\n\n pc = map.pcolormesh(x, y, z, vmin=vmin, vmax=vmax, cmap=cmap)\n cb = plt.colorbar(pc, shrink=0.8, orientation='horizontal', fraction=0.04, extend ='both', ticks=ticks)\n cb.set_label(label)\n plt.savefig(fig_file_in)\n logging.debug(fig_file_in +' .... created!' )\n plt.close()\n\n return None",
"def draw_composite_map(date_obj, t850, u200, v200, u500, v500, mslp, gh500, u850, v850, pwat):\n \n #Get lat and lon arrays for this dataset:\n lat = t850.lat.values\n lon = t850.lon.values\n\n #========================================================================================================\n # Create a Basemap plotting figure and add geography\n #========================================================================================================\n\n #Create a Plate Carree projection object\n proj_ccrs = ccrs.Miller(central_longitude=0.0)\n\n #Create figure and axes for main plot and colorbars\n fig = plt.figure(figsize=(18,12),dpi=125)\n gs = gridspec.GridSpec(12, 36, figure=fig) #[ytop:ybot, xleft:xright]\n ax = plt.subplot(gs[:, :-1],projection=proj_ccrs) #main plot\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax2 = plt.subplot(gs[:4, -1]) #top plot\n ax2.set_xticklabels([])\n ax2.set_yticklabels([])\n ax3 = plt.subplot(gs[4:8, -1]) #bottom plot\n ax3.set_xticklabels([])\n ax3.set_yticklabels([])\n ax4 = plt.subplot(gs[8:, -1]) #bottom plot\n ax4.set_xticklabels([])\n ax4.set_yticklabels([])\n\n #Add political boundaries and coastlines\n ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.STATES.with_scale('50m'), linewidths=0.5)\n\n #Add land/lake/ocean masking\n land_mask = cfeature.NaturalEarthFeature('physical', 'land', '50m',\n edgecolor='face', facecolor='#e6e6e6')\n sea_mask = cfeature.NaturalEarthFeature('physical', 'ocean', '50m',\n edgecolor='face', facecolor='#ffffff')\n lake_mask = cfeature.NaturalEarthFeature('physical', 'lakes', '50m',\n edgecolor='face', facecolor='#ffffff')\n ax.add_feature(sea_mask,zorder=0)\n ax.add_feature(land_mask,zorder=0)\n ax.add_feature(lake_mask,zorder=0)\n\n #========================================================================================================\n # Fill contours\n 
#========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # 850-hPa temperature\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(-40,40,1)\n cmap = plt.get_cmap('jet')\n extend = \"both\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,t850,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.1)\n\n #--------------------------------------------------------------------------------------------------------\n # PWAT\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(20,71,0.5)\n\n #Define a color gradient for PWAT\n pwat_colors = gradient([[(255,255,255),0.0],[(255,255,255),20.0]],\n [[(205,255,205),20.0],[(0,255,0),34.0]],\n [[(0,255,0),34.0],[(0,115,0),67.0]])\n cmap = pwat_colors.get_cmap(clevs)\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,pwat,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.9)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax2,shrink=0.75,pad=0.01,ticks=[20,30,40,50,60,70])\n\n #--------------------------------------------------------------------------------------------------------\n # 250-hPa wind\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n wind = calc.wind_speed(u200, v200)\n\n #Specify contour settings\n clevs = [40,50,60,70,80,90,100,110]\n cmap = col.ListedColormap(['#99E3FB','#47B6FB','#0F77F7','#AC97F5','#A267F4','#9126F5','#E118F3','#E118F3'])\n extend = \"max\"\n\n #Contour fill this variable\n norm = 
col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,wind,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax3,shrink=0.75,pad=0.01,ticks=clevs)\n\n #--------------------------------------------------------------------------------------------------------\n # 500-hPa smoothed vorticity\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n dx,dy = calc.lat_lon_grid_deltas(lon,lat)\n vort = calc.vorticity(u500, v500, dx=dx, dy=dy)\n smooth_vort = smooth(vort, 5.0) * 10**5\n\n #Specify contour settings\n clevs = np.arange(2,20,1)\n cmap = plt.get_cmap('autumn_r')\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,smooth_vort,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.3)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax4,shrink=0.75,pad=0.01,ticks=clevs[::2])\n \n #========================================================================================================\n # Contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # MSLP\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(960,1040+4,4)\n style = 'solid' #Plot solid lines\n color = 'red' #Plot lines as gray\n width = 0.8 #Width of contours 0.25\n\n #Contour this variable\n cs = ax.contour(lon,lat,mslp,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs,alpha=0.9)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=9, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Geopotential heights\n 
#--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n gh500 = gh500 / 10.0\n\n #Specify contour settings\n clevs = np.arange(480,612,4)\n style = 'solid' #Plot solid lines\n color = 'black' #Plot lines as gray\n width = 2.0 #Width of contours\n\n #Contour this variable\n cs = ax.contour(lon,lat,gh500,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=12, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Surface barbs\n #--------------------------------------------------------------------------------------------------------\n\n #Plot wind barbs\n _ = ax.quiver(lon, lat, u850.values, v850.values, transform=proj_ccrs, regrid_shape=(38,30), scale=820, alpha=0.5)\n\n #--------------------------------------------------------------------------------------------------------\n # Label highs & lows\n #--------------------------------------------------------------------------------------------------------\n\n #Label highs and lows\n add_mslp_label(ax, proj_ccrs, mslp, lat, lon)\n\n #========================================================================================================\n # Step 6. 
Add map boundary, legend, plot title, then save image and close\n #========================================================================================================\n\n #Add china province boundary\n add_china_map_2cartopy(ax, name='province')\n\n #Add custom legend\n from matplotlib.lines import Line2D\n custom_lines = [Line2D([0], [0], color='#00A123', lw=5),\n Line2D([0], [0], color='#0F77F7', lw=5),\n Line2D([0], [0], color='#FFC000', lw=5),\n Line2D([0], [0], color='k', lw=2),\n Line2D([0], [0], color='k', lw=0.1, marker=r'$\\rightarrow$', ms=20),\n Line2D([0], [0], color='r', lw=0.8),]\n\n ax.legend(custom_lines, ['PWAT (mm)', '200-hPa Wind (m/s)', '500-hPa Vorticity', '500-hPa Height (dam)', '850-hPa Wind (m/s)', 'MSLP (hPa)'], loc=2, prop={'size':12})\n\n #Format plot title\n title = \"Synoptic Composite \\nValid: \" + dt.datetime.strftime(date_obj,'%Y-%m-%d %H%M UTC')\n st = plt.suptitle(title,fontweight='bold',fontsize=16)\n st.set_y(0.92)\n\n #Return figuration\n return(fig)",
"def maplot_subs(va500, cmax=1, colormin=-999, precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic\n import numpy as np\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n fig = plt.figure(facecolor='w', edgecolor='k', linewidth=2)\n\n def plotter(pdata, colormax=1, colormin=-999, title=''):\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'longitude0')\n lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/All-Hist/mon/tas/item3236_monthly_mean_a011_2006-01_2016-12.nc', 'latitude0')\n #lat = ncread('/network/aopp/hera/mad/bakerh/HAPPI/HadAM3P-N96/Plus15-Future_LCO2/day/ua/item15201_daily_mean_a00b_2090-01_2100-12.nc', 'latitude1') \n if colormin == -999:\n colormin = -colormax\n pdata, lon = shiftgrid(180., pdata, lon, start=False)\n pdata, lon = addcyclic(pdata, lon)\n meshlon, meshlat = np.meshgrid(lon, lat)\n\n m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90,\n llcrnrlon=-180, urcrnrlon=180, resolution='c')\n m.drawcoastlines()\n m.drawmapboundary()\n x, y = m(meshlon, meshlat)\n mycmap2 = plt.cm.YlOrRd(np.arange(256))\n mycmap1 = plt.cm.Blues_r(np.arange(256))\n my_cmap = np.concatenate((mycmap1, mycmap2), axis=0)\n my_cmap[230:282, :] = 1\n if precip == 'yes':\n my_cmap = my_cmap[::-1]\n newcmap = mpl.colors.LinearSegmentedColormap.from_list(\"newjet\", my_cmap)\n ctrs = np.linspace(colormin, colormax, 17)\n plot = m.contourf(x, y, pdata, ctrs,\n cmap=newcmap, vmin=np.min(ctrs), vmax=np.max(ctrs),\n extend='both')\n\n plt.title(title, y=1)\n plt.show()\n return plot\n\n ax1 = fig.add_subplot(3, 3, 1)\n plotter(np.mean(va500['All-Hist'][1],axis=0)-273.15,colormax=cmax*40,title='All-Hist (multiply scale by 40)')\n ax2 = fig.add_subplot(3, 3, 2)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='All-Hist - All-Nat')\n ax3 = fig.add_subplot(3, 
3, 3)\n plotter(np.mean(va500['Plus15-Future'][1], axis=0)-np.mean(va500['All-Hist'][1],axis=0),colormax=cmax,title='Plus15-Future - All-Hist')\n ax4 = fig.add_subplot(3, 3, 4)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['GHG-Nat'][1],axis=0),colormax=cmax,title='All-Hist - GHG-Nat')\n ax5 = fig.add_subplot(3, 3, 5)\n plotter(np.mean(va500['SST-Nat'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='SST-Nat - All-Nat')\n ax6 = fig.add_subplot(3, 3, 6)\n plotter(np.mean(va500['Plus15-Future_HCO2'][1], axis=0)-np.mean(va500['Plus15-Future_LCO2'][1],axis=0),colormax=cmax,title='Plus15-Future_HCO2 - Plus15-Future_LCO2')\n ax7 = fig.add_subplot(3, 3, 7)\n plotter(np.mean(va500['All-Hist'][1], axis=0)-np.mean(va500['SST-Nat'][1],axis=0),colormax=cmax,title='All-Hist - SST-Nat')\n ax8 = fig.add_subplot(3, 3, 9)\n plotter(np.mean(va500['Plus15-Future_LCO2'][1], axis=0)-np.mean(va500['All-Hist'][1],axis=0),colormax=cmax,title='Plus15-Future_LCO2 - All-Hist')\n ax9 = fig.add_subplot(3, 3, 8)\n plot = plotter(np.mean(va500['GHG-Nat'][1], axis=0)-np.mean(va500['All-Nat'][1],axis=0),colormax=cmax,title='GHG-Nat - All-Nat')\n\n cbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.015])\n b = fig.colorbar(plot, cax=cbar_ax, spacing='proportional',\n orientation='horizontal', extend='max')\n \n b.set_label(label='t200 difference ($^\\circ$C)', size=20, fontsize=20, fontname='Arial')\n plt.subplots_adjust(hspace=0, wspace=0.05, top=.97, bottom=0.15, left=.05,\n right=.95)",
"def draw(self, **kwargs):\n\n Lons = numpy.ones(self.data.shape)*0.5\n Lats = numpy.ones(self.data.shape)*0.5\n for ix in range(self.ncols):\n for iy in range(self.nrows):\n Lons[iy,ix] = self.xllcorner+float(ix)*self.cellsize\n Lats[iy,ix] = self.yllcorner+float(iy)*self.cellsize\n ContourMin = numpy.min(numpy.where(self.data != self.nodata,self.data, 1000000))\n ContourMax = numpy.max(numpy.where(self.data != self.nodata,self.data, -1000000))*1.10\n if kwargs.has_key('contours'):\n if type( kwargs['contours'] ) == type( 1 ):\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/float( kwargs['contours']+1))\n else:\n Contours = kwargs['contours']\n else:\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/11.)\n if kwargs.has_key('cmap'):\n mycmap = kwargs['cmap']\n else:\n mycmap = 'jet'\n if kwargs.has_key('dmap'):\n dmap = max(0,min(4,kwargs['dmap']))\n else:\n dmap = 4\n # Lambert Conformal Conic map.\n if kwargs.has_key('res'):\n if kwargs['res']=='med':\n mapres='i'\n elif kwargs['res']=='hi':\n mapres='h'\n else:\n mapres = 'l'\n else:\n mapres = 'l'\n if mapres not in ('c','l','i','h'):\n mapres = 'l'\n m = Basemap(llcrnrlon=Lons[0,0], llcrnrlat=Lats[0,0], urcrnrlon=Lons[self.nrows-1,self.ncols-1], urcrnrlat=Lats[self.nrows-1,self.ncols-1],\n projection='lcc',lat_1=30.,lat_2=60.,lon_0=(Lons[0,0]+Lons[self.nrows-1,self.ncols-1])/2.,\n resolution =mapres,area_thresh=1000.)\n # create figure, add axes.\n fig=p.figure()\n ax = fig.add_axes([0.1,0.1,0.7,0.7])\n #make a filled contour plot.\n x, y = m( Lons , Lats)\n CS = m.contourf(x,y,self.data, Contours, cmap=p.get_cmap(mycmap))\n\tpos = ax.get_position()\n\tl, b, w, h = getattr(pos, 'bounds', pos)\n #l,b,w,h=ax.get_position()\n cax = p.axes([l+w+0.075, b, 0.05, h]) # setup colorbar axes\n p.colorbar(drawedges=True, cax=cax) # draw colorbar\n p.axes(ax) # make the original axes current again\n\n if kwargs.has_key('shapefiles'):\n for s in kwargs['shapefiles']:\n 
try:\n lw = s[3]\n except:\n lw = 0.5\n try:\n clr = s[4]\n except:\n clr='k'\n shp_info = apply(m.readshapefile, (s[0],s[1]),{'drawbounds':s[2], 'linewidth':lw, 'color':clr} )\n # draw coastlines, meridians and parallels.\n if dmap > 1:\n m.drawcoastlines()\n if dmap > 2:\n m.drawcountries()\n if dmap > 3:\n m.drawstates()\n if dmap > 0:\n m.drawparallels(p.arange(10,70,10),labels=[1,1,0,0])\n m.drawmeridians(p.arange(-100,0,10),labels=[0,0,0,1])\n if kwargs.has_key('title'):\n p.title(kwargs['title'])\n else:\n p.title(self.name.title())\n if kwargs.has_key('format'):\n fn = self.name+'.'+kwargs['format']\n if kwargs.has_key('dpi'):\n dots = kwargs['dpi']\n else:\n dots = 100\n try:\n p.savefig(fn,dpi=dots)\n except:\n print 'Error saving to format : ', kwargs['format']\n else:\n p.show()",
"def buildMap(fig, ax, data, lat, lon, title ='', cbartitle = '', vmin = None, vmax = None, cmap = None, big_labels = False):\n \n labelsize = 5\n titlesize = 8\n cbarlabelsize = 6\n cbartitlesize = 7\n \n if big_labels:\n labelsize *= 2\n titlesize *= 2\n cbarlabelsize *= 2\n cbartitlesize *= 2\n \n \n im = ax.imshow(data, vmin = vmin, vmax = vmax, cmap = cmap, interpolation = 'nearest', aspect = 'auto')\n \n ax.set_yticks(np.arange(0, data.shape[0] + data.shape[0]/10, data.shape[0]/10))\n ax.set_xticks(np.arange(0, data.shape[1] + data.shape[1]/10, data.shape[1]/10))\n ax.set_yticklabels([\"%.1f\" %i for i in np.arange(lat, lat - 1.01, - 0.1)])\n ax.set_xticklabels([\"%.1f\" %i for i in np.arange(lon, lon + 1.01, 0.1)])\n ax.tick_params(labelsize = labelsize)\n ax.set_ylabel('Latitude', fontsize = labelsize)\n ax.set_xlabel('Longitude', fontsize = labelsize)\n ax.set_title(title, fontsize = titlesize)\n \n cbar = fig.colorbar(im, ax = ax, fraction = 0.046, pad = 0.04)\n cbar.ax.tick_params(labelsize = cbarlabelsize)\n cbar.set_label(cbartitle, fontsize = cbartitlesize)",
"def make_map(data,LatLonBox):\n\n proj = ccrs.LambertConformal(central_longitude=data.StationLongitude,\n central_latitude=data.StationLatitude)\n\n fig = plt.figure(figsize=(17,11))\n ax = plt.subplot(111,projection=proj)\n \n ax.coastlines('50m', 'black', linewidth=2, zorder=2)\n\n reader = shpreader.Reader('/Users/chowdahead/Documents/shapefiles/countyl010g_shp_nt00964/countyl010g.shp')\n counties = list(reader.geometries())\n COUNTIES = cfeature.ShapelyFeature(counties,ccrs.PlateCarree())\n ax.add_feature(COUNTIES, facecolor='none',edgecolor='w')\n # Grab state borders\n state_borders = cfeature.NaturalEarthFeature(\n category='cultural', name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\n ax.add_feature(state_borders, edgecolor='w', linewidth=1, zorder=3)\n \n ocean = cfeature.NaturalEarthFeature('physical', 'ocean', scale='50m',\n edgecolor='face',\n facecolor=cfeature.COLORS['water'])\n land = cfeature.NaturalEarthFeature('physical', 'land', scale='50m',\n edgecolor='face',\n facecolor=\"k\")\n\n ax.add_feature(ocean, zorder=-1)\n ax.add_feature(land, zorder=-1)\n ax.set_facecolor('black')\n \n ax.set_extent(LatLonBox,ccrs.PlateCarree())\n \n return fig,ax,proj",
"def ccs4_map(cfg_set_tds,figsize_x=12,figsize_y=12,hillshade=True,radar_loc=True,radar_vis=True):\r\n \r\n ## Load DEM and Swiss borders\r\n shp_path_CH = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp\")\r\n shp_path_Kantone = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp\")\r\n shp_path_count = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/CCS4_merged_proj_clip_G05_countries.shp\")\r\n dem_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/DEM/ccs4.png\")\r\n visi_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/radar/radar_composite_visibility.npy\")\r\n\r\n dem = Image.open(dem_path)\r\n dem = np.array(dem.convert('P'))\r\n\r\n sf_CH = shapefile.Reader(shp_path_CH)\r\n sf_KT = shapefile.Reader(shp_path_Kantone)\r\n sf_ct = shapefile.Reader(shp_path_count)\r\n\r\n ## Setup figure\r\n fig_extent = (255000,965000,-160000,480000)\r\n fig, axes = plt.subplots(1, 1)\r\n fig.set_size_inches(figsize_x, figsize_y)\r\n \r\n ## Plot altitude / hillshading\r\n if hillshade:\r\n ls = colors.LightSource(azdeg=315, altdeg=45)\r\n axes.imshow(ls.hillshade(-dem, vert_exag=0.05),\r\n extent=fig_extent, cmap='gray', alpha=0.5)\r\n else:\r\n axes.imshow(dem*0.6, extent=fig_extent, cmap='gray', alpha=0.5)\r\n \r\n ## Get borders of Cantons\r\n try:\r\n shapes_KT = sf_KT.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for KT_i, shape in enumerate(shapes_KT):\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n axes.plot(x,y,color='darkred',linewidth=0.5,zorder=5)\r\n\r\n ## Get borders of neighbouring countries\r\n try:\r\n shapes_ct = sf_ct.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted 
(UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_ct):\r\n if ct_i in [0,1]:\r\n continue\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n x[x<=255000] = 245000\r\n x[x>=965000] = 975000\r\n y[y<=-159000] = -170000\r\n y[y>=480000] = 490000\r\n if ct_i in [3]:\r\n axes.plot(x[20:170],y[20:170],color='black',linewidth=0.5)\r\n if ct_i in [2]:\r\n ## Delete common border of FR and CH:\r\n x_south = x[y<=86000]; y_south = y[y<=86000]\r\n x_north = x[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #x_north = x[np.logical_and(y>=270577,y<=491000)]\r\n y_north = y[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #y_north = y[np.logical_and(y>=270577,y<=491000)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north,y_north,color='black',linewidth=0.5,zorder=4)\r\n if ct_i in [4]:\r\n ## Delete common border of AT and CH:\r\n x_south = x[np.logical_and(x>=831155,y<235000)]\r\n y_south = y[np.logical_and(x>=831155,y<235000)]\r\n #x_north1 = x[np.logical_and(x>=756622,y>=260466)]\r\n x_north1 = x[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n #y_north1 = y[np.logical_and(x>=756622,y>=260466)]\r\n y_north1 = y[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n y_north2 = y[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n x_north2 = x[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n y_north2 = np.concatenate([y_north2[np.argmin(x_north2):],y_north2[:np.argmin(x_north2)]])\r\n x_north2 = np.concatenate([x_north2[np.argmin(x_north2):],x_north2[:np.argmin(x_north2)]])\r\n x_LI = x[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n y_LI = y[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north1,y_north1,color='black',linewidth=0.5,zorder=4)\r\n 
axes.plot(x_north2,y_north2,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_LI,y_LI,color='black',linewidth=0.5,zorder=4)\r\n else:\r\n continue\r\n #axes.plot(x,y,color='black',linewidth=1,zorder=4)\r\n\r\n ## Get Swiss borders\r\n try:\r\n #shp_records = sf_CH.shapeRecords()\r\n shapes_CH = sf_CH.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_CH): #sf_CH.shapeRecords():\r\n if ct_i!=0: continue\r\n x = np.array([i[0]-2000000 for i in shape.points[:]])\r\n y = np.array([i[1]-1000000 for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n \r\n ## Convert to swiss coordinates\r\n #x,y = lonlat2xy(lon, lat)\r\n axes.plot(x,y,color='darkred',linewidth=1,zorder=3)\r\n\r\n ## Add weather radar locations:\r\n if radar_loc:\r\n weather_radar_y = [237000,142000,100000,135000,190000]\r\n weather_radar_x = [681000,497000,708000,604000,780000]\r\n axes.scatter(weather_radar_x,weather_radar_y,marker=\"D\",#s=2,\r\n color='orange',edgecolor='black',zorder=10)\r\n \r\n ## Add radar visibility:\r\n if radar_vis:\r\n arr_visi = np.load(visi_path)\r\n arr_visi[arr_visi<9000] = 0\r\n arr_visi2 = morph.binary_opening(morph.binary_erosion(arr_visi, structure=np.ones((4,4))), structure=np.ones((4,4)))\r\n arr_visi[arr_visi<9000] = np.nan\r\n axes.imshow(arr_visi, cmap=\"gray\", alpha=0.2, extent=fig_extent)\r\n arr_visi[np.isnan(arr_visi)] = 1\r\n #axes.contour(arr_visi[::-1,:], levels=[2], cmap=\"gray\", linewidths=2,\r\n # linestyle=\"solid\", alpha=0.5, extent=fig_extent)\r\n #arr_visi = arr_visi[::4, ::4]\r\n #ys, xs = np.mgrid[arr_visi.shape[0]:0:-1,\r\n # 0:arr_visi.shape[1]]\r\n #axes.scatter(xs.flatten(), ys.flatten(), s=4,\r\n # c=arr_visi.flatten().reshape(-1, 3), edgecolor='face')\r\n \r\n ## Add further elements:\r\n axes.set_xlim([255000,965000])\r\n axes.set_ylim([-160000,480000])\r\n 
axes.grid()\r\n axes.set_ylabel(\"CH1903 Northing\")\r\n axes.set_xlabel(\"CH1903 Easting\")\r\n axes.get_xaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n axes.get_yaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n plt.yticks(rotation=90, verticalalignment=\"center\")\r\n return fig, axes, fig_extent",
"def ISOMAPEmbbeding(TurosR=10,Torusr=4,Classes=[3,5,7],nei=[5,10,20], DataSet = {'Turos', 'Digits'}):\n\n S, dig = CreateDS_Torus_Digits(TurosR=TurosR,Torusr=Torusr,Classes=[3,5,7])\n ### ------ Isomap ------###\n nei = nei\n\n if 'Turos' in DataSet:\n # Ploting Torus Isomapping\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(nei):\n Torus_isomap = Isomap(S, 2, j)\n neighbors = j\n method = 'Torus ISOMAP'\n ax = fig.add_subplot(1, len(nei), i + 1)\n scatter = ax.scatter(Torus_isomap[:, 0], Torus_isomap[:, 1], c=S[:, 0:1], cmap=plt.cm.Spectral)\n # legend = ax.legend(*scatter.legend_elements(), loc=\"lower left\", title=\"Classes\")\n # ax.add_artist(legend)\n # ax.legend()\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Torus_isomap, pallete=S[:, 0:1], neighbors=j, method='Torus ISOMAP') #An option to plot single graphs\n plt.savefig('Torus ISOMAP embbeding for {} neighbour'.format(nei))\n\n if 'Digits' in DataSet:\n # Plotting Digits Isomapping\n for Argclass, Specificcalss in enumerate(dig):\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(nei):\n neighbors = j\n Digit_isomap = Isomap(Specificcalss[0], 2, j)\n method = 'Digit ISOMAP'\n ax = fig.add_subplot(1, len(nei), i + 1)\n scatter = ax.scatter(Digit_isomap[:, 0], Digit_isomap[:, 1], c=Specificcalss[1], cmap=plt.cm.Spectral)\n legend = ax.legend(*scatter.legend_elements(), loc=\"lower left\", title=\"Classes\")\n ax.add_artist(legend)\n ax.legend()\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Digit_isomap, Specificcalss[1], neighbors=j, method='Digit ISOMAP') #An option to plot single graphs\n plt.savefig('Digits up to {} - ISOMAP embbeding for {} neighbour'.format(Classes[Argclass], nei))",
"def map(lon, lat, z, z2=None, tm=None, projection='cyl', save='', ftype='png',\n crange=None, crange2=None, extend=None, cmap=cm.default, show=False,\n shiftgrd=None, orientation='landscape', title='', label='', label_xy=None,\n units='', scale=1., scale_label='', zscale='linear', da=[51, 51],\n subplot=None, adjustprops=None, loc=[], loc_args=dict(), xlim=None,\n ylim=None, lon0=None, xstep=None, ystep=None, etopo=False, profile=True,\n cbar=True, cbar_coords=None, legend=None, colors=None, alpha=1.,\n colors2='k', linestyles='-', linewidths=1, hatches=None, hook=None,\n posterize=None, fig=None, ax=None, ctype='contourf', cticks=None,\n ctick_labels=None, fmt='%.1f', mask=None, drawcoastlines=True,\n fillcontinents=True, drawcountries=True, drawstates=False, **kwargs):\n t1 = time()\n __init__()\n # Transforms input arrays in numpy arrays and numpy masked arrays.\n lat = numpy.asarray(lat)\n lon = numpy.asarray(lon)\n if tm != None:\n tm = numpy.asarray(tm)\n if not isinstance(z, numpy.ma.MaskedArray):\n z = numpy.ma.masked_invalid(z)\n\n # Determines the number of dimensions of the variable to be plotted and\n # the sizes of each dimension.\n triangulate = False\n dim = len(z.shape)\n if dim == 1:\n triangulate = True\n a, b, c = x.size, y.size, z.size\n elif dim == 3:\n c, b, a = z.shape\n elif dim == 2:\n b, a = z.shape\n c = 1\n z = z.reshape(c, b, a)\n else:\n raise Warning, ('Map plots require either bi-dimensional or tri-'\n 'dimensional data.')\n\n # Remasks data according to input mask\n if mask != None:\n if mask.ndim == 2:\n if (b, a) != mask.shape:\n raise ValueError('Shape of mask array has different shape as'\n ' data array.')\n z[None, :, :].repeat(c, axis=0)\n elif mask.ndim == 3:\n if (c, b, a) != mask.shape:\n raise ValueError('Shape of mask array has different shape as'\n ' data array.')\n z.mask = z.mask | mask\n\n if type(z2).__name__ != 'NoneType':\n z2 = numpy.ma.asarray(z2)\n z2.mask = (z2.mask | numpy.isnan(z2))\n z2 = z2.reshape(c, b, 
a)\n\n if lon.size != a:\n raise Warning, 'Longitude and data lengths do not match.'\n if lat.size != b:\n raise Warning, 'Latitude and data lengths do not match.'\n #if type(tm).__name__ != 'NoneType':\n # if tm.size != c:\n # raise Warning, 'Time and data lengths do not match.'\n\n # Shifts the longitude and data grid if applicable and determines central\n # latitude and longitude for the map.\n lon180 = common.lon180(lon)\n if xlim is None:\n #try:\n # mask = ~z.mask.all(axis=0).all(axis=0)\n # xlim = [lon180[mask].min(), lon180[mask].max()]\n #except:\n xlim = [lon.min(), lon.max()]\n if ylim is None:\n try:\n mask = ~z.mask.all(axis=0).all(axis=1)\n ylim = [lat[mask].min(), lat[mask].max()]\n except:\n ylim = [lat.min(), lat.max()]\n if lon0 is None:\n lon0 = numpy.mean(xlim)\n lat0 = numpy.mean(ylim)\n\n # Pad borders with NaN's to avoid distorsions\n #lon = numpy.concatenate([[lon[0] - dx], lon, [lon[-1] + dx]])\n #lat = numpy.concatenate([[lat[0] - dy], lat, [lat[-1] + dy]])\n #nan = numpy.ma.empty((c, 1, a)) * numpy.nan\n #nan.mask = True\n #z = numpy.ma.concatenate([nan, z, nan], axis=1)\n #nan = numpy.ma.empty((c, b+2, 1)) * numpy.nan\n #nan.mask = True\n #z = numpy.ma.concatenate([nan, z, nan], axis=2)\n\n # Loads topographic data, if appropriate.\n if etopo != False:\n ez = common.etopo.z\n ex = common.etopo.x\n ey = common.etopo.y\n if etopo == True:\n er = -numpy.array([100, 250, 500, 1000, 2000, 3000, 4000, 5000,\n 6000, 7000, 8000, 9000, 10000, 11000, 12000])[::-1]\n #er = -numpy.arange(1000, 12000, 1000)\n else:\n er = numpy.asarray(etopo)\n etopo = True\n #\n if (shiftgrd != 0): #| (projection in ['ortho', 'robin', 'moll']):\n ex = common.lon180(ex)\n shift = pylab.find(pylab.diff(ex) < 0)[0] + 1\n try:\n ex = numpy.roll(ex, -shift)\n ez = numpy.roll(ez, -shift)\n except:\n pass\n\n # Setting the color ranges\n if crange is None:\n cmajor, cminor, crange, cticks, extend = common.step(z/scale,\n returnrange=True)\n else:\n crange = 
numpy.asarray(crange)\n cminor = numpy.diff(crange).mean()\n if crange.size > 11:\n cmajor = 2 * cminor\n if cticks is None:\n crange_len = len(crange)\n #cticks_step = 2 * int(crange_len / 10.) + 1\n #cticks = crange[::cticks_step]\n if crange_len < 15 :\n cticks = crange[::2]\n elif crange_len < 30:\n cticks = crange[::5]\n elif crange_len < 60:\n cticks = crange[::10]\n else:\n cticks = crange[::20]\n xmin, xmax = z.min(), z.max()\n rmin, rmax = crange.min(), crange.max()\n\n if extend is None:\n if (xmin < rmin) & (xmax > rmax):\n extend = 'both'\n elif (xmin < rmin) & (xmax <= rmax):\n extend = 'min'\n elif (xmin >= rmin) & (xmax > rmax):\n extend = 'max'\n elif (xmin >= rmin) & (xmax <= rmax):\n extend = 'neither'\n else:\n raise Warning, 'Unable to determine extend'\n if (z2 is not None) and (crange2 is None):\n try:\n cmajor2, cminor2, crange2, cticks2, extend2 = common.step(z2,\n returnrange=True)\n except:\n cmajor2, cminor2, crange2, cticks2, extend2 = (cmajor, cminor,\n crange, cticks, extend)\n\n if cmap is None:\n ctype = 'contour'\n cbar = False\n\n # The chlorophyll-a color scale as described at\n # http://oceancolor.gsfc.nasa.gov/DOCS/standard_chlorophyll_colorscale.txt\n # Chl-a concentration are converted from mg m-3 to a log like scale, i.e.\n # pix = (log10(chlor_a) + 2) / 0.015\n # chlor_a = 10 ** (0.015 * pix - 2)\n if zscale == 'chla':\n cmap = cm.custom_chla\n pix = lambda chlor_a: (numpy.log10(chlor_a) + 2) / 0.015\n #chlor_a = lambda pix: 10 ** (0.015 * pix - 2)\n z = pix(z)\n zrange = numpy.array([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])\n zrange = zrange[zrange <= crange.max()]\n #crange = numpy.arange(0, 256, 16)\n crange = pix(zrange)\n cticks = pix(zrange)\n ctick_labels = zrange\n extend = 'both'\n ctype = 'pcolormesh'\n\n # Sets default location arguments\n default_args = dict(s=25, c='w', marker='o', alpha=1, zorder=99)\n for key in default_args.keys():\n if key not in loc_args.keys():\n loc_args[key] = default_args[key]\n\n\n # 
Turning interactive mode on or off according to show parameter.\n if show == False:\n pylab.ioff()\n elif show == True:\n pylab.ion()\n else:\n raise Warning, 'Invalid show option.'\n\n # Sets the figure properties according to the orientation parameter and to\n # the data dimensions.\n if adjustprops is None:\n if projection in ['cyl', 'eqdc', 'poly', 'omerc', 'vandg', 'nsper']:\n adjustprops = dict(left=0.1, bottom=0.15, right=0.95, top=0.9,\n wspace=0.05, hspace=0.5)\n else:\n adjustprops = dict(left=0.05, bottom=0.15, right=0.95, top=0.9,\n wspace=0.05, hspace=0.2)\n\n # Sets the meridian and the parallel coordinates and necessary parameters\n # depending on the chosen projection.\n if xstep is None:\n xstep = int(common.step(xlim, 5, kind='polar')[0])\n if ystep is None:\n ystep = int(common.step(ylim, 3, kind='polar')[0])\n merid = numpy.arange(10 * int(min(xlim) / 10 - 2),\n 10 * int(max(xlim) / 10 + 3), xstep)\n if (max(ylim) - min(ylim)) > 130 | (projection in ['ortho', 'robin',\n 'moll']):\n #paral = numpy.array([-(66. + 33. / 60. + 38. / (60. * 60.)),\n # -(23. + 26. / 60. + 22. / (60. * 60.)), 0.,\n # (23. + 26. / 60. + 22. / (60. * 60.)),\n # (66. + 33. / 60. + 38. / (60. 
* 60.))])\n #paral = numpy.round(paral)\n paral = numpy.array([-60, -30, 0, 30, 60])\n else:\n paral = numpy.arange(numpy.floor(min(ylim) / ystep) * ystep,\n numpy.ceil(max(ylim) / ystep) * ystep + ystep,\n ystep)\n if projection == 'eqdc':\n if not (('lat_0' in kwargs.keys()) and ('lat_1' in kwargs.keys())):\n kwargs['lat_0'] = min(ylim) + (max(ylim) - min(ylim)) / 3.\n kwargs['lat_1'] = min(ylim) + 2 * (max(ylim) - min(ylim)) / 3.\n if not ('lon_0' in kwargs.keys()):\n kwargs['lon_0'] = lon0\n elif projection == 'poly':\n if not ('lat_0' in kwargs.keys()):\n kwargs['lat_0'] = (max(ylim) - min(ylim)) / 2.\n if not ('lon_0' in kwargs.keys()):\n kwargs['lon_0'] = lon0\n elif projection == 'omerc':\n if not (('lat_0' in kwargs.keys()) and ('lat_1' in kwargs.keys())):\n kwargs['lat_1'] = min(ylim) + (max(ylim) - min(ylim)) / 4.\n kwargs['lat_2'] = min(ylim) + 3 * (max(ylim) - min(ylim)) / 4.\n if not (('lon_0' in kwargs.keys()) and ('lon_1' in kwargs.keys())):\n kwargs['lon_1'] = min(xlim) + (max(ylim) - min(ylim)) / 4.\n kwargs['lon_2'] = min(xlim) + 3 * (max(ylim) - min(ylim)) / 4.\n kwargs['no_rot'] = False\n elif projection == 'vandg':\n kwargs['lon_0'] = lon0\n elif projection == 'nsper':\n kwargs['lon_0'] = lon0\n kwargs['lat_0'] = lat0\n elif projection in ['aea', 'lcc', 'cass']:\n kwargs['lon_0'] = lon0\n kwargs['lat_0'] = (min(ylim) + max(ylim)) / 2.\n kwargs['lat_1'] = max(ylim) - (max(ylim) - min(ylim)) / 4.\n kwargs['lat_2'] = min(ylim) + (max(ylim) - min(ylim)) / 4.\n elif projection in ['ortho', 'robin', 'moll', 'laea']:\n kwargs['lat_0'] = lat0\n kwargs['lon_0'] = lon0\n if projection in ['aea', 'cyl', 'eqdc', 'poly', 'omerc', 'vandg',\n 'nsper', 'lcc', 'laea', 'cass']:\n kwargs['llcrnrlat'] = min(ylim)\n kwargs['urcrnrlat'] = max(ylim)\n kwargs['llcrnrlon'] = min(xlim)\n kwargs['urcrnrlon'] = max(xlim)\n\n\n # Setting the subplot parameters in case multiple maps per figure.\n try:\n plrows, plcols = subplot[0:2]\n except:\n if type(tm).__name__ in 
['NoneType', 'float']:\n if orientation in ['landscape', 'worldmap']:\n plcols = min(3, c)\n plrows = numpy.ceil(float(c) / plcols)\n elif orientation == 'portrait':\n plrows = min(3, c)\n plcols = numpy.ceil(float(c) / plrows)\n elif orientation == 'squared':\n plrows = plcols = numpy.ceil(float(c) ** 0.5)\n else:\n plcols = plrows = 1\n\n bbox = dict(edgecolor='w', facecolor='w', alpha=0.9)\n\n # Starts the plotting routines\n if profile:\n if c == 1:\n plural = ''\n else:\n plural = 's'\n s = 'Plotting %d map%s... ' % (c, plural)\n stdout.write(s)\n stdout.flush()\n\n if fig is None:\n fig = graphics.figure(fp=dict(), ap=adjustprops,\n orientation=orientation)\n for n in range(c):\n t2 = time()\n if plcols * plrows > 1:\n try:\n ax = fig.add_subplot(plrows, plcols, subplot[2])\n except:\n ax = fig.add_subplot(plrows, plcols, n + 1)\n elif ax is None:\n fig.clear()\n ax = fig.add_subplot(plcols, plrows, 1)\n\n m = Basemap(projection=projection, ax=ax, **kwargs)\n if (projection in ['ortho', 'robin', 'moll']):\n xoffset = (m.urcrnrx - m.llcrnrx) / 50.\n else:\n xoffset = None\n\n # TODO: Check shiftgrid and projections\n dat = z[n, :, :] / scale\n if (shiftgrd != None):\n Lon = lon\n dat, lon = shiftgrid(shiftgrd, dat, Lon, start=False)\n\n x, y = m(*numpy.meshgrid(lon, lat))\n\n # Set the merdians' and parallels' labels\n try:\n nn, cc = subplot[2]-1, subplot[0] * subplot[1]\n except:\n nn, cc = n, c\n if plcols * plrows > 1:\n if (nn % plcols) == 0:\n plabels = [1, 0, 0, 0]\n else:\n plabels = [0, 0, 0, 0]\n if (nn >= cc - plcols):\n mlabels = [0, 0, 0, 1]\n else:\n mlabels = [0, 0, 0, 0]\n else:\n mlabels = [0, 0, 0, 1]\n plabels = [1, 0, 0, 0]\n if projection in ['ortho']:\n plabels = [0, 0, 0, 0]\n if projection in ['geos', 'ortho', 'aeqd', 'moll']:\n mlabels = [0, 0, 0, 0]\n\n # Plots locations\n for item in loc:\n xx, yy = m(item[0], item[1])\n m.scatter(xx, yy, **loc_args)\n\n # Plot contour\n if hatches is None:\n hatches = [None]\n elif hatches != 
[None]:\n m.contour(x, y, dat, len(crange), colors=colors,\n linestyles=linestyles, linewidths=linewidths, alpha=alpha)\n if ctype == 'pcolormesh':\n im = m.pcolormesh(x, y, dat, vmin=crange.min(), vmax=crange.max(),\n cmap=cmap, hold='on', alpha=alpha)\n elif ctype == 'contourf':\n im = m.contourf(x, y, dat, crange, cmap=cmap, extend=extend,\n hold='on', colors=colors, hatches=hatches, alpha=alpha)\n elif ctype == 'contour':\n im = m.contour(x, y, dat, crange, cmap=cmap, extend=extend,\n hold='on', colors=colors, linestyles=linestyles,\n linewidths=linewidths, hatches=hatches, alpha=alpha)\n if cmap is None:\n pylab.clabel(im, fmt=fmt, inline=True, fontsize='medium')\n elif ctype == 'tricontour':\n im = m.tricontour(x, y, dat, crange, cmap=cmap, extend=extend,\n hold='on', colors=colors, linestyles=linestyles,\n linewidths=linewidths, hatches=hatches, alpha=alpha)\n if cmap is None:\n pylab.clabel(im, fmt=fmt, inline=True, fontsize='normal')\n\n if type(z2).__name__ != 'NoneType':\n dat2 = z2[n, :, :]\n if shiftgrd != None:\n dat2, lon = shiftgrid(shiftgrd, dat2, Lon, start=False)\n if numpy.iscomplex(dat2).any():\n if da is None:\n im2 = m.quiver(lon, lat, dat2.real, dat2.imag, latlon=True,\n alpha=0.6)\n else:\n u, v, xx, yy = m.transform_vector(dat2.real, dat2.imag,\n lon, lat, da[1], da[0], returnxy=True, masked=True)\n im2 = m.quiver(xx, yy, u, v, alpha=0.6)\n else:\n im2 = m.contour(x, y, dat2, crange2, colors=colors2, hatch='x',\n hold='on', alpha=0.6)\n #linewidths=numpy.linspace(0.25, 2., len(crange2))\n #pylab.clabel(im2, fmt='%.1f')\n\n # Plot topography, if appropriate\n if etopo:\n if cmap == cm.GMT_relief:\n colors = 'w'\n else:\n colors = 'k'\n xe, ye = m(*numpy.meshgrid(ex, ey))\n cs = m.contour(xe, ye, ez, er, colors=colors2, linestyles='-',\n alpha=0.5, hold='on')\n if ((xlim[1] - xlim[0]) <= 5 | (ylim[1] - ylim[0] <= 5)):\n pylab.clabel(cs, fontsize='x-small', fmt='%d',\n rightside_up=False, use_clabeltext=True)\n\n # Run hook function, if 
appropriate\n try:\n hook(m, ax, fig)\n except:\n pass\n\n if drawcoastlines:\n m.drawcoastlines()\n if fillcontinents:\n if cmap is None:\n m.fillcontinents(color=(0.9, 0.9, 0.9))\n else:\n m.fillcontinents(color='white')\n if drawcountries:\n m.drawcountries()\n if drawstates:\n m.drawstates()\n if projection != 'nsper':\n m.drawmapboundary(fill_color='white')\n m.drawmeridians(merid, linewidth=0.5, labels=mlabels)\n m.drawparallels(paral, linewidth=0.5, labels=plabels, xoffset=xoffset)\n\n if cbar == True:\n # Draws colorbar\n #corners = ax.get_position().corners()\n #position = numpy.array([corners[0, 0], corners[0, 1],\n # corners[2, 0] - corners[0, 0], 0]) + numpy.array([0.15, -0.13,\n # -0.3, 0.03])\n if cbar_coords is None:\n if orientation == 'squared':\n cbar_coords = [0.25, 0.07, 0.5, 0.03]\n elif orientation in ['landscape', 'worldmap']:\n cbar_coords = [0.25, 0.08, 0.5, 0.03]\n elif orientation == 'portrait':\n cbar_coords = [0.25, 0.05, 0.5, 0.015]\n cax = pylab.axes(cbar_coords)\n pylab.colorbar(im, cax=cax, orientation='horizontal', ticks=cticks,\n extend=extend)\n if ctick_labels != None:\n cax.set_xticklabels(ctick_labels)\n elif legend != None:\n # Draws legend\n graphics.legend(legend, im=im, bbox=(0.5, -0.05))\n\n\n # Titles, units and other things\n ttl = None\n if type(title) == str:\n ttl = title\n ttl_sup = True\n else:\n ttl_sup = False\n try:\n ttl = title[n]\n except:\n pass\n if ttl:\n if ttl == '%date%':\n try:\n ttl = dates.num2date(tm[n]).isoformat()[:10]\n except:\n try:\n ttl = dates.num2date(tm).isoformat()[:10]\n except:\n ttl = ''\n pass\n # If only one title is a string, then assume it is a figure title,\n # otherwise assume it is a subplot title.\n if ttl_sup:\n if n == 0:\n x = 0.5 * (adjustprops['left'] + adjustprops['right'])\n y = adjustprops['top'] + 0.02\n fig.suptitle(ttl, x=x, y=y, ha='center', va='baseline',\n fontsize='large')\n else:\n ax.text(0.5, 1.05, ttl, ha='center', va='baseline',\n transform=ax.transAxes)\n\n 
lbl = None\n if label.__class__ == str:\n lbl = label\n else:\n try:\n lbl = label[n]\n except:\n pass\n if lbl:\n if lbl == '%date%':\n try:\n lbl = dates.num2date(tm[n]).isoformat()[:10]\n except:\n try:\n lbl = dates.num2date(tm).isoformat()[:10]\n except:\n lbl = ''\n pass\n if label_xy is None:\n lbl_x, lbl_y = 0.02, 0.79\n else:\n lbl_x, lbl_y = label_xy\n ax.text(lbl_x, lbl_y, lbl, ha='left', va='baseline',\n transform=ax.transAxes, bbox=bbox)\n\n if type(units) in [str, unicode]:\n unt = units\n sc_lbl = scale_label\n else:\n try:\n unt = units[n]\n sc_lbl = scale_label[n]\n except:\n unt = None\n pass\n if cbar & (unt not in [None, '']):\n if sc_lbl not in [None, '']:\n sc_lbl = '%s~' % (sc_lbl)\n cax.text(1.05, 0.5, r'$\\left[%s %s\\right]$' % (sc_lbl, unt),\n ha='left', va='center', transform=cax.transAxes)\n\n # Drawing and saving the figure if appropriate.\n pylab.draw()\n if save:\n if (c == 1) | (plcols * plrows > 1):\n pylab.savefig('%s.%s' % (save, ftype), dpi=150)\n else:\n pylab.savefig('%s%06d.%s' % (save, n+1, ftype), dpi=150)\n\n if profile:\n stdout.write(len(s) * '\\b')\n s = 'Plotting %d map%s... %s ' % (c, plural, common.profiler(c,\n n + 1, 0, t1, t2),)\n stdout.write(s)\n stdout.flush()\n\n # Posterizes the figure\n try:\n posterize(m, ax, fig)\n if save:\n if (c == 1) | (plcols * plrows > 1):\n pylab.savefig('%s.%s' % (save, ftype), dpi=150)\n else:\n pylab.savefig('%s%06d.%s' % (save, n+1, ftype), dpi=150)\n except:\n pass\n #\n if profile:\n stdout.write('\\n')\n return fig, m",
"def basic_map(proj):\n fig = plt.figure(figsize=(15, 10))\n add_metpy_logo(fig, 0, 80, size='large')\n view = fig.add_axes([0, 0, 1, 1], projection=proj)\n view.set_extent([-120, -70, 20, 50])\n view.add_feature(cfeature.STATES.with_scale('50m'))\n view.add_feature(cfeature.OCEAN)\n view.add_feature(cfeature.COASTLINE)\n view.add_feature(cfeature.BORDERS, linestyle=':')\n return fig, view",
"def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t 
cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()",
"def heatmap2d(self, x, y, cs, labels, ptype, pval, save_path=None,\n show=False, draw=False, fixed=None, colorsMap='jet'):\n # cs = np.flipud(cs)\n cm = plt.get_cmap(colorsMap)\n if np.iscomplexobj(cs):\n self.log.warning('Plotting only real part of %s in heatmap',\n labels[2])\n cs = cs.real\n if fixed:\n if 'dielectric_profile' in save_path:\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(0), vmax=np.amax(16))\n else:\n pass\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(cs), vmax=np.amax(cs))\n # cNorm = matplotlib.colors.Normalize(\n # vmin=np.amin(0), vmax=np.amax(2.5))\n else:\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(cs), vmax=np.amax(cs))\n # cNorm = matplotlib.colors.LogNorm(vmin=np.amin(cs)+.001, vmax=np.amax(cs))\n # cNorm = matplotlib.colors.LogNorm(vmin=1e13, vmax=np.amax(cs))\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n # ax.imshow(cs,cmap=cm,norm=cNorm,extent=[x.min(),x.max(),y.min(),y.max()],aspect='auto')\n ax.invert_yaxis()\n ax.pcolormesh(x, y, cs, cmap=cm, norm=cNorm)\n # extent=[x.min(),x.max(),y.min(),y.max()],aspect='auto')\n ax.grid(False)\n scalarMap.set_array(cs)\n # div = make_axes_locatable(ax)\n # zoom_ax = div.append_axes(\"right\",size='100%', pad=.5)\n # zoom_ax.imshow(cs[75:100,:], extent=[x.min(), x.max(), .8, 1.4])\n # zoom_ax.grid(False)\n # cax = div.append_axes(\"right\",size=\"100%\",pad=.05)\n cb = fig.colorbar(scalarMap)\n cb.set_label(labels[2])\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n if draw:\n self.log.info('Beginning geometry drawing routines ...')\n ax = self.draw_geometry_2d(ptype, pval, ax)\n if save_path:\n fig.savefig(save_path, bbox_inches='tight')\n if show:\n plt.show()\n plt.close(fig)",
"def drawHeatMap(xdat, name=None, colors=pylab.cm.Reds, dendro=False, protColors=None, cIndex=None, km=None, \r\n nameDict={}, scale=None, saveName=None, colorBar=False, figSize=(6,6), topDendro=False, fig=None, axData=None):\r\n\r\n data = xdat['data']\r\n if nameDict is None:\r\n nameList = [i for i in xdat['fractions']]\r\n else:\r\n nameList = [nameDict[i] for i in xdat['fractions']]\r\n\r\n proteins = [i for i in xdat['proteins']]\r\n if fig is None:\r\n fig = pylab.figure(figsize=figSize)\r\n if not (name is None):\r\n fig.suptitle(name)\r\n ##Draw heatmap\r\n xOffset = 0.05\r\n if colorBar:\r\n xLess = 0.10\r\n else:\r\n xLess = 0.00\r\n if dendro:\r\n xStart = 0.375\r\n xLength = 0.55-xLess\r\n else:\r\n xStart = 0.125\r\n xLength = 0.85-xLess\r\n if (km is None) and (topDendro is False):\r\n yStart = 0.05\r\n yLength = 0.9\r\n else:\r\n yStart = 0.05\r\n yLength = 0.8\r\n figAxes = heatMapAxes(data, dims = [xStart, yStart, xLength, yLength], columns=nameList, rows=proteins, protColors=protColors, cIndex=cIndex, fig=fig, colors=colors, axData=axData)\r\n ##Draw colorbar\r\n if colorBar:\r\n fig.colorbar(figAxes)\r\n\r\n if dendro:\r\n ax2Data = fig.add_axes([xOffset, yStart, xLength-0.3, yLength])\r\n sch.dendrogram(xdat['rightDendro'], orientation='right', color_threshold=0.0)\r\n ax2Data.set_xticks([])\r\n ax2Data.set_yticks([])\r\n \r\n if topDendro:\r\n ax4Data = fig.add_axes([xStart, yStart+yLength, xLength, 0.1])\r\n sch.dendrogram(xdat['topDendro'], orientation='down', color_threshold=0.0)\r\n ax4Data.set_xticks([])\r\n ax4Data.set_yticks([])\r\n \r\n if not km is None:\r\n small = data.min()\r\n big = data.max()\r\n if math.fabs(small) > math.fabs(big):\r\n big = 0-small\r\n else:\r\n small = 0-big\r\n offset=0.0\r\n ax3Data = fig.add_axes([xStart, yLength+offset, xLength-0.1, 0.1])\r\n ax3Data.matshow(km, aspect='auto', origin='lower', cmap=colors, vmin=small, vmax=big)\r\n for i in range(len(km)):\r\n ax3Data.text(-0.75, i, 'clus'+str(i), 
verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=10, color=cIndex(float(i)/(protColors.max()+1)))\r\n ax3Data.set_xticks([])\r\n ax3Data.set_yticks([])\r\n #fig.tight_layout()\r\n if not (saveName is None):\r\n pylab.savefig(saveName)\r\n \r\n return fig",
"def plot_map(fig=None,\n coast='110m',\n figsize=(10, 10),\n bounds=None,\n cp=None,\n grid_linewidth=1,\n **kwargs,\n ):\n crs = ccrs.PlateCarree()\n #\n if fig is None:\n fig = plt.figure(figsize=figsize)\n else:\n fig.clf()\n\n if bounds is None:\n if cp is not None:\n bounds = cp.bounds\n else:\n bounds = _bounds_default\n\n ax = fig.add_subplot(111, projection=crs)\n ax.set_extent(bounds, crs=crs)\n gl = ax.gridlines(crs=crs,\n draw_labels=True,\n linewidth=grid_linewidth,\n color='k',\n alpha=0.5, linestyle='--',\n )\n gl.xlabels_top = False\n #\n if coast in ['10m', '50m', '110m']:\n ax.coastlines(resolution=coast, color='k')\n elif coast in ['auto', 'coarse', 'low', 'intermediate', 'high', 'full']:\n shpfile = shapereader.gshhs('h')\n shp = shapereader.Reader(shpfile)\n ax.add_geometries(\n shp.geometries(), crs, edgecolor='black', facecolor='none')\n elif coast=='med':\n # conda install -c conda-forge gdal\n # ogr2ogr -f \"ESRI Shapefile\" med_coast.shp /Users/aponte/.local/share/cartopy/shapefiles/gshhs/h/GSHHS_h_L1.shp -clipsrc 5 42. 7 44.\n shp = shapereader.Reader(os.getenv('HOME')+'/data/OSM/med/med_coast.shp')\n for record, geometry in zip(shp.records(), shp.geometries()):\n ax.add_geometries([geometry], crs, facecolor='None', edgecolor='black')\n elif coast=='med_high':\n # conda install -c conda-forge gdal\n # ogr2ogr -f \"ESRI Shapefile\" med_high_coast.shp ../coastlines/lines.shp -clipsrc 6 7 42.5 43.5\n shp = shapereader.Reader(os.getenv('HOME')+'/data/OSM/med/med_high_coast.shp')\n for record, geometry in zip(shp.records(), shp.geometries()):\n ax.add_geometries([geometry], crs, facecolor='None', edgecolor='black')\n\n return [fig, ax, crs]",
"def fig_craco_fiducial(outfile='fig_craco_fiducial.png',\n zmax=2.5,DMmax=2500,\n show_Macquart=False,\n log=True,\n label='$\\\\log_{10} \\; p(DM_{\\\\rm EG},z)$',\n Aconts=[0.01, 0.1, 0.5],\n cmap='jet', show=False, figsize=None,\n vmnx=(None,None),\n grid=None, survey=None):\n # Generate the grid\n if grid is None or survey is None:\n survey, grid = analy_H0_I.craco_mc_survey_grid()\n\n # Unpack\n full_zDMgrid, zvals, dmvals = grid.rates, grid.zvals, grid.dmvals\n FRBZ=survey.frbs['Z']\n FRBDM=survey.DMEGs\n \n ##### imshow of grid #######\n fsize = 14.\n plt.figure(figsize=figsize)\n ax1=plt.axes()\n plt.sca(ax1)\n \n plt.xlabel('z')\n plt.ylabel('${\\\\rm DM}_{\\\\rm EG}$')\n #plt.title(title+str(H0))\n \n # Cut down grid\n zvals, dmvals, zDMgrid = figures.proc_pgrid(\n full_zDMgrid, \n zvals, (0, zmax),\n dmvals, (0, DMmax))\n ddm=dmvals[1]-dmvals[0]\n dz=zvals[1]-zvals[0]\n nz, ndm = zDMgrid.shape\n\n # Contours\n alevels = figures.find_Alevels(full_zDMgrid, Aconts, log=True)\n \n # Ticks\n tvals, ticks = figures.ticks_pgrid(zvals)# , fmt='str4')\n plt.xticks(tvals, ticks)\n tvals, ticks = figures.ticks_pgrid(dmvals, fmt='int')# , fmt='str4')\n plt.yticks(tvals, ticks)\n\n # Image \n im=plt.imshow(zDMgrid.T,cmap=cmap,origin='lower', \n vmin=vmnx[0], vmax=vmnx[1],\n interpolation='None',\n aspect='auto')\n \n styles=['--','-.',':']\n ax=plt.gca()\n cs=ax.contour(zDMgrid.T,levels=alevels,origin='lower',colors=\"white\",linestyles=styles)\n\n ax=plt.gca()\n \n muDMhost=np.log(10**grid.state.host.lmean)\n sigmaDMhost=np.log(10**grid.state.host.lsigma)\n meanHost = np.exp(muDMhost + sigmaDMhost**2/2.)\n medianHost = np.exp(muDMhost) \n print(f\"Host: mean={meanHost}, median={medianHost}\")\n plt.ylim(0,ndm-1)\n plt.xlim(0,nz-1)\n zmax=zvals[-1]\n nz=zvals.size\n #DMbar, zeval = igm.average_DM(zmax, cumul=True, neval=nz+1)\n DM_cosmic = pcosmic.get_mean_DM(zvals, grid.state)\n\n \n #idea is that 1 point is 1, hence...\n zeval = zvals/dz\n DMEG_mean = 
(DM_cosmic+meanHost)/ddm\n DMEG_median = (DM_cosmic+medianHost)/ddm\n\n # Check median\n f_median = scipy.interpolate.interp1d(\n zvals, DM_cosmic+medianHost, \n fill_value='extrapolate')\n eval_DMEG = f_median(FRBZ)\n above = FRBDM > eval_DMEG\n print(f\"There are {np.sum(above)/len(FRBZ)} above the median\")\n\n if show_Macquart:\n plt.plot(zeval,DMEG_mean,color='gray',linewidth=2,\n label='Macquart relation (mean)')\n plt.plot(zeval,DMEG_median,color='gray',\n linewidth=2, ls='--',\n label='Macquart relation (median)')\n l=plt.legend(loc='lower right',fontsize=12)\n #l=plt.legend(bbox_to_anchor=(0.2, 0.8),fontsize=8)\n #for text in l.get_texts():\n #\ttext.set_color(\"white\")\n \n # limit to a reasonable range if logscale\n if log and vmnx[0] is None:\n themax=zDMgrid.max()\n themin=int(themax-4)\n themax=int(themax)\n plt.clim(themin,themax)\n \n ##### add FRB host galaxies at some DM/redshift #####\n if FRBZ is not None:\n iDMs=FRBDM/ddm\n iZ=FRBZ/dz\n # Restrict to plot range\n gd = (FRBDM < DMmax) & (FRBZ < zmax)\n plt.plot(iZ[gd],iDMs[gd],'ko',linestyle=\"\",markersize=2.)\n\n cbar=plt.colorbar(im,fraction=0.046, shrink=1.2,aspect=15,pad=0.05)\n cbar.set_label(label)\n\n fig_utils.set_fontsize(ax, fsize)\n \n plt.tight_layout()\n \n if show:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300)\n print(f\"Wrote: {outfile}\")\n plt.close()",
"def heatMapAxes(data, dims=[0.1, 0.1, 0.7, 0.7], colors=pylab.cm.autumn, columns=None, rows=None, protColors=None, cIndex=None, fig=None, colorBar=False, axData=None):\r\n if fig is None:\r\n fig = pylab.figure()\r\n if axData is None:\r\n axData = fig.add_axes(dims)\r\n for i in range(len(columns)):\r\n axData.text(i, -0.5 , ' '+str(columns[i]), rotation=270, verticalalignment=\"top\", horizontalalignment=\"center\", fontsize=12)\r\n if protColors == None:\r\n for i in range(len(rows)):\r\n axData.text(-0.75, i, ' '+str(rows[i]), verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=12)\r\n else:\r\n for i in range(len(rows)):\r\n axData.text(-0.75, i, ' '+str(rows[i]), verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=12, color=cIndex(float(protColors[i])/(protColors.max()+1)))\r\n small = data.min()\r\n big = data.max()\r\n if math.fabs(small) > math.fabs(big):\r\n big = 0-small\r\n else:\r\n small = 0-big\r\n masked_array = numpy.ma.array (data, mask=numpy.isnan(data))\r\n colors.set_bad('grey',1.)\r\n figData = axData.imshow(masked_array, interpolation='nearest', cmap=colors, aspect='auto', origin='lower')\r\n if colorBar:\r\n fig.colorbar(figData, ax=axData, ticks=[0, 0.25, 0.50, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], pad=0.01, extend='neither')\r\n axData.set_xticks([])\r\n axData.set_yticks([])\r\n return figData",
"def setplot(plotdata):\n#-------------------------- \n\n\n from pyclaw.plotters import colormaps, geoplot\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n def set_drytol(current_data):\n # The drytol parameter is used in masking land and water and\n # affects what color map is used for cells with small water depth h.\n # The cell will be plotted as dry if h < drytol.\n # The best value to use often depends on the application and can\n # be set here (measured in meters):\n current_data.user.drytol = 1.e-4\n\n\n plotdata.beforeframe = set_drytol\n\n # To plot gauge locations on pcolor or contour plot, use this as\n # an afteraxis function:\n\n def addgauges(current_data):\n from pyclaw.plotters import gaugetools\n gaugetools.plot_gauge_locations(current_data.plotdata, \\\n gaugenos='all', format_string='ko', add_labels=True)\n \n\n #-----------------------------------------\n # Figure for imshow plot\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='imshow', figno=0)\n plotfigure.show = False\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('imshow')\n plotaxes.title = 'Surface'\n plotaxes.scaled = True\n\n # Water\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n #plotitem.plot_var = geoplot.surface\n plotitem.plot_var = geoplot.surface_or_depth\n plotitem.imshow_cmap = geoplot.tsunami_colormap\n plotitem.imshow_cmin = -0.02\n plotitem.imshow_cmax = 0.02\n plotitem.add_colorbar = True\n plotitem.amr_gridlines_show = [0,0,0]\n plotitem.amr_gridedges_show = [1]\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n plotitem.plot_var = geoplot.land\n plotitem.imshow_cmap = geoplot.land_colors\n plotitem.imshow_cmin = 0.0\n plotitem.imshow_cmax = 0.05\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [0,0,0]\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n\n \n\n #-----------------------------------------\n # Figure for imshow plot\n 
#-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface and Gauge 1', figno=20)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('imshow')\n plotaxes.axescmd = \"axes([.1,.5,.8,.4])\"\n plotaxes.title = 'Surface'\n plotaxes.scaled = True\n plotaxes.afteraxes = addgauges\n\n # Water\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n #plotitem.plot_var = geoplot.surface\n plotitem.plot_var = geoplot.surface_or_depth\n plotitem.imshow_cmap = geoplot.tsunami_colormap\n plotitem.imshow_cmin = -0.03\n plotitem.imshow_cmax = 0.03\n plotitem.add_colorbar = True\n plotitem.amr_gridlines_show = [0,0,0]\n plotitem.amr_gridedges_show = [1]\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n plotitem.plot_var = geoplot.land\n plotitem.imshow_cmap = geoplot.land_colors\n plotitem.imshow_cmin = 0.0\n plotitem.imshow_cmax = 0.05\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [0,0,0]\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n\n\n # Gauge trace:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.show = False\n plotaxes.axescmd = \"axes([.1,.1,.8,.3])\"\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = [-0.02, 0.05]\n plotaxes.title = 'Gauge 1'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_gauge_trace')\n plotitem.plot_var = 3\n plotitem.plotstyle = 'b-'\n plotitem.gaugeno = 1\n\n\n #-----------------------------------------\n # Figure for zoom\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Zoom', figno=10)\n #plotfigure.show = False\n plotfigure.kwargs = {'figsize':[7,7]}\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('monai')\n #plotaxes.axescmd = 'axes([0.0,0.1,0.6,0.6])'\n plotaxes.title = 'Monai Valley'\n plotaxes.scaled = True\n #plotaxes.xlimits = [4.0, 5.2]\n #plotaxes.ylimits = [1.3, 2.5]\n plotaxes.xlimits = [4.7, 5.2]\n plotaxes.ylimits = [1.5, 
2.2]\n\n # Water\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n #plotitem.plot_var = geoplot.surface\n plotitem.plot_var = geoplot.surface_or_depth\n plotitem.imshow_cmap = geoplot.tsunami_colormap\n plotitem.imshow_cmin = -0.02\n plotitem.imshow_cmax = 0.02\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [0]\n plotitem.amr_gridedges_show = [1]\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')\n plotitem.plot_var = geoplot.land\n plotitem.imshow_cmap = geoplot.land_colors\n plotitem.imshow_cmin = 0.0\n plotitem.imshow_cmax = 0.05\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [0]\n\n # Add contour lines of bathymetry:\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = geoplot.topo\n from numpy import arange, linspace\n plotitem.contour_levels = arange(-0.02, 0., .0025)\n plotitem.amr_contour_colors = ['k'] # color on each level\n plotitem.kwargs = {'linestyles':'solid'}\n plotitem.amr_contour_show = [1] # show contours only on finest level\n plotitem.gridlines_show = 0\n plotitem.gridedges_show = 0\n plotitem.show = True\n \n # Add contour lines of topography:\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = geoplot.topo\n from numpy import arange, linspace\n plotitem.contour_levels = arange(0., .2, .01)\n plotitem.amr_contour_colors = ['w'] # color on each level\n plotitem.kwargs = {'linestyles':'solid'}\n plotitem.amr_contour_show = [1] # show contours only on finest level\n plotitem.gridlines_show = 0\n plotitem.gridedges_show = 0\n plotitem.show = True\n\n # Add dashed contour line for current shoreline\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = 0\n plotitem.contour_levels = [0.002]\n plotitem.amr_contour_colors = ['k'] # color on each level\n plotitem.kwargs = {'linestyles':'dashed','linewidths':2}\n plotitem.amr_contour_show = [1] # show contours only on finest level\n plotitem.gridlines_show = 
0\n plotitem.gridedges_show = 0\n plotitem.show = True\n\n\n\n\n #-----------------------------------------\n # Figures for gauges\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \\\n type='each_gauge')\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [0,25]\n plotaxes.ylimits = [-0.02, 0.05]\n plotaxes.title = 'Surface'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 3\n plotitem.plotstyle = 'b-'\n\n # Plot topo as green curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n\n def gaugetopo(current_data):\n q = current_data.q\n h = q[:,0]\n eta = q[:,3]\n topo = eta - h\n return topo\n \n plotitem.plot_var = gaugetopo\n plotitem.clf_each_gauge = False\n plotitem.plotstyle = 'g-'\n def afteraxes(current_data):\n from pylab import plot, legend, loadtxt\n t = current_data.t\n plot(t, 0*t, 'k')\n gaugeno = current_data.gaugeno\n \n if gaugeno in [5,7,9]:\n col = (gaugeno-3)/2\n plot(labgage[:,0],0.01*labgage[:,col],'r')\n legend(('GeoClaw','topography','sea level','lab data'),loc='upper left')\n else:\n legend(('GeoClaw','topography','sea level'),loc='upper right')\n \n \n\n plotaxes.afteraxes = afteraxes\n\n\n #-----------------------------------------\n # Figure for grids alone\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='grids', figno=2)\n plotfigure.show = False\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = [0,1]\n plotaxes.ylimits = [0,1]\n plotaxes.title = 'grids'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_grid')\n plotitem.amr_grid_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']\n plotitem.amr_gridlines_show = [1,1,0] \n plotitem.amr_gridedges_show = [1] \n\n #-----------------------------------------\n \n # 
Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n #plotdata.print_framenos = [4,6,8,10,12]\n plotdata.print_framenos = [5,7,9,11,13]\n plotdata.print_gaugenos = [0,5,7,9] # list of gauges to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata",
"def make_plot(\n X: np.ndarray, Y: np.ndarray, Z: np.ndarray, \n int_coords: List[np.ndarray], levels: np.ndarray, \n color_range: List[str], inter_color: List[Union[str, float]], \n SAVE: bool, SAVE_NAME: str):\n color1 = Color(color_range[0])\n color2 = Color(color_range[1])\n \n # List of Color objects, need to convert for matplotlib\n color_list = list(color1.range_to(color2, len(levels)))\n \n contour_colors = []\n for color in color_list:\n contour_colors.append(color.hex)\n\n fig = plt.figure(figsize = [8, 6])\n ax1 = fig.add_subplot(111)\n\n # Set title\n fig.suptitle('Quartz Deformation Mechanism Map')\n \n CS = ax1.contour(X, Y, Z, levels, colors = contour_colors) \n \n # Add intersection lines to plot\n for coord_arr in int_coords:\n # Get x indices and y indices\n x, y = coord_arr.T\n \n # Get actual points to plot\n points_x = X[x, y]\n points_y = Y[x, y]\n\n ax1.plot(points_x, points_y, \n color = inter_color[0], alpha = inter_color[1])\n\n # Labels... hardcoded. May find a way to make this generic\n sr_labels = [\n r'$10^{-15}$', r'$10^{-14}$', r'$10^{-13}$', \n r'$10^{-12}$', r'$10^{-11}$', r'$10^{-10}$', \n r'$10^{-9}$', r'$10^{-8}$', r'$10^{-7}$', \n r'$10^{-6}$']\n\n # Make legend equal to contour values from sr_labels\n for i in range(len(sr_labels)):\n CS.collections[i].set_label(sr_labels[i])\n\n # Reverse legend order, set location and title\n handles, labels = ax1.get_legend_handles_labels()\n ax1.legend(reversed(handles), reversed(labels), \n loc = 'upper right', title = r'$\\.\\epsilon$')\n\n # Set log scale and ylim\n ax1.set_yscale('log')\n ax1.set_ylim([10 ** -6, 1])\n \n # Set labels... could be done better in Illustrator...\n ax1.set_ylabel(\n r'Shear Stress $log\\left( \\frac{\\sigma}{\\mu}\\right)$'\n )\n ax1.set_xlabel(\n r'Homologous Temperature $\\left( \\frac{T}{T_m} \\right)$'\n )\n\n # Plot tick marks\n ax1.grid()\n\n if SAVE:\n fig.savefig(SAVE_NAME)\n else:\n plt.show()",
"def plotmap(ll, dd, outputfile, alpha, linewidth=1, bounds=None, maxdist=1, maxlinewidth=6,\n colorcontinents=False, plotintensity=False, legendfile=None, linewidthbyn=False):\n global verbose\n\n if verbose:\n sys.stderr.write(\"Plotting the map\\n\")\n\n # there are three different alphas that we are looking at\n # the lines between samples (and potentially split those to the lines within and between continents)\n # the dots\n # the circles to themselves.\n\n # These are the maximum values\n #\n # Primer: A B C\n # Cirlces: 530 485 289\n # Lines: 10 9 13\n # Dots: 2040 1806 680\n # at most out of these we only have 30 different numbers.\n\n\n # These numbers adjust the size of the things drawn\n # markersize is for the black dots\n markersize = 10 # this was 10 originally, but maybe 50 on a big image\n # this is the width of the lines.\n pixelwidth = [1, 2, 4] # may be 2, 10, 20 on a big image\n\n\n ax = plt.axes(projection=ccrs.Robinson())\n\n # make the map global rather than have it zoom in to\n # the extents of any plotted data\n if not bounds:\n ax.set_global()\n\n ax.stock_img()\n ax.coastlines()\n\n ## color the lines based on the maximum distance value\n jet = cm = plt.get_cmap('jet')\n cNorm = colors.Normalize(vmin=0, vmax=maxdist)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\n\n # Using contourf to provide my colorbar info, then clearing the figure\n Z = [[0, 0], [0, 0]]\n levels = range(0, int(100 * maxdist) + 10, 10)\n CS3 = plt.contourf(Z, levels, cmap=jet)\n# plt.clf()\n\n\n # NOTE: longitude before latitude!!\n # plt.plot([sdlon, brislon], [sdlat, brislat], color='blue', linewidth=2, transform=ccrs.Geodetic())\n\n # plot the circles for each sample site\n # markerfacecolor=\"None\",\n\n\n # note that now we calculate where everything should be and then plot it based on maximum values!\n dotat = {}\n for lid in ll:\n if lid not in dd:\n continue\n lonlat = ll[lid]\n if bounds and ((lonlat[1] < bounds[0] or lonlat[1] > bounds[2]) or 
(lonlat[0] < bounds[1] or lonlat[0] > bounds[3])):\n if verbose:\n sys.stderr.write(\"Not in bounding box: {}\\n\".format(lonlat))\n continue\n if verbose:\n sys.stderr.write(\"Kept location: {}\\n\".format(lonlat))\n # plt.plot(lonlat[0], lonlat[1], 'o', color='Black', alpha=alpha, markersize=10, transform=ccrs.PlateCarree())\n dotat[(lonlat[0], lonlat[1])] = dotat.get((lonlat[0], lonlat[1]), 0) + 1\n\n maxdot = max(dotat.values())\n sys.stderr.write(f\"Maximum dot density is {maxdot}\\n\")\n # we make the mean 50% intensity this time\n meandot = np.mean(list(dotat.values()))\n sys.stderr.write(f\"The mean dot density is {meandot}\\n\")\n print()\n # now we color the dots based on the intensity of each dot!\n dotlegend = []\n dotlabels = []\n dotadded = set()\n for tple in sorted(dotat, key=dotat.get):\n dotalpha = (dotat[tple] / meandot) * 0.5\n if dotalpha > 1:\n dotalpha = 1\n if dotat[tple] not in dotadded:\n rect = Rectangle((0, 100), 100, 100, linewidth=5, edgecolor='black', facecolor='black', alpha=dotalpha)\n dotlegend.append(rect)\n dotlabels.append(dotat[tple])\n dotadded.add(dotat[tple])\n markeredgewidth = markersize // 5\n plt.plot(tple[0], tple[1], 'o', color='Black', alpha=dotalpha, markersize=markersize, transform=ccrs.PlateCarree())\n plt.plot(tple[0], tple[1], 'o', color='Black', fillstyle='none', markersize=markersize, mew=markeredgewidth, transform=ccrs.PlateCarree())\n\n # how many lines and circles do we draw?\n circleat = {}\n circledata = {}\n lineat = {}\n linedata = {}\n for idx1 in dd:\n for idx2 in dd[idx1]:\n # this should only happen when we do best DNA distances\n if idx1 not in ll:\n sys.stderr.write(\"NO Lat/Lon for {}\\n\".format(idx1))\n continue\n if idx2 not in ll:\n sys.stderr.write(\"NO Lat/Lon for {}\\n\".format(idx2))\n continue\n\n linecolor = 'red'\n scaledalpha = alpha\n samecontinent = True\n if colorcontinents:\n # figure out if they are from the same continent\n m = re.search('\\d{8}_(\\w+)\\_\\d', idx1)\n cont1 = 
country2continent.get(m.groups(0)[0], \"unknown\")\n m = re.search('\\d{8}_(\\w+)\\_\\d', idx2)\n cont2 = country2continent.get(m.groups(0)[0], \"unknown\")\n if cont1 != cont2:\n linecolor = 'yellow'\n scaledalpha = alpha * 0.25\n samecontinent = False\n\n if bounds and ((ll[idx1][1] < bounds[0] or ll[idx1][1] > bounds[2]) or (ll[idx1][0] < bounds[1] or ll[idx1][0] > bounds[3])):\n if verbose:\n sys.stderr.write(\"{} out of bounds. Skipped\\n\".format(idx1))\n continue\n\n if bounds and ((ll[idx2][1] < bounds[0] or ll[idx2][1] > bounds[2]) or (ll[idx2][0] < bounds[1] or ll[idx2][0] > bounds[3])):\n if verbose:\n sys.stderr.write(\"{} out of bounds. Skipped\\n\".format(idx2))\n continue\n\n if linewidth == 0:\n linewidth = dd[idx1][idx2]\n linewidth = (linewidth/maxdist) * maxlinewidth\n if verbose:\n sys.stderr.write(\"{} to {}: distance: {} km. Genetic distance {}. Line width {}\\n\".format(\n idx1, idx2, latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0]), dd[idx1][idx2], linewidth))\n\n if latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0]) < 100:\n if verbose:\n sys.stderr.write(\"Adding a circle for {} and {}\\n\".format(ll[idx1][0], ll[idx1][1]))\n # add a red circle for this object.\n # we need to use some simple trig to find the center of the circle whose point on the circumference\n # is at our lat lon\n radius = 3\n if bounds:\n radius = 1.5\n circlon = ll[idx1][0] - (radius * math.sin(2 * math.pi))\n circlat = ll[idx1][1] - (radius * math.cos(2 * math.pi))\n\n #circ = Circle((circlon, circlat), transform=ccrs.Geodetic(), radius=radius,\n # linewidth=linewidth, alpha=scaledalpha, color=linecolor, fill=False)\n # ax.add_artist(circ)\n\n circleat[(circlon, circlat)] = circleat.get((circlon, circlat), 0) + 1\n circledata[(circlon, circlat)] = {\n 'radius' : radius,\n 'linewidth' : linewidth,\n 'alpha' : scaledalpha,\n 'color' : linecolor,\n 'fill' : False\n }\n\n\n\n else:\n # plot a red line between two points\n 
#plt.plot([ll[idx1][0], ll[idx2][0]], [ll[idx1][1], ll[idx2][1]], color=linecolor, linewidth=linewidth,\n # alpha=scaledalpha, transform=ccrs.Geodetic())\n\n linecoords = \"\\t\".join(map(str, [ll[idx1][0], ll[idx2][0], ll[idx1][1], ll[idx2][1]]))\n\n lineat[linecoords] = lineat.get(linecoords, 0) + 1\n\n linedata[linecoords] = {\n 'x' : [ll[idx1][0], ll[idx2][0]],\n 'y' : [ll[idx1][1], ll[idx2][1]],\n 'color' : linecolor,\n 'linewidth' : linewidth,\n 'alpha' : scaledalpha,\n 'samecontinent' : samecontinent\n }\n\n\n # plot the circles and lines\n\n # now we are considering lines and circles as part of the same set, since they kind of are.\n # and we use the same color gradiaten for them\n\n allvals = list(circleat.values()) + list(lineat.values())\n lmean = np.mean(allvals)\n\n lvals = set(circleat.values())\n lvals.update(lineat.values())\n lvals = sorted(lvals)\n lmax = max(lvals)\n\n normalizer = lmax # this could be lmean or lmax or something!\n\n sys.stderr.write(f\"The maximum circle or line is {lmax}. The mean is {lmean}. 
The normalizer is {normalizer}\\n\")\n sys.stderr.write(f\"There are {len(lvals)} circle or line values\\n\")\n # evenly select these colors from the list\n colorgradient = green2red\n selcolors = list(compress(colorgradient, evenly_select(len(colorgradient), len(lvals))))\n\n altcolorgradient = green2yellow\n altselcolors = list(compress(altcolorgradient, evenly_select(len(altcolorgradient), len(lvals))))\n\n colorcountsmin = {}\n colorcountsmax = {}\n colorvals = {}\n\n if linewidthbyn:\n linewidthvals = list(compress(lvals, evenly_select(len(lvals), 3)))\n # an alternative here is [1,2,3] or so.\n # if you adjust these, make sure you adjust the dot size\n for t in lineat:\n if lineat[t] <= linewidthvals[0]:\n linedata[t]['linewidth'] = pixelwidth[0]\n elif lineat[t] <= linewidthvals[1]:\n linedata[t]['linewidth'] = pixelwidth[1]\n else:\n linedata[t]['linewidth'] = pixelwidth[2]\n\n for t in circleat:\n if circleat[t] <= linewidthvals[0]:\n circledata[t]['linewidth'] = pixelwidth[0]\n elif circleat[t] <= linewidthvals[1]:\n circledata[t]['linewidth'] = pixelwidth[1]\n else:\n circledata[t]['linewidth'] = pixelwidth[2]\n\n\n # plot the lines first so the circles are on top!\n for tple in lineat:\n if plotintensity:\n idx = int((lineat[tple] / normalizer) * (len(colorgradient)-1))\n if idx >= len(colorgradient): idx = len(colorgradient) -1\n if linedata[tple]['samecontinent']:\n colorline = colorgradient[idx]\n else:\n colorline = altcolorgradient[idx]\n else:\n idx = lvals.index(lineat[tple])\n if linedata[tple]['samecontinent']:\n colorline = selcolors[idx]\n else:\n colorline = altselcolors[idx]\n\n if colorline in colorcountsmin:\n if colorcountsmin[colorline] > lineat[tple]:\n colorcountsmin[colorline] = lineat[tple]\n if colorcountsmax[colorline] < lineat[tple]:\n colorcountsmax[colorline] = lineat[tple]\n else:\n colorcountsmin[colorline] = lineat[tple]\n colorcountsmax[colorline] = lineat[tple]\n\n if colorline in colorvals:\n 
colorvals[colorline].append(lineat[tple])\n else:\n colorvals[colorline] = [lineat[tple]]\n\n plt.plot(linedata[tple]['x'], linedata[tple]['y'], color=colorline,\n linewidth=linedata[tple]['linewidth'], alpha=linedata[tple]['alpha'],\n zorder=idx+5, transform=ccrs.Geodetic())\n\n\n # do we want to do this by intensity or by number\n for tple in circleat:\n if plotintensity:\n idx = int((circleat[tple] / normalizer) * (len(colorgradient) - 1))\n if idx >= len(colorgradient): idx = len(colorgradient) -1\n circlecolor = colorgradient[idx]\n else:\n idx = lvals.index(circleat[tple])\n circlecolor = selcolors[idx]\n\n\n if circlecolor in colorcountsmin:\n if colorcountsmin[circlecolor] > circleat[tple]:\n colorcountsmin[circlecolor] = circleat[tple]\n if colorcountsmax[circlecolor] < circleat[tple]:\n colorcountsmax[circlecolor] = circleat[tple]\n else:\n colorcountsmin[circlecolor] = circleat[tple]\n colorcountsmax[circlecolor] = circleat[tple]\n\n\n if circlecolor in colorvals:\n colorvals[circlecolor].append(circleat[tple])\n else:\n colorvals[circlecolor] = [circleat[tple]]\n\n\n circ = Circle((tple[0], tple[1]), transform=ccrs.Geodetic(), radius=circledata[tple]['radius'],\n linewidth=circledata[tple]['linewidth'], alpha=circledata[tple]['alpha'],\n color=circlecolor, fill=circledata[tple]['fill'],\n zorder=100+idx)\n ax.add_artist(circ)\n\n plt.savefig(outputfile)\n\n if legendfile:\n # create a new figure for the legend\n plt.figure(1)\n ax2 = plt.axes()\n # create the boxes for the colors\n\n legends = []\n labels = []\n for c in colorgradient:\n if c in colorcountsmin:\n # here we create an Artist object but don't need to add it anywhere\n rect = Rectangle((10, 10), 10, 10, linewidth=5, edgecolor=c, facecolor=c)\n legends.append(rect)\n if colorcountsmin[c] == colorcountsmax[c]:\n labels.append(f\"{colorcountsmin[c]}\")\n else:\n labels.append(f\"{colorcountsmin[c]}-{colorcountsmax[c]}\")\n\n # combine both legends and labels to make a single legend for this 
figure\n alleg = legends + dotlegend\n allab = labels + dotlabels\n\n ax2.legend(alleg, allab)\n\n plt.savefig(legendfile)\n\n\n # sys.stderr.write(\"We drew a max of {} circles\\n\".format(max(circleat.values())))\n # sys.stderr.write(\"And we drew a max of {} lines\\n\".format(max(lineat.values())))\n sys.stderr.write(\"Circles,{}\\nLines,{}\\n\".format(\",\".join(map(str, circleat.values())), \",\".join(map(str, lineat.values()))))\n sys.stderr.write(\"Dots,{}\\n\".format(\",\".join(map(str, dotat.values()))))\n\n sys.stderr.write(\"\\nMAXIMUM VALUES\\nCirlces: {}\\nLines: {}\\nDots: {}\\n\".format(max(circleat.values()),\n max(lineat.values()),\n max(dotat.values())\n ))",
"def moisture_on_map(basemap, x, title=None, bounds=None, xcolormap='Blues_r', alpha=0.5, \n vmin=None, vmax=None, fig_nr=None):\n #xcolormap = RdYlGn\n #note: bacgroundmap must be cropped to same coordinates than x but resoltion\n # can be different\n r, c = np.shape(basemap[0])\n extent1 = (-0.5, c - 0.5, r - 0.5, -0.5) # extent into where x is re-scaled\n \n if not fig_nr:\n h = plt.figure()\n else:\n h = plt.figure(fig_nr)\n \n if not vmin:\n vmin = np.nanmin(x)\n if not vmax:\n vmax = np.nanmax(x)\n\n # show peruskartta \n plt.imshow(basemap[0], cmap=basemap[1], alpha=0.8)\n \n ## overlay moisture\n #cmap = plt.cm.get_cmap(xcolormap, 4)\n # \n plt.imshow(x, extent=extent1, cmap=xcolormap, vmin=vmin, vmax=vmax, alpha=alpha)\n #plt.colorbar()\n # overlay bounds\n if bounds is not None:\n plt.imshow(bounds,extent=extent1, cmap='RdYlGn_r')\n plt.title(title)\n return h",
"def worldplot_2(data, cc, pc):\n # define the columns of input\n # cc = data.columns[checkcol]\n #pc = data.columns[plotcol]\n \n plt.rcParams['font.size'] = 18\n # generate standart geopandas dataframe\n world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));\n #check indicies of the input dataframe and modify standart geopandas df\n world_df = world_df[world_df[\"iso_a3\"].isin(data[cc])];\n\n #world_2df.[\"OFa_all_con\"] = np.nan;\n #world_2df.sort_values(by=\"iso_a3\").head()\n for i in world_df.index:\n for j in data.index:\n if world_df.loc[i,\"iso_a3\"] == data.loc[j, cc]:\n try:\n world_df.loc[i,pc] = data.loc[j, pc];\n except: \n print(\"\\nError! Invalid Input. Example for input: OFa_all_con\")\n return\n \n\n fig, ax = plt.subplots(1,1, figsize=(22,12))\n ax.axis('off')\n \n \n if pc == \"OFa_all_con\":\n fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)\n world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={\"label\":\"\\n Chinese Development Finance in $10 bln (2000-2014)\",\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});\n else:\n fig.suptitle('Chinese Development Finance (probability)', fontsize=25)\n world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={\"label\":\"\\n Probability of receiving Chinese Development Finance (2000-2014)\",###ADDDDJUST!!!!!\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});",
"def plot_grid(information,\n var_to_plot,\n vmin,\n vmax,\n xmin,\n xmax,\n ymin,\n ymax,\n file_name,\n c_scheme,\n yax_shift,\n subplot):\n\n grain_type = True if \"grain type\" in var_to_plot else False\n\n grid = information['grid']\n dates = information['dates']\n\n # Make the plot\n\n if subplot:\n ax = subplot\n else:\n fig, ax = plt.subplots(1 ,1 ,figsize=(10 ,6))\n\n # Specify the colormap\n\n if grain_type:\n my_cmap = grain_type_colormap()\n grid = transform_grid_for_grain_type(grid)\n else:\n my_cmap = plt.get_cmap(c_scheme)\n\n\n # Set the limits of the plot\n\n x_lims = mdates.date2num([dates[0] ,dates[-1]])\n if xmin and xmax:\n x_lims_specified = mdates.date2num([xmin, xmax])\n # for spec in x_lims_specified:\n # assert (x_lims[0] < spec < x_lims[1])\n x_lims = x_lims_specified\n\n if ymin and ymax:\n information['min_height'] = ymin\n information['max_height'] = ymax\n\n extent = [x_lims[0],\n x_lims[1] ,\n information['min_height'] - yax_shift,\n information['max_height'] - yax_shift]\n\n ax.xaxis_date()\n date_format = mdates.DateFormatter('%m/%d')\n ax.xaxis.set_major_formatter(date_format)\n\n #######################\n\n im = ax.imshow(grid,\n extent= extent,\n aspect='auto',\n vmin=vmin ,vmax=vmax,\n cmap=my_cmap)\n\n\n ax.tick_params(right=True)\n\n ax.set_ylabel('Height (cm)', fontsize='x-large')\n\n if subplot:\n return ax\n else:\n\n cbar = fig.colorbar(im, ax=ax, pad=0.075)\n\n if grain_type: cbar.ax.set_yticklabels(get_grain_tick_labels())\n else: cbar.set_label(var_to_plot, fontsize='x-large')\n\n\n if file_name:\n fig.savefig(file_name ,dpi=500, bbox_inches='tight')\n\n plt.show()\n\n return 0",
"def overlay2d(img: xarray.DataArray,\n ofn: Path=None,\n mlp: Dict[str, Any]={},\n lattick: Union[float, int, list]=None,\n lontick: Union[float, int, list]=None,\n scalefn: Path=None,\n verbose: bool=False) -> dict:\n if figure is None:\n logging.error('skipping overlay plot')\n return {}\n\n title = img.filename.stem[6:-3]\n\n def _savemap(ofn, fg):\n ofn=str(ofn)\n print('\\n Saving Nexrad map:', ofn)\n plt.savefig(ofn, dpi=DPI)\n plt.close(fig)\n \n fig = cm.plotCartoMap(latlim=LATLIM,lonlim=LONLIM,figsize=FIGSIZE,\n title=title,projection=PROJECTION,\n parallels=PARALLELS,meridians=MERIDIANS,\n grid_linewidth=grid_linewidth,\n grid_color=grid_color)\n\n plt.imshow(img, origin='upper',\n extent=[img.lon[0], img.lon[-1], img.lat[0], img.lat[-1]],\n transform=ccrs.PlateCarree())\n \n plt.tight_layout()\n \n if scalefn and scalefn.is_file():\n scale = np.rot90(imageio.imread(scalefn), 2)\n ax = fig.add_axes([0.9, 0.15, 0.055, 0.3])\n ax.imshow(scale)\n ax.axis('off') # turn off all ticks, etc.\n \n if ofn is not None:\n _savemap(ofn, fig)\n else:\n plt.show()\n \n return mlp",
"def uturuncu_map(surfaceFile,dem,comp=2):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n #print(datadir)\n #print(dem)\n geosurface = pu.surface2geotiff(dem,surfaceFile,outname=None,comp=comp,nanval=-9999)\n\n #load georeferenced fem output from pu.surface2geotiff\n #geosurface = '/home/scott/research/models/pylith/scripts/geo_fem_Uz.tif'\n data,geotrans,proj = pu.load_gdal(geosurface)\n data = data*100 # N-up, units=m\n nLat, nLon = data.shape\n\n\n #NOTE: are basmap ll and ur corner or center pixel locations??\n bmap = bm.Basemap(projection='tmerc', #NOTE: if changing to 'merc' have to use latlon=True\n resolution='i',\n lon_0=-67.18,\n lat_0=-22.27,\n width=200000.0,\n height=200000.0,\n suppress_ticks=True, #set to true if using drawmeridians\n ax=ax)\n\n # Set map background\n #dem = '/home/scott/data/dems/cgiar/uturuncu_1000_1000.tif'\n # full res\n dem = os.path.join(datadir,'dems/cgiar/srtm_23_17.tif')\n bmap.background(style='srtm', file=dem, zscale=1.5)\n\n # Annotate\n bmap.drawcountries(linewidth=1,color='k')\n bmap.drawcoastlines(linewidth=1,color='k')\n meridians = np.arange(-68,-65,1)\n md = bmap.drawmeridians(meridians, labels=[0,0,0,1])\n parallels = np.arange(-24,-20,1)\n pl = bmap.drawparallels(parallels, labels=[1,0,0,0])\n\n # Overlay FEM result\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n im = bmap.imshow(data, origin='upper', alpha=0.7) #alternatively flipud(data)\n cb = bmap.colorbar(im)\n cb.set_label('{} [cm]'.format(compdict[comp]))\n\n # Uturunu Summit\n bmap.plot(-67.18, -22.27, 'r^', latlon=True,label='Uturuncu')\n\n # Location of maximum uplift\n # NOTE: multiple coordinate transforms needed here\n maxval = np.nanmax(data)\n indflat = np.nanargmax(data)\n ind = np.unravel_index(indflat, data.shape) #NOTE row,col --> (y,x)\n lon,lat = ind2latlon(ind, geosurface)\n bmap.plot(lon,lat,'y*',latlon=True,label='Uz_max')\n print('Maximum={} at ({:.2f},{:.2f})\\n'.format(maxval, lon, lat))\n\n # PLUTONS seismometers\n path = 
os.path.join(datadir,'vector/uturuncu_plutons_seis')\n sm = bmap.readshapefile(path,'seis',drawbounds=False)\n x,y = np.hsplit(np.array(bmap.seis),2)\n bmap.plot(x,y,'wv', mec='k', markersize=10, mew=2, label='3T')\n\n # Continuous GPS\n path = os.path.join(datadir,'vector/uturuncu_contGPS')\n bmap.readshapefile(path,'cGPS',drawbounds=False)\n x,y = np.hsplit(np.array(bmap.cGPS),2)\n bmap.plot(x,y,'go', mec='k', markersize=10, mew=2, label='cGPS')\n\n # Scalebar\n length = 50 #km\n # Scale in lower left\n lon = bmap.llcrnrlon + (length/2.0/100) + (bmap.lonmax - bmap.lonmin)*0.05 #pad by 5% of length, also add 1/2 length of scale length\n lat = bmap.llcrnrlat + (bmap.latmax - bmap.latmin)*0.05\n # Upper Right (todo)\n scale = bmap.drawmapscale(lon, lat, bmap.projparams['lon_0'],bmap.projparams['lon_0'],\n length, #50km\n barstyle='fancy',\n #barstyle='simple',\n fontsize=14)\n\n # More Annotations\n plt.legend(loc='upper right',numpoints=1)\n plt.title('FEM Model Output')\n #plt.savefig('map_fem.png',bbox_inches='tight')\n plt.show()",
"def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, 
outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)",
"def xx_map_plot(x, use_basemap=False, ax=None, cticks=None, region=None,\n nclasses=10, cmap_data='jet',\n title=None, regions_to_plot=None, logplot=False,\n logoffset=None, show_stat=False,\n f_kdtree=False, show_colorbar=True, latvalues=None,\n lonvalues=None, show_zonal=False,\n zonal_timmean=True, show_timeseries=False,\n scal_timeseries=1., vmin_zonal=None, vmax_zonal=None,\n bluemarble=False, contours=False, overlay=None,\n titlefontsize=14, drawparallels=True, drawcountries=True,\n show_histogram=False,\n contourf=False, land_color=(0.8, 0.8, 0.8),\n regionlinewidth=1, bins=10,\n colorbar_orientation='vertical', stat_type='mean',\n cax_rotation=0., cticklabels=None, proj='robin',\n plot_method='colormesh', boundinglat=60.,\n savefile=None, lon_0=0., lat_0=0., savegraphicfile=None,\n **kwargs):\n\n def _get_unstructured_collection(vlon, vlat, xm, vmin, vmax, basemap_object=None):\n \"\"\"\n get collection of patches for plotting of unstructured grid\n\n vlon,vlat : vertex lon/lat [ncells,npoints]\n xm: mean field, generated by timmean function\n \"\"\"\n\n #init\n Path = mpath.Path\n patches = []\n pdata = xm[0, :] * 1. # full list of data\n vmsk = np.ones_like(pdata).astype('bool') # mask to indicate which cells contain valid data\n\n for i in xrange(x.ncell):\n if np.any(vlon[i, :]) > 180.: # todo fix this properly !!!\n vmsk[i] = False\n continue\n if basemap_object is None:\n xv = vlon[i, :]\n yv = vlat[i, :]\n else:\n xv, yv = basemap_object(vlon[i, :], vlat[i, :]) # todo: how to properly deal with boundary problem ????\n if (vlon[i, :].min() < -100.) & (vlon[i, :].max() > 100.): # todo\n #... triangles across the boundaries of the projection are a problem\n # ... solution: generate two triangles ! 
TODO\n vmsk[i] = False\n continue\n\n verts = np.asarray([xv, yv]).T\n\n #--- specify how vertices are interconnected (here simple connection by lines)\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO]\n\n #--- construct object and append to library of objects ---\n path = mpath.Path(verts, codes, closed=True)\n patches.append(mpatches.PathPatch(path))\n\n pdata = np.asarray(pdata)\n\n if vmin is None:\n vmin = pdata.min()\n if vmax is None:\n vmax = pdata.max()\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n collection = PatchCollection(patches, cmap=cmap, norm=norm, alpha=1., match_original=False, edgecolors='grey') # construct library of all objects\n collection.set_array(pdata[vmsk]) # assign data values here\n\n return collection\n\n if savegraphicfile is not None:\n savegraphicfile = savegraphicfile.replace(' ', '_')\n\n if 'vmin' in kwargs.keys():\n vmin = kwargs['vmin']\n else:\n vmin = None\n if 'vmax' in kwargs.keys():\n vmax = kwargs['vmax']\n else:\n vmax = None\n\n if plot_method not in ['colormesh', 'scatter']:\n raise ValueError('Invalid plotting option %s' % plot_method)\n\n # checks\n if proj not in ['robin', 'npstere', 'spstere']:\n raise ValueError('ERROR: projection type not validated for map_plot so far: %s' % proj)\n if proj == 'npstere': # todo: for stereographic projection, scatter is used as method at the moment\n plot_method = 'scatter'\n if proj == 'spstere': # todo: for stereographic projection, scatter is used as method at the moment\n plot_method = 'scatter'\n\n if overlay is not None:\n if x.data.ndim == 2:\n if overlay.shape != x.data.shape:\n print overlay.shape, x.data.shape\n raise ValueError('Invalid geometry for overlay !')\n elif x.data.ndim == 3:\n if overlay.shape != x.data[0, :, :].shape:\n print overlay.shape, x.data.shape\n raise ValueError('Invalid geometry for overlay !')\n else:\n raise ValueError('Overlay for this geometry not supported!')\n\n #--- create new figure\n if ax is None:\n fig = plt.figure()\n\n # 
with timeseries plot?\n if show_timeseries:\n gs = gridspec.GridSpec(2, 1, wspace=0.05, hspace=0.05, bottom=0.2, height_ratios=[5, 1])\n ax = fig.add_subplot(gs[0])\n ax2 = fig.add_subplot(gs[1])\n else:\n ax = fig.add_subplot(111)\n else:\n fig = ax.figure\n if show_timeseries:\n raise ValueError('Showing timeseries and providing some prior axis is currently not impelmented!') # ##TODO\n\n # if cmap provided in kwargs, then remove it and set cmap_data\n kwargs1 = kwargs.copy()\n if 'cmap' in kwargs:\n cmap_data = kwargs1.pop('cmap')\n if ('levels' in kwargs) and (contours is False): # levels not needed\n dummy = kwargs1.pop('levels')\n\n #--- create colormap\n if hasattr(cmap_data, 'monochrome'):\n # colormap object was given\n cmap = cmap_data\n else:\n cmap = plt.cm.get_cmap(cmap_data, nclasses)\n\n # temporal mean fields as data to plot\n xm = x.timmean() # returns an array\n\n # logscale plot ?\n if logplot:\n if logoffset is None:\n if xm.min() < 0.:\n logoffset = abs(xm.min()) * 1.01\n else:\n logoffset = 0.\n else:\n logoffset = logoffset\n print ' logoffset: ', logoffset\n xm = np.log10(xm + logoffset)\n\n #--- save field that is plotted as file\n if savefile is not None:\n if savefile[:-3] != '.nc':\n savefile += '.nc'\n tmp = x.copy()\n tmp.data = xm * 1.\n tmp.time = None\n tmp.save(savefile, varname='temporal_mean_field')\n del tmp\n\n #--- set projection parameters\n if proj == 'robin': # todo: more flexible handling of projection parameters (dictionary ??)\n lon_0 = lon_0\n lat_0 = lat_0\n elif proj == 'npstere':\n lon_0 = lon_0\n lat_0 = lat_0\n boundinglat = boundinglat\n elif proj == 'spstere':\n lon_0 = lon_0\n lat_0 = lat_0\n boundinglat = -boundinglat\n else:\n raise ValueError('Unsupported projection in map_plot (unsupported means, that it was not tested yet)')\n\n #--- plot using basemap\n if use_basemap:\n llcrnrlon = None\n llcrnrlat = None\n urcrnrlon = None\n urcrnrlat = None\n\n #if a region is specfied, then the plotting boundaries are 
set\n if region is not None:\n if not hasattr(region, 'lonmin'):\n print 'WARNING map boundaries can not be set, as region ' + region.label.upper() + ' has not lat/lon information'\n else:\n dlat = (region.latmax - region.latmin) * 0.25\n dlon = (region.lonmax - region.lonmin) * 0.25\n di = 0. # with 0 it works; for other values problems may occur for negative lon!\n llcrnrlon = region.lonmin - di\n llcrnrlat = region.latmin - di\n urcrnrlon = region.lonmax + di\n urcrnrlat = region.latmax + di\n proj = 'tmerc' # use mercator projection at regional scale as robinson does not work!\n\n ############################################\n # generate Basemap map\n ############################################\n m1 = Basemap(projection=proj, lon_0=lon_0, lat_0=lat_0, ax=ax,\n llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,\n urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,\n boundinglat=boundinglat)\n\n if bluemarble:\n m1.bluemarble()\n\n if x.gridtype == 'unstructured':\n #support of unstructured grid types, like e.g. provided by ICON model\n #it assumes that the data object has a list of center coordinates which correspond to the data\n #and vlon/vlat attributes with corresponding vertices corresponding to the center coordinates\n\n if not hasattr(x, 'vlon'):\n raise ValueError('Plotting for unstructured grid not possible, as VLON attribute missing!')\n if not hasattr(x, 'vlat'):\n raise ValueError('Plotting for unstructured grid not possible, as VLAT attribute missing!')\n\n #--- generate collection of patches for Basemap plot\n collection = _get_unstructured_collection(x.vlon, x.vlat, xm, vmin, vmax, basemap_object=m1)\n\n else: # unstructured gridtype\n\n #check if all longitudes are the same. 
If so, then a plotting with different options is possible.\n #otherwise, f_kdtree is activates as default option to ensure valid plotting\n #f_kdtree_act = f_kdtree\n #if x._lon360:\n # if x._equal_lon():\n # f_kdtree_act = f_kdtree\n # else:\n # print 'WARNING: longitudes are not all the same! f_kdtree=True is therefore used!'\n # f_kdtree_act = True\n if f_kdtree:\n #use KDTRee nearest neighbor resampling to avoid stripes in plotting\n lons = np.unique(x.lon)\n lats = np.unique(x.lat)\n lons.sort()\n lats.sort()\n TLON, TLAT = np.meshgrid(lons, lats) # generate target coordinates\n XT, YT = m1(TLON, TLAT)\n X = XT.copy()\n Y = YT.copy()\n shape0 = np.shape(XT)\n XT.shape = (-1)\n YT.shape = (-1) # ... vectorize them for inertpolation\n tree = KDTree(zip(XT, YT)) # generate tree from TARGET coordinates\n\n #prepare data and interpolate\n xmap, ymap = m1(x.lon, x.lat)\n xmap.shape = (-1)\n ymap.shape = (-1)\n pts = zip(xmap, ymap) # generate points to interpolate from source data\n dist, idx = tree.query(pts, k=1) # perform nearest neighbor interpolation (returns distance and indices)\n\n #- map data to output matrix for plotting\n Z = np.ones(shape0) * np.nan\n Z.shape = (-1) # generate target vector\n omask = np.ones(shape0).astype('bool')\n omask.shape = (-1)\n\n msk1 = xm.mask.copy()\n msk1.shape = (-1)\n omask[idx] = msk1\n\n #~ omask[dist != 0.] = True\n\n xm1 = xm.copy()\n xm1.shape = (-1)\n Z[idx] = xm1 # assign data and reshape it and set generate masked array\n Z[omask] = np.nan\n Z = np.reshape(Z, shape0)\n Z = np.ma.array(Z, mask=np.isnan(Z))\n\n else: # f_kdtree --> not kdtree\n\n #/// in the following, we check if the longitudes are in the right order\n # to allow for an appropirate plotting. Basemap assumes ascending order\n # of longitudes. For global projections, problems occur if lon are given\n # as 0...360 deg. It needs to be shifted then. 
Otherwise the data will produce\n # strange stripes when plotting.\n #\n # REFERENCES:\n # * http://pl.digipedia.org/usenet/thread/15998/16891/\n if plot_method == 'colormesh':\n print 'Projection: ', proj\n if x._lon360: # if lon 0 ... 360, then shift data\n tmp_lon = x._get_unique_lon() # get unique longitudes\n tmplon1 = tmp_lon.copy()\n Z, tmp_lon = shiftgrid(180, xm, tmp_lon, start=False)\n if overlay is not None:\n overlay, nope = shiftgrid(180, overlay, tmplon1, start=False)\n lon, lat = np.meshgrid(tmp_lon, np.arange(Z.shape[0]))\n lat = x.lat\n else:\n print '*** WARNING: not lon360 not validated yet, try KDTREE option if stripes in plot ***'\n lon = x.lon\n lat = x.lat\n Z = xm\n elif plot_method == 'scatter':\n lon = x.lon\n lat = x.lat\n Z = xm\n else:\n raise ValueError('Invalid option')\n\n X, Y = m1(lon, lat)\n\n #here is still a problem in the plotting over land; masking does not work properly,\n #while the data as such is o.k.!\n #~ im1=m1.pcolormesh(xmap,ymap,xm,cmap=cmap,**kwargs) #,vmin=vmin,vmax=vmax,cmap=ccmap,norm=norm)\n\n if not bluemarble:\n\n if x.gridtype == 'unstructured':\n #--- do actual plotting\n im1 = m1.ax.add_collection(collection) # add unstructure grid plots (e.g. triangles)\n else:\n if contours:\n if 'levels' in kwargs1.keys():\n levels = kwargs1.pop('levels')\n else:\n raise ValueError('When plotting with contours, you need to specify the levels option (see contour documnetation)')\n if contourf:\n im1 = m1.contourf(X, Y, Z, levels, cmap=cmap, **kwargs1)\n else:\n im1 = m1.contour(X, Y, Z, levels, cmap=cmap, **kwargs1)\n ax.clabel(im1, inline=1, fontsize=10) # contour label\n\n else:\n if plot_method == 'colormesh':\n im1 = m1.pcolormesh(X, Y, Z, cmap=cmap, **kwargs1) # ,vmin=vmin,vmax=vmax,cmap=ccmap,norm=norm)\n elif plot_method == 'scatter':\n im1 = m1.scatter(X, Y, c=Z, marker='8', edgecolor='None', cmap=cmap, **kwargs1)\n else:\n raise ValueError('Invalid plotting option! 
%s' % plot_method)\n\n if overlay is not None:\n xcoordnew = X[overlay]\n ycoordnew = Y[overlay]\n m1.scatter(xcoordnew, ycoordnew, marker='x', s=50, c='white', edgecolors='white', linewidth=1)\n #todo: there is still a problem that the coordinates are not properly aligned with the grid cells!!!\n\n #/// ANCILLARY PLOT FOR BASEMAP ///\n __basemap_ancillary(m1, latvalues=latvalues, lonvalues=lonvalues, drawparallels=drawparallels, drawcountries=drawcountries, land_color=land_color)\n\n else: # use_basemap = False\n\n if x.gridtype == 'unstructured':\n #--- generate collection of patches for Basemap plot\n collection = _get_unstructured_collection(x.vlon, x.vlat, xm, vmin, vmax, basemap_object=None)\n im1 = ax.add_collection(collection)\n ax.set_xlim(max(x.vlon.min(), -180.), min(x.vlon.max(), 180.))\n ax.set_ylim(max(x.vlat.min(), -90.), min(x.vlat.max(), 90.))\n else:\n\n #- normal plots\n if contours:\n if contourf:\n im1 = ax.contourf(xm, cmap=cmap, **kwargs1)\n else:\n im1 = ax.contour(xm, cmap=cmap, **kwargs1)\n ax.clabel(im1, inline=1, fontsize=10) # contour label\n else:\n im1 = ax.imshow(xm, cmap=cmap, interpolation='nearest', **kwargs1)\n\n #--- overlay\n if overlay is not None:\n ny, nx = xm.shape\n xnew = arange(nx)\n ynew = arange(ny)\n XN, YN = np.meshgrid(xnew, ynew)\n del xnew, ynew\n xcoordnew = XN[overlay]\n ycoordnew = YN[overlay]\n pl.scatter(xcoordnew, ycoordnew, marker='.', s=1, c='white', edgecolors='white', alpha=1.)\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n # set legend aligned with plot (nice looking)\n divider = make_axes_locatable(ax)\n if show_zonal:\n caxv = divider.new_horizontal(size=\"3%\", pad=0.1, axes_class=maxes.Axes)\n caxh = divider.new_vertical(size=\"5%\", pad=0.1, axes_class=maxes.Axes, pack_start=True)\n caxzonaldummy = divider.new_horizontal(size=\"15%\", pad=0.1, axes_class=maxes.Axes, pack_start=True)\n # this is still not working properly !\n else:\n caxv = divider.new_horizontal(size=\"3%\", pad=0.1, 
axes_class=maxes.Axes)\n caxh = divider.new_vertical(size=\"5%\", pad=0.1, axes_class=maxes.Axes, pack_start=True)\n if colorbar_orientation == 'vertical':\n cax = caxv\n caxdummy = caxh\n elif colorbar_orientation == 'horizontal':\n cax = caxh\n caxdummy = caxv\n else:\n raise ValueError('Invalid option for colorbar! %s' % colorbar_orientation)\n\n ax.figure.add_axes(cax)\n\n vmin = im1.get_clim()[0]\n vmax = im1.get_clim()[1]\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n\n # dummy axis to ensure equal spacing in multiple plots\n caxdummy.set_xticks([])\n caxdummy.set_yticks([])\n caxdummy.set_frame_on(False)\n\n if show_colorbar:\n #plot actual colorbar\n cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm, ticks=cticks, orientation=colorbar_orientation)\n if cticklabels is not None:\n cb.set_ticklabels(cticklabels)\n else:\n cax.set_xticks([])\n cax.set_yticks([])\n cax.set_frame_on(False)\n\n #--- add a histogram below the map plot\n if show_histogram:\n add_histogram(ax, x, bins=bins)\n\n # Zonal plot\n if show_zonal:\n if x._latitudecheckok:\n add_zonal_plot(ax, x, timmean=zonal_timmean, vmin=vmin_zonal, vmax=vmax_zonal) # ,vmin=im1.get_clim()[0],vmax=im1.get_clim()[1])\n else:\n print('WARNING: zonal plot not possible due to invalid latitude configurations')\n\n def _add_region_basemap(m, r, color='red', linewidth=1):\n \"\"\"\n plot region r on top of basemap map m\n\n Parameters\n ----------\n m : Basemap object\n map\n r : Region\n region to plot\n color : str\n color to plot region\n \"\"\"\n corners = r.get_corners() # get list of corner coordinates\n corners = np.asarray(corners)\n lons = corners[:, 0]\n lats = corners[:, 1]\n x, y = m(lons, lats)\n xy = list(zip(x, y))\n mapboundary = Polygon(xy, edgecolor=color, linewidth=linewidth, fill=False, linestyle='dashed')\n m.ax.add_patch(mapboundary)\n\n def _add_region_standard(ax, r, color='red', linewidth=1):\n \"\"\"\n plot region r on top of a normal map plot\n\n Parameters\n ----------\n 
m : Basemap object\n map\n r : Region\n region to plot\n color : str\n color to plot region\n \"\"\"\n corners = r.get_corners() # get list of corner coordinates\n corners = np.asarray(corners)\n xcoords = corners[:, 0]\n ycoords = corners[:, 1]\n xy = list(zip(xcoords, ycoords))\n mapboundary = Polygon(xy, edgecolor=color, linewidth=linewidth, fill=False, linestyle='dashed')\n ax.add_patch(mapboundary)\n\n # plot regions in the map ---\n if regions_to_plot is not None:\n if use_basemap:\n for region in regions_to_plot:\n if region.type == 'latlon':\n _add_region_basemap(m1, region, linewidth=regionlinewidth)\n else:\n for region in regions_to_plot:\n if region.type == 'index':\n _add_region_standard(ax, region, linewidth=regionlinewidth)\n\n # set title\n if title is None:\n title = x._get_label()\n else:\n pass\n\n #--- show field statistics in title ?\n # calculates first the temporal mean and results shown then\n # are the means and std of the temporal mean fields which are weighted\n # appropriately according to the cell area\n if show_stat:\n tmp_xm = x.timmean(return_object=True) # from temporal mean\n if stat_type == 'mean':\n me = tmp_xm.fldmean()\n st = tmp_xm.fldstd()\n assert(len(me) == 1)\n assert(len(st) == 1)\n me = me[0]\n st = st[0]\n atitle = 'mean: $' + str(round(me, 2)) + ' \\pm ' + str(round(st, 2)) + '$'\n elif stat_type == 'sum': # area sum\n me = tmp_xm.areasum()\n assert(len(me) == 1)\n me = me[0]\n atitle = 'sum: $' + str(round(me, 2)) + '$'\n else:\n me = np.ma.median(tmp_xm.data)\n atitle = 'median: $' + str(round(me, 2)) + '$'\n ax.set_title(atitle, size=titlefontsize - 2, loc='left')\n\n ax.set_title(title + '\\n', size=titlefontsize, loc='center')\n ax.set_title(x._get_unit(), size=titlefontsize - 2, loc='right')\n\n #/// show timeseries? 
///\n if show_timeseries:\n ax2.plot(x.num2date(x.time), x.fldmean())\n ax2.grid()\n ax2.set_ylim(im1.get_clim()[0] * scal_timeseries, im1.get_clim()[1] * scal_timeseries)\n ti = ax2.get_yticks()\n n = len(ti) / 2\n ax2.set_yticks([ti[0], ti[n], ti[-1]])\n ax2.set_ylabel(x._get_unit())\n\n if savegraphicfile is not None:\n # save graphics to file\n if os.path.exists(savegraphicfile):\n os.remove(savegraphicfile)\n fig.savefig(savegraphicfile, bbox_inches='tight', dpi=200)\n return fig",
"def cartesian_map_array(self, fn, vmin=None, vmax=None, bands=8, title='',cblabel='', \n ecliptic=False, equatorial=False, nocolorbar=False, cmap=plt.get_cmap('coolwarm')):\n if vmin is None:vmin=fn.vmin\n if vmax is None: vmax=fn.vmax\n nrows, ncols = ((bands+1)//4, 4 ) if bands>=4 else (1, bands)\n \n fig, axx = plt.subplots(nrows, ncols, figsize=(3+3*ncols,1+3*nrows), sharex=True, sharey=True)\n plt.subplots_adjust(left=0.1, right=0.92, hspace=0.15, wspace=0.01, bottom=0.15)\n if ecliptic:\n lon, sinlat = self.ecliptic_coords()\n elif equatorial:\n lon, sinlat = self.equatorial_coords()\n else:\n lon = self.df.glon\n sinlat = self.singlat\n for iband,energy in enumerate(self.energy[:bands]):\n ax = axx.flatten()[iband] if bands>1 else axx\n scat=self.basic_skyplot(ax, lon, sinlat, fn(iband).clip(vmin,vmax),\n title='%d MeV'%energy,\n vmin=vmin,vmax=vmax, s=30, edgecolor='none', colorbar=False, labels=False, cmap=cmap)\n\n fig.text(0.5, 0.95, getattr(fn, 'title', title), ha='center', size=14)\n if nocolorbar: return fig\n #put colorbar at right \n cbax = fig.add_axes((0.92, 0.15, 0.02, 0.7) )\n cb=plt.colorbar(scat, cbax, orientation='vertical')\n cb.set_label(getattr(fn, 'cblabel', cblabel))\n fig.text(0.5, 0.025, 'longitude', ha='center', fontsize=14)\n fig.text(0.05, 0.5, 'sin(latitude)', rotation='vertical', va='center', fontsize=14)\n return fig"
] | [
"0.67058265",
"0.6691103",
"0.66687226",
"0.65568155",
"0.6486296",
"0.64630854",
"0.62353355",
"0.6190396",
"0.6162545",
"0.60807335",
"0.60436964",
"0.60345477",
"0.6025459",
"0.6024494",
"0.5966506",
"0.5934152",
"0.58613974",
"0.58610916",
"0.5854339",
"0.5782889",
"0.5759707",
"0.5758464",
"0.5726162",
"0.57243246",
"0.57079947",
"0.5703114",
"0.56968695",
"0.56927615",
"0.56721354",
"0.5663647"
] | 0.80338895 | 0 |
Loop through the student marking directory. | def mark_students(submitdir):
all_students = os.listdir(submitdir)
all_students.remove("copyToMarking")
for student in all_students:
# construct the path to the individual student's submission
studentdir = submitdir + os.sep + student + os.sep + "marking" + os.sep
# extract the results
extract_marks(studentdir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterStamps(self):\n try:\n names = os.listdir(self.path)\n except OSError:\n return\n for name in names:\n if name and name[0] != '.':\n try:\n yield self.stampType(name)\n except:\n pass",
"def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)",
"def run(self):\n\n config = self.config\n\n # Start fuzzy matcher\n files = self.load_files_info()\n real = config.get('students.ids', [])\n matcher = FuzzyMatcher(files.keys(), real)\n\n # Remove certain matches\n author_map = matcher.remove_exact(config.get_all_student_aliases())\n matcher.fill_distances()\n matcher.set_distance_threshold(0.90)\n\n # Match each missing author with the given real name\n while matcher.shape[0] != 0 and False:\n given_, real_ = matcher.closest_pair()\n click.echo(f'\\nBest match for {given_}')\n matches = self.ask_matches(given_, matcher.best_matches(given_, 5))\n\n if matches:\n for match in matches:\n matcher.remove_pair(given_, match)\n config.add_student_aliases(match, [given_])\n author_map[given_] = match\n else:\n matcher.remove_given(given_)\n\n # Save files\n read_zip = lambda x: self.zipfile.open(x).read()\n\n for k, f in files.items():\n if k in author_map:\n data = read_zip(f.filename)\n for name in author_map[k]:\n path = Path(f'submitted/{name}/{self.category}/{self.name}.ipynb')\n path.parent.mkdir(parents=True, exist_ok=True)\n if not os.path.exists(path):\n with open(path, 'wb') as fd:\n fd.write(data)",
"def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])",
"def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)",
"def scanGrabFolder(self):\n fnames = sorted(os.listdir(self.downloadFolder))\n self.seenIDs, self.seenTimes, self.seenHashes = [], [], []\n for fname in fnames:\n fname = fname.split(\".\")\n if len(fname) != 4:\n continue\n self.seenIDs.append(fname[0])\n self.seenTimes.append(int(fname[1]))\n self.seenHashes.append(fname[2])",
"def walk(self):\n if os.path.exists(self.folder):\n for root_path, _, f_files in os.walk(self.folder):\n yield root_path, f_files\n if not self.recursive:\n break\n else:\n print(f\"[!e] Passed folder doesn't exist. Path: {self.folder}\",\n file=sys.stdout)\n exit(0)",
"def process_loop(entries: List[StudentEntry]):\n pass",
"def scanDirectory(directoryName=\".\"):\n cluster=Cluster()\n\n #reading files and folders \n for path, folders, files in os.walk(directoryName):\n for afile in files:\n cluster.addFile(os.path.join(path,afile))\n \n cluster.clustering()",
"def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename",
"def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths",
"def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)",
"def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def do_2004(in_dir, out_dir):\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{full_path} -> {out_dir}/{idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)",
"def process_patients(self):\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for patient in os.listdir(self.root_dir):\n if \".csv\" in patient or \".md\" in patient:\n continue\n patient_pth = os.path.join(self.root_dir, patient)\n out_patient_pth = os.path.join(self.out_dir, patient)\n num_imgs = len(os.listdir(patient_pth)) // 2 # Half the length to exclude mask counts\n img_stack, msk_stack = self._stack_images_masks_flair(patient_pth, patient, num_imgs)\n if not os.path.exists(out_patient_pth):\n os.mkdir(out_patient_pth)\n self._make_slices(img_stack, msk_stack, patient, out_patient_pth)",
"def walk(dirname):\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)",
"def walk(dirname): \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)",
"def get_students(self, student_file_info):\n students_file = os.path.join(self.path, \"students.txt\")\n sep, header = student_file_info\n try:\n for student in file_reading_gen(students_file, 3, sep, header):\n # CWID | Name | Major\n cwid = student[0]\n name = student[1]\n major = student[2]\n self.students[cwid] = Student(cwid, name, major)\n except ValueError:\n raise ValueError(\"Invalid data in students.txt\")\n except FileNotFoundError as e:\n print('Missing students.txt.\\n' + str(e))",
"def identify_folder(self, folder):",
"def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index",
"def AnnotateDirectory(self, root_path):\n \n annotation_data = []\n \n for dirpath, _, filenames in os.walk(root_path):\n for filename in filenames:\n if not self._DefaultFileNameFilter(filename):\n continue\n \n file_path = os.path.abspath(os.path.join(dirpath, filename))\n logging.info(\"Processing '%s'\" % file_path)\n \n if self.source_filter and not self.source_filter.IsValid(file_path):\n logging.info(\" *SKIPPING*\")\n continue\n \n annotated_file = self.AnnotateSourceFile(file_path)\n annotation_data.append(annotated_file)\n\n return annotation_data",
"def _collect_reads(self, wildcards, _library_name, prefix):\n folder_name = get_ngs_library_folder_name(self.parent.sheets, wildcards.library_name)\n pattern_set_keys = (\"right\",) if prefix.startswith(\"right-\") else (\"left\",)\n seen = []\n for _, path_infix, filename in self.path_gen.run(folder_name, pattern_set_keys):\n path = os.path.join(self.base_path_in, path_infix, filename).format(**wildcards)\n if path in seen:\n print(\"WARNING: ignoring path seen before %s\" % path, file=sys.stderr)\n else:\n seen.append(path)\n yield path",
"def test_marking_path_parsing(self):\n \n # paths to attempt for a global AMBER marking\n global_xpaths = [\n {\n \"path\": \"//node() | //@*\",\n \"should_pass\": True\n },\n {\n \"path\": \"this is not a real xpath\",\n \"should_pass\": False\n }\n ]\n # paths to attempt for a local RED marking\n local_xpaths = [\n {\n \"path\": \"../../../descendant-or-self::node() | ../../../descendant-or-self::node()/@*\",\n \"should_pass\": True\n },\n {\n \"path\": \"this is not a real xpath\",\n \"should_pass\": False\n }\n ]\n\n for global_path_dict in global_xpaths:\n for local_path_dict in local_xpaths:\n # Format our STIX XML template\n xml = STIX_XML_TEMPLATE_GLOBAL_AND_COMPONENT.format(global_path_dict[\"path\"], local_path_dict[\"path\"])\n xml_readable = StringIO(xml)\n\n # Build and parse the MarkingContainer\n try:\n container = stixmarx.parse(xml_readable)\n except etree.XPathEvalError:\n self.assertTrue(global_path_dict[\"should_pass\"] is False or local_path_dict[\"should_pass\"] is False)\n continue\n\n package = container.package\n\n colors = [marking_spec.marking_structures[0].color for marking_spec in container.get_markings(package.indicators[0])]\n\n self.assertTrue(('AMBER' in colors) == global_path_dict[\"should_pass\"])\n self.assertTrue(('RED' in colors) == local_path_dict[\"should_pass\"])",
"def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")",
"def label_dir(self):\n for lblname in self._vallabs:\n print(lblname)",
"def fetch_stanford_labels():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'stanford_hardi')\n baseurl = 'https://stacks.stanford.edu/file/druid:yx282xq2090/'\n\n files = {}\n files[\"aparc-reduced.nii.gz\"] = (baseurl + \"aparc-reduced.nii.gz\",\n '742de90090d06e687ce486f680f6d71a')\n files[\"label-info.txt\"] = (baseurl + \"label_info.txt\",\n '39db9f0f5e173d7a2c2e51b07d5d711b')\n fetch_data(files, folder)\n return files, folder",
"def _GetLabels(self, directory, scan_subdirs, label, predicate):\n\n labels = []\n\n # Go through all of the files (and subdirectories) in that\n # directory.\n for entry in dircache.listdir(directory):\n entry_label = self._GetLabelFromBasename(entry)\n # If the label is not valid then pretend it\n # does not exist. It would not be valid to create an entity\n # with such an id.\n if not self.IsValidLabel(entry_label):\n continue\n # Compute the full path to 'entry'.\n entry_path = os.path.join(directory, entry)\n # If it satisfies the 'predicate', add it to the list.\n if predicate(entry_path):\n labels.append(self.JoinLabels(label, entry_label))\n # If it is a subdirectory, recurse.\n if (scan_subdirs and os.path.isdir(entry_path)\n and self._IsSuiteFile(entry_path)):\n labels.extend(self._GetLabels(entry_path,\n scan_subdirs,\n self.JoinLabels(label, \n entry_label),\n predicate))\n\n return labels",
"def loadSets(self, indir=\"\"):\n\n if indir==\"\":\n print(\"specify folder\")\n return -1\n\n self.train = pd.read_pickle(\"{}/train.pkl\".format(indir))\n self.valid = pd.read_pickle(\"{}/valid.pkl\".format(indir))\n self.test = pd.read_pickle(\"{}/test.pkl\".format(indir))\n\n print(\"sets loaded\")",
"def analyze_dir(self, dirname):\n if self.exceeded_max():\n return\n\n for (dirpath, dirnames, filenames) in os.walk(dir_name):\n for filename in filenames:\n self.analyze_file(dirname + \"/\" + filename)"
] | [
"0.5722948",
"0.5477371",
"0.5338399",
"0.53299177",
"0.5326988",
"0.5218644",
"0.5190301",
"0.5170352",
"0.5155074",
"0.5140579",
"0.5110162",
"0.5099991",
"0.5098159",
"0.50959295",
"0.5093526",
"0.5088655",
"0.5086895",
"0.5079497",
"0.5062495",
"0.50536406",
"0.50320464",
"0.50263345",
"0.5022952",
"0.49893138",
"0.49866423",
"0.49844626",
"0.498067",
"0.49786106",
"0.49782774",
"0.49750486"
] | 0.7576824 | 0 |
this function reads a sequence file in FASTA format and stores in a dictionary format for future manipulation | def read_fasta_to_dictionary(genome_file):
filename = genome_file
dct = {}
id_name = ""
sequence = ""
first_pass = 1
read_fh = open(filename, 'r')
for i, line in enumerate(read_fh):
line = line.rstrip()
if re.search(r'^>(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(.*)', line):
match_obj = re.search(r'^>(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(.*)', line)
if not first_pass:
dct[id_name] = sequence
id_name = match_obj.group(1)
id_name = re.sub(r',', "", id_name)
first_pass = 0
sequence = ""
elif re.search(r'^>(\S+)(.*)', line):
match_obj = re.search(r'^>(\S+)(.*)', line)
if not first_pass:
dct[id_name] = sequence
id_name = match_obj.group(1)
id_name = re.sub(r'(\d+)_', "", id_name)
id_name = re.sub(r'.*\|', "", id_name)
first_pass = 0
sequence = ""
else:
sequence += line
dct[id_name] = sequence
return dct | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict",
"def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences",
"def return_fasta_dic(file):\n seq_dict = {rec.id: rec.seq for rec in SeqIO.parse(file, \"fasta\")}\n return seq_dict",
"def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic",
"def fasta_to_dict(fasta_file):\n deflines = []\n sequences = []\n sequence = \"\"\n with open(fasta_file, \"r\") as file:\n for line in file:\n if line.startswith(\">\"):\n deflines.append(line.rstrip().lstrip('>'))\n if sequence:\n sequences.append(sequence)\n sequence = \"\"\n else:\n sequence += line.rstrip()\n sequences.append(sequence)\n fasta_dict = {}\n for x, defline in enumerate(deflines):\n fasta_dict[defline]=sequences[x]\n return fasta_dict",
"def sequenceDict(self):\n\t\twith open(self.ff) as fastaFile:\n\t\t\tsequences = {}\n\t\t\tfor name, seq in self.readFasta(fastaFile):\n\t\t\t\tsequences[name] = seq\n\t\treturn sequences",
"def FASTAfile_to_dict(FASTAfile):\n FASTADict = {}\n for line in FASTAfile:\n if '>' in line:\n FASTALabel = line\n FASTADict[FASTALabel] = \"\"\n else:\n FASTADict[FASTALabel] += line\n return FASTADict",
"def read_fasta_to_dict(path_to_file):\n if options.verbose:\n syserr(\"Reading sequences from %s \\n\" % (path_to_file))\n try:\n seq_obj = open(path_to_file, 'Ur')\n seqs = {}\n for seq in SeqIO.parse(seq_obj, 'fasta'):\n seqs[str(seq.id)] = str(seq.seq)\n except IOError:\n raise IOError('Cannot read from %s' % (path_to_file))\n\n return seqs",
"def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta",
"def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence",
"def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary",
"def get_fasta_dict(input_fasta_path):\n\n\ttry:\n\t\tnew_file = open(input_fasta_path, \"rU\")\n\t\tsequence_record_dict = SeqIO.to_dict(SeqIO.parse(new_file, \"fasta\"))\n\t\tnew_file.close()\n\t\treturn sequence_record_dict\n\texcept IOError as e:\n\t\tprint(str(e))\n\t\tsys.exit(1) # Aborts program. (exit(1) indicates that an error occurred)",
"def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences",
"def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences",
"def read_FASTA_dictionary(filename, splitstr='|', SplitHeader=False):\n return {info[0]: seq for info, seq in read_FASTA(filename, splitstr=splitstr, SplitHeader=SplitHeader)}",
"def parse_fasta(fasta_filename):\n\n sequences = {}\n\n with open(fasta_filename, \"r\") as fasta:\n\n # do our best to accept any input that looks vaguely valid\n for line in fasta:\n \n if line.startswith(\">\"):\n # take everything up to the first space as the id\n # get rid of the leading >\n # and get rid of the newline\n fasta_id = line.split(\" \")[0].replace(\">\", \"\", 1).rstrip('\\n')\n \n seq = []\n wholeseq = ''\n if fasta_id == \"\":\n raise Exceptions.MissingId(\"invalid if there is no fasta_id\")\n \n else:\n seq.append(line.rstrip('\\n'))\n # handle sequences on multiple lines\n wholeseq = \"\".join(seq)\n if len(wholeseq) == 0:\n raise Exceptions.MissingSequence(\"invalid if there is no sequence\")\n sequences[fasta_id] = wholeseq\n\n if len(sequences) == 0:\n raise Exceptions.EmptyFasta(\"invalid if there is nothing in the fasta file\")\n\n return sequences",
"def read_fasta_file(path):\n with open(path) as data_file:\n output = {}\n sequence_name = None\n for line in data_file.readlines():\n if line.startswith(\">\"):\n sequence_name = line[1:].strip()\n else:\n output.setdefault(sequence_name, \"\")\n line = \"\".join(re.findall(\"[acgtACGT]+\", line))\n\n output[sequence_name]+=line.upper()\n return output",
"def fasta_reader(path, fasta_file):\n fasta_dict = dict()\n try:\n for seq_record in SeqIO.parse(path + fasta_file, \"fasta\"):\n id_fasta = seq_record.id\n sequence = seq_record.seq\n fasta_dict[id_fasta] = sequence\n except FileNotFoundError:\n GRAPH_LOGGER.debug('External fasta file not exist!')\n return None\n\n return fasta_dict",
"def load_fasta(filepath, trim_desc=True):\n \n with open(filepath) as f:\n seqs = parse_fasta(f, trim_desc=trim_desc)\n return dict(seqs)",
"def read_file(filetxt):\n\n fasta_dict = {}\n with open(filetxt,'r') as text:\n dataset = text.readlines()\n\n for line in dataset:\n line = line.strip()\n if line.startswith('>'):\n fasta_dict[line[1:]] = ''\n current_line = line[1:]\n else:\n fasta_dict[current_line] += line\n\n return fasta_dict",
"def getseq(genomefasta):\n genomedict = {}\n for i in SeqIO.parse(open(genomefasta), \"fasta\"):\n genomedict[i.id] = str(i.seq)\n return genomedict",
"def extract_seqs(seq_filepath):\n seqs = {}\n for record in SeqIO.parse(seq_filepath.as_posix(), \"fasta\"):\n seqs[record.id] = record\n return seqs",
"def readFastaFile(filename):\n info={}\n fhr=open(filename,\"r\")\n while(True):\n line=fhr.readline()\n if not line: break\n if(\">\" in line):\n try:\n info[line.strip()[1:].split()[0]]=fhr.readline().strip()\n except ValueError:\n pass\n return info",
"def fastaDictionary(inFile, chrName=None):\n\n d = {}\n for (title, seq) in FastaIterator(inFile):\n title = title.split()[0]\n if not chrName:\n d[title] = seq\n elif chrName == title:\n d[title] = seq\n return d\n\n if chrName:\n print \"NOT ABLE TO FIND!\", chrName\n return d",
"def parse_fasta(fasta_file):\n\n fasta_dct = {}\n\n with open(fasta_file,'r') as text:\n label = ''\n for line in text:\n if line.startswith('>'):\n if label in fasta_dct.keys():\n fasta_dct[current_line] = str(''.join(fasta_dct[current_line]))\n label = line.strip()[1:]\n fasta_dct[label] = []\n current_line = label\n else:\n fasta_dct[current_line].append(line.strip())\n fasta_dct[current_line] = str(''.join(fasta_dct[current_line]))\n\n return fasta_dct",
"def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))",
"def read_fasta_to_dicts(fname, CONFIG):\n from Bio import SeqIO\n logger.info('Reading in FASTA from {}.'.format(fname))\n\n data_dicts = []\n with open(fname, \"rU\") as f:\n\n for record in SeqIO.parse(f, \"fasta\"):\n data = {}\n if record.description in CONFIG[\"fasta_header_swaps\"]:\n record.description = CONFIG[\"fasta_header_swaps\"][record.description]\n head = record.description.split(CONFIG['fasta_separator_character'])\n\n if len(head) != len(CONFIG[\"fasta_headers\"]):\n logger.warn(\"Skipping {} which had {} fields (expected {})\".format(record.description, len(head), len(CONFIG[\"fasta_headers\"])))\n continue\n for i in range(len(CONFIG[\"fasta_headers\"])):\n try:\n data[CONFIG[\"fasta_headers\"][i]] = head[i]\n data['sequence'] = str(record.seq)\n except KeyError:\n logger.critical(\"Error parsing FASTA header. Header: {}. CONFIG specifies: {}\".format(head, CONFIG[\"fasta_headers\"])); sys.exit(2)\n data_dicts.append(data)\n return data_dicts",
"def sam_parsed(sam_file):\n\n sam_file= open(sam_file)\n\n sam_dic = {}\n read_frame_dic ={}\n count = 0\n counter_1 = 0\n counter_2 = 0\n #.sam file parsed - crucial information was retrited (scaffold information)\n # start - the starting position of the locus_sequence\n #reading_frame - locus in the correct sense [0] or CR [16]\n #sequence_locus - locus sequence information\n\n for line in sam_file:\n\n if line.startswith(\"@\"):\n pass\n\n else:\n line_information = line.strip().split()\n scaffold = line_information[2]\n loci = line_information[0]\n mapping_beginning = line_information[3]\n read_frame = line_information [1]\n locus_sequence = line_information [9]\n cigar = line_information [5]\n if \"D\" in cigar or \"I\" in cigar:\n count += 1\n if \"D\" in cigar and \"I\" in cigar:\n counter_2 +=1\n a = count - counter_2\n if scaffold != \"*\":\n sam_dic[loci] = {\"scaffold\": scaffold,\n \"start\": int(mapping_beginning),\n \"reading_frame\": read_frame,\n \"sequence_locus\": locus_sequence,\n \"cigar\": cigar}\n counter_1 +=1\n print (\"Number of loci mappead on Cg: {}\".format(len(sam_dic)))\n\n print (\"Step 1 - Parse the .sam file -- Done\")\n\n #The sam_dic return a dictionary where the key is the locus(read) and the\n #value has the scaffold information, the position of the gene beginin,\n #the correct read frame of the gene, and finally the sequence of locus, in\n #the same reading frame of the Cg\n\n # \n # print (\"Number of locus with insertion or deletion \" + str(count))\n # print (\"Number of locus with insertion and deletion \" + str(counter_2))\n # print (\"Number of locus with problems \" + str(a))\n return sam_dic",
"def parse_fasta(filename):\n fn2 = '%s.shelve' % filename[:filename.rfind('.fasta')]\n with shelve.open(fn2) as dic:\n name = None\n seq = ''\n with open(filename) as F:\n for line in tqdm(F, desc=\"Parsing %s\" % filename):\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = '%s' % (line.strip())\n else:\n seq += line\n if name not in dic:\n dic[name] = seq\n return fn2",
"def parse_fasta(fasta_f, contig_data):\n\n basen = os.path.basename(fasta_f)\n [soil, ecotype, media] = basen.split(\"_\")[:3]\n\n with open(fasta_f, 'rU') as IN:\n for record in SeqIO.parse(IN, \"fasta\"):\n contig_data[record.description] = {'length': len(record.seq), 'soil': soil, 'ecotype': ecotype, 'media': media}"
] | [
"0.80082375",
"0.7946598",
"0.7937227",
"0.7748772",
"0.77080435",
"0.7703732",
"0.76890284",
"0.76189744",
"0.75971746",
"0.75659543",
"0.7555362",
"0.75448287",
"0.74920994",
"0.74830425",
"0.7425911",
"0.74154216",
"0.73772836",
"0.7331254",
"0.7291377",
"0.7243661",
"0.7210678",
"0.7112563",
"0.7081501",
"0.70161533",
"0.69896996",
"0.6850566",
"0.68356043",
"0.6819348",
"0.6787722",
"0.6783392"
] | 0.8087481 | 0 |
Update dependencies in the virtualenv. | def update_dependencies():
pip = env.virtualenv.child('bin', 'pip')
reqs = env.code_dir.child('deploy-requirements.txt')
sudo('%s -q install -U pip' % pip)
sudo('%s -q install -r %s' % (pip, reqs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_project():\n with cd(env.code_dir):\n with _virtualenv():\n run('git pull origin master')\n install_requirements()\n perform_migration()\n collect_static()",
"def update_requirements():\n\n with virtualenv(VIRTUALENV_PATH):\n cmd = ['pip install']\n cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))",
"def upgrade_dependencies():\n # upgrade pip\n print(\"Upgrading/installing any required dependencies...\")\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"pip\", \"--no-warn-script-location\"],\n shell=True, check=True)\n print(\"pip package manager has been upgraded to the latest version\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework has been upgraded to the latest version\")\n print(\"PyQT5 has been upgraded to the latest version\")",
"def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' % INFRA_GO_PKG])",
"def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)",
"def sync_virtualenv(ctx):\n if not path.isfile(\"./pyenv/bin/pip\"):\n ctx.run(\"virtualenv --no-site-packages --python=/usr/bin/python2.7 pyenv\")\n ctx.run(\"PIP_DOWNLOAD_CACHE=/var/tmp/ ./pyenv/bin/pip install -r requirements.txt\")\n print(\"\"\"\n Installation completed. Please check any error messages above.\n\n If you are going to use `openstack` or ansible directly on the command line, run\n\n . ./pyenv/bin/activate\n\n or even add it to your ~/.bashrc\n \"\"\")",
"def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)",
"def pipupdate():\n\n packages = [d for d in pkg_resources.working_set]\n subprocess.call('pip install --upgrade ' + ' '.join(packages))",
"def update_requirements():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Update virtualenv requirements based on requirements.txt file?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(\"%s\" % env.repo_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"pip install\"\n \" --requirement %s/requirements.txt\" % env.repo_path\n )",
"def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()",
"def do_update(args, config):\n error_detected = 0\n output(\"Updating virtualenv packages...\")\n sections = args.venvs or config.sections()\n for s in sections:\n try:\n v = config[s]\n except KeyError:\n error(f\"Unknown virtualenv {s}\")\n error_detected = 1\n continue\n if \"skip\" in v:\n rc = subprocess.run(v[\"skip\"]).returncode\n if not rc:\n output(\"Skipping...\")\n continue\n path = get_path(s, config)\n if not os.path.exists(os.path.expanduser(path)):\n output(f\"Virtualenv {s} does not exist ({path})\")\n error_detected = 1\n continue\n output(f\"Updating virtualenv {s} at {path}\")\n # Kudos: https://stackoverflow.com/a/3452888/197789\n rc = subprocess.run(f'source {path}/bin/activate'\n f' && {PIP} install --upgrade pip'\n \" && pip list --outdated --format=freeze |\"\n \" grep -v '^\\\\-e' | cut -d = -f 1 |\"\n \" xargs -n1 pip install -U\",\n shell=True).returncode\n if rc:\n error_detected = 1\n continue\n return 1 if error_detected else 0",
"def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')",
"def setup():\r\n global venvs\r\n\r\n try:\r\n os.mkdir(basedir)\r\n except OSError, e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n os.chdir(basedir)\r\n\r\n # Delete virtualenvs and recreate\r\n for venv in glob('venv-*'):\r\n shutil.rmtree(venv)\r\n for py in available_python_versions():\r\n check_call(['virtualenv', '-p', py,\r\n '--system-site-packages', 'venv-%s' % py])\r\n venvs.append((py, 'venv-%s' % py))\r\n\r\n # Check out and update the repository\r\n if not os.path.exists('Theano'):\r\n try:\r\n check_call(['git', 'clone', ipy_repository])\r\n except CalledProcessError:\r\n check_call(['git', 'clone', ipy_http_repository])\r\n os.chdir(repodir)\r\n check_call(['git', 'checkout', 'master'])\r\n try:\r\n check_call(['git', 'pull', ipy_repository, 'master'])\r\n except CalledProcessError:\r\n check_call(['git', 'pull', ipy_http_repository, 'master'])\r\n os.chdir(basedir)",
"def setup():\n global venvs\n \n try:\n os.mkdir(basedir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n os.chdir(basedir)\n \n # Delete virtualenvs and recreate\n for venv in glob('venv-*'):\n shutil.rmtree(venv)\n for py in available_python_versions():\n check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py])\n venvs.append((py, 'venv-%s' % py))\n \n # Check out and update the repository\n if not os.path.exists('ipython'):\n try :\n check_call(['git', 'clone', ipy_repository])\n except CalledProcessError :\n check_call(['git', 'clone', ipy_http_repository])\n os.chdir(repodir)\n check_call(['git', 'checkout', 'master'])\n try :\n check_call(['git', 'pull', ipy_repository, 'master'])\n except CalledProcessError :\n check_call(['git', 'pull', ipy_http_repository, 'master'])\n os.chdir(basedir)",
"def _reinstall_all_dependencies() -> None:\n _pip_install_requirements(\n common.THIRD_PARTY_PYTHON_LIBS_DIR,\n common.COMPILED_REQUIREMENTS_FILE_PATH\n )",
"def upgrade(self, dependencies = False):\n pip_args = []\n proxy = environ.get('http_proxy')\n if proxy:\n pip_args.append('--proxy')\n pip_args.append(proxy)\n pip_args.append('install')\n pip_args.append(self.pkg)\n if self.index is not None:\n pip_args.append('-i')\n pip_args.append(\"{}/\".format(self.index))\n if not dependencies:\n pip_args.append(\"--no-deps\")\n if self._get_current() != [-1]:\n pip_args.append(\"--upgrade\")\n a=pip.main(pip_args)\n return a==0",
"def sub_install_python_requirements():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # Install Python requirements\n install = 'pip install -r /vagrant/Flask_app/requirements.txt'\n\n # Join and execute the commands\n run(activate + '; ' + install)",
"def install_dependencies():\n\n # check python version and verify we are using Python 3\n if sys.version[0] < '3':\n print(\"ERROR: python version 3 required. You are using version \"\n \"{}\".format(sys.version))\n print(\"You must install python 3 from https://www.python.org\")\n print(\"Make sure to check the 'pip' package manager option when\")\n print(\"installing python\")\n return\n try:\n import pip\n except ModuleNotFoundError:\n print(\"The python 'pip' package manager is required.\")\n print(\"Go to https://www.python.org and download Python 3\")\n print(\"When re-installing, select 'modify' and make sure\")\n print(\"to check the 'pip' option\")\n return\n\n print(\"Python 3 and pip is installed\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"-q\", \"--user\",\n \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework is installed and up to date\")\n print(\"PyQT5 is installed and up to date\")",
"def update_requirements():\n with cd(REMOTE_REPO_DIR):\n cmd = ['npm install']\n # cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))",
"def install_requirements():\n _git_pull()\n _install_requirements()\n _syncdb()\n _migrate()\n _restart_webserver()",
"def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def update_project():\n _require_environment()\n\n # Grants write rights on log dir for the admin group\n log_dir = '%s/log' % _interpolate(VIRTUALENV_DIR)\n if files.exists(log_dir):\n sudo('chmod -R g+w %s' % log_dir)\n\n # Updates from git, issues Django syncdb, South migrate, Collecstatic and resets Apache\n branch = env.project.get('git_branch', 'master')\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n with settings(hide('warnings'), warn_only=True):\n run('git fetch origin %s:%s' % (branch, branch))\n run('git checkout %s' % branch)\n with settings(hide('warnings'), warn_only=True):\n run('git pull origin %s' % branch)\n run('django-admin.py syncdb --noinput')\n run('django-admin.py migrate')\n run('touch config/wsgi*')\n run('django-admin.py collectstatic --noinput')",
"async def update(self, ctx):\n # read original contents of pipfile\n with open('Pipfile') as f:\n original_pipfile = f.read()\n\n # run git pull. If nothing new is pulled, exit here.\n pull_output = await ctx.invoke(ctx.bot.get_command('pull'))\n\n if 'updating' not in pull_output.lower():\n return\n\n commit_message = subprocess.run(['git', 'log', '-1', '--pretty=%B'], stdout=subprocess.PIPE)\n await ctx.send('```yaml\\n{}```'.format(commit_message.stdout.decode('utf-8')))\n\n # read new contents of pipfile\n with open('Pipfile') as f:\n new_pipfile = f.read()\n\n # if no package changes, we just reload the changed extensions.\n # Unless if the main file was changed, which cannot be reloaded,\n # in which case the bot must be restarted.\n if new_pipfile == original_pipfile:\n pattern = r\" cogs\\/(.*).py *\\| [0-9]{1,9} \\+{0,}-{0,}\\n\"\n names = re.findall(pattern, pull_output)\n if not names or 'main' not in names:\n reload_cmd = ctx.bot.get_command('reload')\n for name in names:\n # first subgroup is either helpers or commandcogs, which we don't care about\n await ctx.invoke(reload_cmd, extension_name=name[0])\n await ctx.send('Up to date.')\n return\n\n else:\n # run pipenv install to get all the latest packages\n await ctx.send('Running `pipenv install`, please hold...')\n # Note: when tested in the wild, the bot seemed to be restarted by systemd hereish\n res = subprocess.run(['pipenv', 'install'])\n if res.returncode != 0:\n await ctx.send(\n 'Uh oh, found an error while running `pipenv install`. Time for you to get on fixing it.')\n return\n\n # give a verbal notice if our service file (which restarts us) is not running\n res = subprocess.run(['systemctl', 'status', 'mothbot'], stdout=subprocess.PIPE)\n if res.returncode != 0:\n await ctx.send('WARNING: Error fetching mothbot.service status. 
Make sure I get restarted.')\n elif 'Active: active (running)' not in res.stdout.decode('utf-8'):\n await ctx.send('WARNING: mothbot.service does not appear to be running. Restart me manually.')\n\n # logout\n await ctx.bot.logout()",
"def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())",
"def deploy():\n git_pull()\n if confirm(\"Install/upgrade requirements with pip?\"):\n install_requeriments()\n django_command('collectstatic')\n django_command('migrate')\n restart()",
"def bootstrap():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements()",
"def update(self, env):\n del env\n return",
"def build_virtualenv():\n\n puts(yellow(\"Install dependencies from requirements.txt\"))\n with cd(env.source_dir):\n with prefix('source %s' % in_rwd('bin/activate')):\n sudo('pip install -r %s' % env.requirements_file,\n user=env.app_user)\n sudo('python setup.py develop', user=env.app_user)",
"def update_deps(self, **kwargs):\n enzi = kwargs.get('enzi', self.enzi)\n if not isinstance(enzi, Enzi):\n return\n self.info('start updating')\n enzi.init(update=True)\n self.info('updating finished')"
] | [
"0.7614464",
"0.75438637",
"0.74239606",
"0.7362422",
"0.7184676",
"0.71728396",
"0.709127",
"0.7058197",
"0.70214695",
"0.68973327",
"0.6793373",
"0.6724697",
"0.6512721",
"0.64484334",
"0.6406469",
"0.63691974",
"0.6361465",
"0.6345305",
"0.62930983",
"0.6189988",
"0.6175142",
"0.61648375",
"0.61560345",
"0.6147601",
"0.61277586",
"0.60812825",
"0.6026542",
"0.60182357",
"0.6014777",
"0.6003176"
] | 0.81182396 | 0 |
Copy the production DB locally for testing. | def copy_db():
local('ssh %s pg_dump -U djangoproject -c djangoproject | psql djangoproject' % env.hosts[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")",
"def create_prod_db():\n _create_database(is_production=True)",
"def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession",
"def update_dev_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")\n\n local(\"dropdb gsi\")\n local(\"createdb gsi\")\n local(\"tar zxvf latest.sql.tgz\")\n local(\"psql gsi < latest.sql\")\n local(\"rm latest.sql latest.sql.tgz\")",
"def sync_prod_db(env=None, reset_db=False, haus_vars={}):\n print green('sync/migrate DB')\n if reset_db:\n # uncomment below and replace DATABSE_URL with the prod database url\n # note that this is destructive of the PROD DB\n #local('heroku pg:reset DATABASE_URL') #add \"--confirm haus\" to remove required input\n pass\n local('heroku run ./manage.py migrate -a {}'.format(APP_INFO[env][\"heroku_app_name\"]))",
"def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)",
"def _dump_remote_db(c):\n env = c.config\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%Hh%Mm%Ss\")\n dump_filename_base = \"{project_name}-{file_key}-{timestamp}.sql\"\n file_key = env.verbose_name\n dump_dir = env.db_dump_dir\n database_name = env.db_name\n file_key = \"{}-full\".format(file_key)\n\n dump_filename = dump_filename_base.format(\n project_name=env.project_name,\n file_key=file_key,\n timestamp=timestamp\n )\n\n backup_location = os.path.join(\n dump_dir, dump_filename\n )\n\n with Connection(env.hosts, user=env.user, config=c.config) as c:\n\n c.run(\n 'echo Dumping {} database...'.format(env.verbose_name)\n )\n c.run(\n 'mysqldump --defaults-file={defaults_file} '\n '{database_name} > {backup_location}'.format(\n defaults_file=env.mysql_defaults_file,\n database_name=database_name,\n backup_location=backup_location\n )\n )\n return backup_location",
"def copy_db(src=FRESHDB, dst=[APPDB]):\n for dest in dst:\n try:\n x = shutil.copy2(src, dest)\n print('File copied to {}'.format(x))\n except shutil.SameFileError:\n print('Both source and destination are identical.')",
"def reset_database_to_default(self):\n _src = os.path.abspath(self.default_db)\n _dst = os.path.abspath(self.db_path)\n copyfile(_src, _dst)",
"def mysqldump():\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db.sql\")",
"def mysql_import():\n # first make another copy of the db\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql\")\n # then import from the backup\n run(\"mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql\")",
"def backup_database(self):\n backup_file = \"{}-{}.sql\".format(\n config.DATABASE_NAME, datetime.today().strftime(\"%Y-%m-%d--%H%M\")\n )\n backup_uri = \"{}/{}\".format(config.DATABASE_BACKUP_BUCKET, backup_file)\n step = \"Backing Up Database:\\nbackup={}\".format(backup_uri)\n try:\n self.slacker.send_thread_reply(step)\n backup_command = [\n \"gcloud\",\n \"sql\",\n \"export\",\n \"sql\",\n config.DATABASE_INSTANCE_NAME,\n backup_uri,\n \"--database={}\".format(config.DATABASE_NAME),\n \"--verbosity=debug\",\n ]\n subprocess.run(backup_command, check=True)\n except Exception as e:\n self.raise_step_error(step=step, error=e)",
"def prepare_push():\n print(\"Preparing to push\")\n cur = conn.cursor()\n try:\n for tname in TABLES:\n with open(f'{tname}.db', 'w') as f:\n print(f\"Copying {tname}\")\n cur.copy_to(f, f'\"{tname}\"')\n return True\n except IOError:\n print(\"IO ERROR\")\n return False\n finally:\n cur.close()",
"def db_small_path():\n return os.path.join(_here, 'fixtures/databases/db-small/database')",
"def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")",
"def dump_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"LANG\": \"en_US.UTF-8\",\n }\n\n env = os.environ\n env.update(default_env)\n\n c.run(f\"pg_dump -h localhost -p 5432 -U postgres {dbname} > {fpath}\", env=env)",
"def restore_base_data():\n\n db_dirname = os.path.dirname(os.path.realpath(__file__))\n shutil.copyfile(src=os.path.join(db_dirname, 'consolemini.base.json'),\n dst=os.path.join(db_dirname, 'consolemini.test.json'))",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)",
"def sync_database(c, environment, ingest=True):\n ingest = _prep_bool_arg(ingest)\n env = c.config[environment]\n c.config.load_overrides(env)\n settings.configure()\n current_settings = _get_settings_file()\n remote_location = _dump_remote_db(c)\n local_dumpfile_location = _retrieve_db_dumpfile(c, remote_location)\n\n if ingest is True:\n pass # _ingest_db(env, local_dumpfile_location)",
"def copy_build():\n\n print 'Copying build file to Android assets directory...',\n\n src = BUILD_PATH + DB_NAME\n dst = ASSETS_PATH + DB_NAME\n \n shutil.copyfile(src, dst)\n \n print 'done.'",
"def clone(source, destination):\n\t\treturn \"CREATE DATABASE {0} WITH TEMPLATE {1};\".format(destination, source)",
"def reset_local_db():\n require('root', provided_by=('production', 'staging'))\n question = 'Are you sure you want to reset your local ' \\\n 'database with the %(environment)s database?' % env\n if not console.confirm(question, default=False):\n utils.abort('Local database reset aborted.')\n if env.environment == 'staging-environment':\n from mwana.malawi.settings_staging import DATABASES as remote_dbs\n elif env.environment == 'production-environment':\n from mwana.malawi.settings_production import DATABASES as remote_dbs\n from mwana.localsettings import DATABASES as local_dbs\n remote_db = remote_dbs['default']['NAME']\n local_db = local_dbs['default']['NAME']\n with settings(warn_only=True):\n local('dropdb %s' % local_db)\n local('createdb %s' % local_db)\n host = '%s@%s' % (env.user, env.hosts[0])\n local('ssh -C %s pg_dump -Ox %s | psql %s' % (host, remote_db, local_db))",
"def db():\n\n db_obj = dump_db.DumpDB()\n db_obj.load_from_csv(CONF.BACKUP_DB_PATH)\n return db_obj",
"def reload_db(self):\n if not settings.DATABASE_ENGINE in ['sqlite3', 'postgresql_psycopg2']:\n return None\n # Close connection to cleanly swap databases.\n connection.close()\n if settings.DATABASE_ENGINE == 'sqlite3':\n shutil.copyfile(self.db_backup_path, self.db_path)\n if settings.DATABASE_ENGINE == 'postgresql_psycopg2':\n # Establish a temporal connection to template1 database and\n # recreate TEST_DB_NAME.\n connection.settings_dict[\"DATABASE_NAME\"] = 'template1'\n cursor = connection.cursor()\n connection.creation.set_autocommit()\n cursor.execute(\"DROP DATABASE IF EXISTS %s\" % self.db_name)\n cursor.execute(\"CREATE DATABASE %s WITH TEMPLATE %s_backup\" % (\n self.db_name, self.db_name))\n connection.close()\n # Change the connection to the new test database.\n settings.DATABASE_NAME = self.db_name\n connection.settings_dict[\"DATABASE_NAME\"] = self.db_name\n # Get a cursor (even though we don't need one yet). This has\n # the side effect of initializing the test database.\n connection.cursor()\n return True",
"def create_db_from_scratch():\n if os.path.isfile('data.db'):\n os.remove('data.db')\n Base.metadata.create_all(engine)",
"def backup_debug_data(context):\n if config.is_debug():\n # The debugdata is a folder generated by dnf when using the --debugsolver dnf option. We switch on the\n # debug_solver dnf config parameter in our rhel-upgrade dnf plugin when LEAPP_DEBUG env var set to 1.\n try:\n context.copytree_from('/debugdata', DNF_DEBUG_DATA_PATH)\n except OSError as e:\n api.current_logger().warning('Failed to copy debugdata. Message: {}'.format(str(e)), exc_info=True)",
"def backup_database(db_host=None, db_name=None, cfg='project'):\n data = __salt__['mc_project.get_configuration'](cfg)\n db = data['data']['django_settings']['DATABASES']['default']\n if not db_host:\n db_host = db['HOST']\n if not db_name:\n db_name = db['NAME']\n dump_filename = '/tmp/{0}-{1}.dump'.format(\n db_name,\n datetime.now().strftime('%Y-%m-%d-%H-%M'))\n script = BACKUP.format(**locals())\n script += \"exit $?\\n\"\n ret = run(host=db_host, script=script)\n if ret['retcode']:\n pprint(ret)\n raise Exception('dump failed')\n return dump_filename",
"def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'",
"def distributed_clean_db(empty_db):\n team.load_file(GOOD_TEST_TEAM_FILE, False)\n game.load_file(GOOD_TEST_GAME_FILE)\n game.load_file(join(TEST_DATA_DIR, \"distribution2.csv\"))\n game.load_file(join(TEST_DATA_DIR, \"distribution3.csv\"))\n service.set_player_codes()\n team.set_matches()\n return empty_db",
"def init_db(self):\n if self.is_client() or not self.is_responsible_validator():\n return\n\n ip, _ = self.experiment.get_peer_ip_port_by_id(self.my_id)\n\n self.db_path = os.path.join(\"/tmp\", \"postgres-data\", ip)\n shutil.rmtree(self.db_path, ignore_errors=True)\n os.makedirs(self.db_path, exist_ok=True)\n\n os.system(\"/usr/lib/postgresql/11/bin/initdb %s > postgres.out\" % self.db_path)"
] | [
"0.7579767",
"0.73710656",
"0.6812004",
"0.67789894",
"0.6553059",
"0.65024436",
"0.64336926",
"0.64127076",
"0.63445675",
"0.6238479",
"0.6173804",
"0.6080428",
"0.6049761",
"0.6043762",
"0.5998744",
"0.5943636",
"0.5920916",
"0.5919961",
"0.5919428",
"0.5902814",
"0.58932894",
"0.58925253",
"0.5873082",
"0.58320826",
"0.5830201",
"0.5818928",
"0.575769",
"0.57481873",
"0.57473326",
"0.5741038"
] | 0.7857592 | 0 |
Copy build docs locally for testing. | def copy_docs():
local('rsync -av --delete --exclude=.svn %s:%s/ /tmp/djangodocs/' %
(env.hosts[0], env.deploy_base.child('docbuilds'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)",
"def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')",
"def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )",
"def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", \"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)",
"def upload():\n env.user = 'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)",
"def generate():\n local('cd doc && make clean && make html')",
"def update_docs():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n docs_path = os.path.join(site_path, 'doc_src')\n with cd(docs_path):\n run('git reset --hard && git pull --all')\n run('workon djangopatterns && cd doc_src && make clean')\n run('workon djangopatterns && cd doc_src && make json')",
"def deploy_sphinx_docs():\n require('docs_root', 'docs_install_dir')\n sphinx.build_html_docs(env.docs_root)\n sudo('mkdir -p {}'.format(env.docs_install_dir))\n sphinx.deploy_html_docs(env.docs_root,\n env.docs_install_dir)",
"def copy_project_docs(srctree):\n docdir = os.path.join(srctree, 'Doc')\n\n # This block shouldn't be here, but I do not yet know how to\n # embed this in ReST files.\n extra_info = {}\n if os.path.exists(os.path.join(docdir, 'website.lst')):\n fd = open(os.path.join(docdir, 'website.lst'))\n for ln in fd.readlines():\n if ln.startswith('#'): continue\n fields = ln.split(',')\n extra_info[fields[0].strip()] = {\n 'section': fields[1].strip(),\n 'priority': int(fields[2].strip()),\n }\n\n docs = [ os.path.join(docdir, fn)\n for fn in os.listdir(docdir) if fn.endswith('.txt') ]\n docs.append(os.path.join(srctree, 'Install.txt'))\n docs.append(os.path.join(srctree, 'NEWS.txt'))\n docs.append(os.path.join(docdir, 'tutorial', 'tutorial.txt'))\n docs.append(os.path.join(docdir, 'tutorial_embed', 'extending_objc_with_python.txt'))\n NAMES = {\n os.path.join(srctree, 'Examples', '00ReadMe.txt') : 'Examples.txt',\n }\n docs.extend(NAMES)\n\n alldocs = {}\n\n for fname in docs:\n print \"-\", fname\n docinfo = {}\n\n bn = NAMES.get(fname)\n if bn is None:\n bn = os.path.split(fname)[-1]\n if bn in ('index.txt', 'announcement.txt'):\n continue\n if extra_info.has_key(bn):\n docinfo.update(extra_info[bn])\n\n if bn.endswith('.txt'):\n bn = bn[:-3].lower() + \"php\"\n else:\n bn = bn.lower() + '.php'\n fd = open(fname)\n input = fd.read()\n fd.close()\n output = docutils.core.publish_string(\n source = input,\n source_path = fname,\n destination_path = bn,\n writer_name = 'hthtml')\n \n output_lines = output.split('\\n')\n for i in range(len(output_lines)):\n if output_lines[i] == '':\n break\n idx = output_lines[i].find(':')\n if idx == -1:\n break\n\n key = output_lines[i][:idx].strip()\n value = output_lines[i][idx+1:].strip()\n docinfo[key] = value\n\n output = '\\n'.join(output_lines[i:])\n if not docinfo.has_key('title'):\n docinfo['title'] = bn\n alldocs[bn] = docinfo\n \n fd = open(os.path.join('docroot', 'doc', bn), 'w')\n fd.write(PHP_HEADER%docinfo)\n\n 
fd.write(output);\n\n fd.write(PHP_FOOTER)\n\n # Calculate indices for user and developer documentation\n docs = alldocs.keys()\n developer_docs = []\n user_docs = []\n\n for doc in alldocs:\n if not alldocs[doc].has_key('section'):\n print \"Skipping\", doc\n continue\n\n if alldocs[doc]['section'] == 'user':\n user_docs.append([alldocs[doc]['title'], doc])\n elif alldocs[doc]['section'] == 'developer':\n developer_docs.append([alldocs[doc]['title'], doc])\n\n def doccmp(a, b):\n r = cmp(alldocs[a[1]]['priority'], alldocs[b[1]]['priority'])\n if r != 0: return r\n\n return cmp(a[1], b[1])\n user_docs.sort(doccmp)\n developer_docs.sort(doccmp)\n \n # Rewrite the indices (substitute the current document lists)\n for fname in ('index.php', 'usage.php', 'developer.php'):\n fd = open(os.path.join('docroot', 'doc', fname), 'r')\n index_php = fd.readlines()\n fd.close()\n\n fd = open(os.path.join('docroot', 'doc', fname), 'w')\n skip = 0\n for ln in index_php:\n if not skip:\n fd.write(ln)\n if ln.find('/USERDOC') != -1:\n skip = 0\n fd.write(ln)\n elif ln.find('USERDOC') != -1:\n skip = 1\n for title, link in user_docs:\n fd.write('<LI><A HREF=\"%s\">%s</A>\\n'%(link, title))\n if ln.find('/DEVDOC') != -1:\n skip = 0\n fd.write(ln)\n elif ln.find('DEVDOC') != -1:\n skip = 1\n for title, link in developer_docs:\n fd.write('<LI><A HREF=\"%s\">%s</A>\\n'%(link, title))\n\n EXAMPLES = os.path.join('docroot', 'doc', 'examples.php')\n OUTEXAMPLES = os.path.join('docroot', 'examples', 'index.php')\n replace_examples_svn(EXAMPLES)\n if os.path.exists(OUTEXAMPLES):\n os.unlink(OUTEXAMPLES)\n shutil.copyfile(EXAMPLES, OUTEXAMPLES)\n\n # Copy tutorial files\n TUTORIAL_ENDINGS = ['.nib', '.py', '-src', '.h', '.m']\n tutdir = os.path.join(docdir, 'tutorial')\n files = os.listdir(tutdir)\n replacements = []\n for fn in files:\n for ext in TUTORIAL_ENDINGS:\n if fn.endswith(ext):\n dstname = os.path.join('docroot', 'doc', fn)\n replacements.append(copy_tutorial_file(fn, tutdir, 
dstname))\n break\n replace_tutorial_zips(os.path.join('docroot', 'doc', 'tutorial.php'), replacements)\n \n tutdir = os.path.join(docdir, 'tutorial_embed', 'src')\n files = os.listdir(tutdir)\n if not os.path.exists(os.path.join('docroot', 'doc', 'src')):\n os.mkdir(os.path.join('docroot', 'doc', 'src'))\n for fn in files:\n for ext in TUTORIAL_ENDINGS:\n if fn.endswith(ext):\n dstname = os.path.join('docroot', 'doc', 'src', fn)\n replacements.append(copy_tutorial_file(fn, tutdir, dstname))\n break\n replace_tutorial_zips(os.path.join('docroot', 'doc', 'tutorial.php'), replacements)\n\n #print \"Don't forget to update docroot/doc/tutorial.php: it's references to\"\n #print \"'step3-MainMenu.nib' and 'step12-src' should be changed to ZIP files\"",
"def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }",
"def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)",
"def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])",
"def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)",
"def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")",
"def do_build():\n tmp_dir = Path(tempfile.mkdtemp())\n (tmp_dir / \"integrations-docs.js\").write_text(render_js_module(collect_docs(), *collect_metrics(), collect_meta()))\n\n symlink_images(tmp_dir)\n\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n tmp_dir.replace(OUT_DIR)",
"def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)",
"def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )",
"def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()",
"def build_docs(branch):\n os.chdir(os.path.join(gitdname, 'docs'))\n sphinx_dir = os.path.join(virtual_dir,'bin')\n retcode = subprocess.call(\"make clean\", shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not clean the html docs for branch %s\"\"\" % branch\n raise Exception(msg)\n #NOTE: The python call in the below makes sure that it uses the Python\n # that is referenced after entering the virtualenv\n sphinx_call = \" \".join(['make','html',\n \"SPHINXBUILD=' python /usr/local/bin/sphinx-build'\"])\n activate = os.path.join(virtual_dir, \"bin\", \"activate\")\n activate_virtualenv = \". \" + activate\n #NOTE: You have to enter virtualenv in the same call. As soon as the\n # child process is done, the env variables from activate are lost.\n # getting the correct env from bin/activate and passing to env is\n # annoying\n retcode = subprocess.call(\" && \".join([activate_virtualenv, sphinx_call]),\n shell=True,\n env = {'MATPLOTLIBRC' : # put this in the environment to use local rc\n '/home/skipper/statsmodels/statsmodels/tools/',\n # Need this for my openblas setup on my laptop\n 'LD_LIBRARY_PATH' : os.getenv('LD_LIBRARY_PATH')})\n\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not build the html docs for branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)",
"def build_docs(source, destination, doctrees):\n sphinx_argv = [\n '-b', 'html',\n '-d', doctrees,\n source,\n destination]\n\n sphinx_main(['sphinx-build'] + sphinx_argv)",
"def opendocs():\n _open_file('_build/index.html')",
"def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)",
"def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])",
"def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)",
"def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()",
"def copy_files():\n os.makedirs('build/usr/lib/python3/dist-packages', exist_ok=True)\n os.makedirs('build/usr/share/doc', exist_ok=True)\n\n shutil.copytree('applications', 'build/usr/share/applications')\n shutil.copytree('doc', 'build/usr/share/doc/qastetray')\n shutil.copytree('icons', 'build/usr/share/icons')\n shutil.copytree('locale', 'build/usr/share/locale')\n shutil.copytree('qastetray',\n 'build/usr/lib/python3/dist-packages/qastetray')",
"def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory",
"def run_local_doc():\n\tcfg = settings.LocalConfig()\n\tapp = make_app(blueprints.developer_portal, settings.LocalConfig)\n\tapp.run(host = cfg.SERVERNAME, port = cfg.DOC_PORT, debug = True)",
"def setUp(self):\n self.builder = SphinxBuilder()\n\n # set up a place for a fake sphinx project\n self.twistedRootDir = FilePath(self.mktemp())\n self.sphinxDir = self.twistedRootDir.child(\"docs\")\n self.sphinxDir.makedirs()\n self.sourceDir = self.sphinxDir",
"def _build_html():\n\n # Build twice until getnikola/nikola#1032 is fixed.\n local('nikola build && nikola build')\n\n ## Remove all the source files, we only want the output!\n local('ls | grep -v output | xargs rm -rf')\n with settings(warn_only=True):\n local('mv output/* output/.* .')"
] | [
"0.6975561",
"0.6930959",
"0.6866347",
"0.68147665",
"0.67821646",
"0.6724307",
"0.67216283",
"0.66714007",
"0.65250874",
"0.6519332",
"0.6382937",
"0.63510007",
"0.62922466",
"0.62160635",
"0.6208253",
"0.62051785",
"0.61902285",
"0.61704403",
"0.6158973",
"0.6112555",
"0.5986065",
"0.59763086",
"0.5973885",
"0.59508026",
"0.59176433",
"0.5910637",
"0.59058625",
"0.5869258",
"0.58374965",
"0.5835946"
] | 0.78566396 | 0 |
Southify an app remotely. This fakes the initial migration and then migrates forward. Use it the first time you do a deploy on app that's been newly southified. | def southify(app):
managepy('migrate %s 0001 --fake' % app)
managepy('migrate %s' % app) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()",
"def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')",
"def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")",
"def deploy():\n from flask_migrate import upgrade\n\n upgrade() # upgrade to the latest db schema\n\n # setup necessary data to initialize database\n if Conference.query.filter_by(short_name='main').first():\n print('database already initialized')\n else:\n # add registration form questions\n FormConfiguration.insert_formConfiguration()\n Role.insert_roles() # create user roles\n generate_main_conf() # generate default main conference\n generate_admin() # generate the site admin",
"def deploy(app_to_migrate=\"\"):\n mysqldump() # backup database before making changes\n with cd(code_dir):\n run(\"git pull\")\n run(python_add_str + \"python manage.py migrate %s\" % app_to_migrate)\n run(python_add_str + \"python manage.py createinitialrevisions\") # only if using reversion\n run(python_add_str + \"python manage.py collectstatic --noinput\")\n run(\"../apache2/bin/restart\")",
"def migrate(*apps):\n # First sync db\n print(apps)\n\n if len(apps) > 0:\n for app in apps:\n try:\n _manage('migrate %s' % app)\n except Exception as e:\n print(red('Failed to migrate {} app! {}'.format(app, str(e))))\n else:\n _manage('migrate')",
"def migrate(heroku_app=HEROKU_APP):\n subprocess.run([\n 'heroku', 'run',\n '--app', heroku_app,\n '--env', 'PYTHON_PATH=/app',\n '--exit-code',\n '--',\n 'python', '-m', 'frank.manage', 'db', 'upgrade',\n ])\n subprocess.run(['heroku', 'restart', '--app', heroku_app])",
"def migrate(self):\n\tpass",
"def deploy():\n db.drop_all()\n create_DB()\n app.run()",
"def deploy():",
"def migrate(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n run('bin/django syncdb')\n try:\n run('bin/django schemamigration dasa --auto')\n except:\n pass\n run('bin/django migrate dasa')",
"def deploy_app(self, app_info):\n raise NotImplementedError",
"def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')",
"def setup_before_migration(self, apps):",
"def smart_syncdb_migrate(self):\n local('python manage.py syncdb')\n local('python manage.py migrate')\n local('python manage.py syncdb --all')",
"def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)",
"def migratefuture(config, comm, collection, database, host, port):\n des_db = database if database else 'ccsdm'\n des_tbl = collection if collection else 'sfdc_dump'\n CleanSFDCDump(comm, des_tbl, des_db, host=host, port=port).execute()\n return",
"def migrate_new_apps():\n new_apps = run('%s %s/fabfiles/django_scripts/get_apps_without_migration.py'\n % (env.PYTHON_BIN, env.SRC_PATH))\n # The script denotes the start of its output by \"{% output %}\" tag so we\n # only take whatever's after that\n new_apps = new_apps.split('{% output %}')[1].split()\n with cd(env.SRC_PATH):\n for app in new_apps:\n sudo(\"%s manage.py schemamigration %s --initial\" %\n (env.PYTHON_BIN, app.strip()))\n sudo(\"%s manage.py migrate %s --no-initial-data\" %\n (env.PYTHON_BIN, app.strip()))",
"def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')",
"def upgrade_app_db(app, user):\n ctx.logger.info('Upgrading %s DB', app.capitalize())\n run('db-migrate', app, user)",
"def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)",
"def migrate_database(self):\n\n self.db.migrate_database()",
"def migratedb(rollback=False):\n\n require(\"virtualenv_path\", \"project_path\", \"sudo_user\")\n\n #\n # Some things need to be done first (i.e. if they need a different\n # database connection or some custom args)\n #\n if \"migratedb_first\" in env:\n\n for app, args in env.migratedb_first.iteritems():\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version, args)\n\n #\n # Do the rest afterwards\n #\n if has_version_info():\n\n apps = env.south_migrations.keys()\n\n for app in apps:\n\n print app\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version)\n\n #\n # If we know nothing, just migrate everything\n #\n else:\n migrate_app_db()",
"def perform_migration():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('python manage.py migrate --settings=prod_settings', pty=True)",
"def migration():",
"def migrate_external_courseware(apps, schema_editor):\n\n migrate_external_courses(apps, schema_editor)\n migrate_external_programs(apps, schema_editor)",
"def run(syncdb=False):\n from fabdeploy.django import migrate as django_migrate, syncdb as django_syncdb\n import time\n env.release = time.strftime('%Y%m%d%H%M%S')\n prepare_deploy() # pull, test, push\n git.remote_pull()\n app.install_requirements()\n django_migrate(syncdb) # syncdb in case is first time\n deploy_static()",
"def create_automatic_migration():\n with cd(env.SRC_PATH):\n apps = run('%s fabfiles/django_scripts/get_apps_to_migrate.py' %\n env.PYTHON_BIN).split('\\n')\n with settings(hide('warnings'), warn_only=True):\n for app in apps:\n output = sudo('%s manage.py schemamigration %s --auto' %\n (env.PYTHON_BIN, app.strip()))\n\n # Raise any error other than nothing seems to have changed\n if output.failed:\n if 'Nothing seems to have changed' not in output:\n raise Exception('Error when running automated schema migration')",
"def configure_ext_migrate(app):\n migrate = Migrate(app, models.db)",
"def migrate(cr, version):\n pass"
] | [
"0.6649584",
"0.6453384",
"0.64402455",
"0.64024436",
"0.637955",
"0.6271012",
"0.61587465",
"0.60783327",
"0.605457",
"0.6015112",
"0.6007292",
"0.59989643",
"0.5960935",
"0.595262",
"0.5940856",
"0.59332865",
"0.589287",
"0.5877876",
"0.5836774",
"0.58165526",
"0.57907",
"0.57833505",
"0.57733893",
"0.5737969",
"0.57361287",
"0.5716558",
"0.56985307",
"0.56975096",
"0.5646533",
"0.56398404"
] | 0.70444345 | 0 |
self.nl = self.analyze_folder("NL") self.nl.to_csv(self.folder + "/nl.csv") self.pl = self.analyze_folder("PL") self.pl.to_csv(self.folder + "/pl.csv") self.nt = self.analyze_folder("NT") self.nt.to_csv(self.folder + "/nt.csv") self.pt = self.analyze_folder("PT") self.pt.to_csv(self.folder + "/pt.csv") | def analyze_data(self):
self.truth = self.analyze_folder("Truth")
self.truth.to_csv(self.folder + "/truth.csv")
self.false = self.analyze_folder("False")
self.flase.to_csv(self.folder + "/false.csv") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n languages = ['Greek']\n counts = [24]\n dataset = []\n\n for i in range(len(languages)):\n for j in range(1,counts[i]+1):\n if j >= 10:\n charPath = languages[i] + '/character' + str(j)\n else:\n charPath = languages[i] + '/character0' + str(j)\n\n dataset = readFolder(dataset, charPath, i, j)\n print(\"~~ Writing Model Names to parsedData.csv ~~\")\n with open('parsedData24.csv', \"w+\") as csv_file:\n csv_file.truncate()\n writer = csv.writer(csv_file)\n for elem in dataset:\n writer.writerow(elem)\n print(\"~~ Done ~~\")",
"def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)",
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])",
"def convert_to_csv(self, branch):\n names = [\"CSE_results.csv\", \"IT_results.csv\"]\n self.results = {\"ROLL_NO\": self.roll_nos, \"Name\": self.names, \"SGPA\": self.sgpa}\n print(self.results)\n df = DataFrame.from_dict(self.results)\n df.to_csv(names[branch], index=False)",
"def write_to_csv(self, verbose: bool = False) -> None: \n Path(self.csv_dir).mkdir(exist_ok=True)\n with open(f\"{self.csv_dir}/train.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as train_file:\n with open(f\"{self.csv_dir}/test.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as test_file:\n csv_header = (\"phone\", \"phone_class_index\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\")\n train_csvwriter = csv.writer(train_file)\n test_csvwriter = csv.writer(test_file)\n train_csvwriter.writerow(csv_header)\n test_csvwriter.writerow(csv_header)\n for vowels_and_formants, wav_path, category in self:\n if verbose:\n print(f\"File: {wav_path} (category: {category})\")\n writer = train_csvwriter if category == \"TRAIN\" else test_csvwriter\n for vowel_and_formants in vowels_and_formants:\n phone, formants = vowel_and_formants\n row = (phone, ipa_class_index[phone]) + tuple(formants)\n writer.writerow(row)\n if verbose:\n print(row)",
"def makeCsv(net, date, opt, path, minlat, maxlat, minlon, maxlon, variables, estaciones):\n\n # data_lon = Dataset('/ServerData/KRAKEN/Reanalisis/a1979/wrfout_c15d_d01_1979-08-15_00:00:00.1979')\n # LON = data_lon.variables['XLONG'][:]\n # LAT = data_lon.variables['XLAT'][:]\n #\n # LON = LON[0][0]\n # LAT = LAT[0]\n #\n # LONsize = len(LON)\n # LATsize = len(LAT)\n #\n # celda = []\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat):int(maxlat),int(minlon):int(maxlon)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], date, opt, path, estaciones)",
"def save_report(keyword):\n for file in glob.glob(keyword+\"_CNA/*\"):\n df_cna = pd.read_table(file, sep=\"\\t\", index_col=0)\n df_cna_report = generate_report(df_cna)\n new_folder1 = keyword+\"_report\"\n if not os.path.exists(new_folder1):\n os.mkdir(new_folder1)\n filename = os.path.split(file)[1]\n output_name = os.path.join(new_folder1, filename)\n df_cna_report.to_csv(output_name, sep=\"\\t\")\n yield df_cna_report",
"def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def test_batch(from_dir, to_dir, doc_type):\n\n if from_dir[-1] != \"/\":\n from_dir = from_dir + \"/\"\n if to_dir[-1] != \"/\":\n to_dir = to_dir + \"/\"\n\n os.chdir(from_dir)\n for pdf_file in os.listdir(from_dir):\n if pdf_file.endswith(\".pdf\"):\n # Appends a row to the csv file \"output.csv\" with the stats from that particular document\n analyze(from_dir, pdf_file, doc_type)\n\n # Moving to the 'to' directory since we're done analyzing it.\n destination = to_dir + pdf_file\n shutil.move(from_dir+ pdf_file, destination)",
"def loadFiles(analyzer,totalFiles):\n for filename in totalFiles:\n if filename.endswith('.csv'):\n print('Cargando archivo: ' + filename)\n loadTrips(analyzer, filename)\n print(\"Cargando información extra...\")\n model.findPopulars(analyzer)\n model.findPopularsAdd(analyzer)\n return analyzer",
"def write_all_users(folder_name: str, label: bool):\n make_directory(folder_name)\n for user in get_user_ids():\n print(\"Analysis of user: \" + user)\n subfolder_name = folder_name + \"/\" + user\n make_directory(subfolder_name)\n for session in get_user_session_ids(user):\n print(\"Session: \" + session)\n file_name = subfolder_name + \"/\" + session + \".csv\"\n data = get_feature_vector(user, session)\n if data == None:\n continue\n if label:\n data = [labels] + data\n write_to_csv(data, file_name)",
"def parse_to_csv(data,namee):\n pth = BASE_DIR + '/reports/' + csv_name\n if not os.path.isfile(namee):\n csv_file = open(namee, 'wb')\n csv_writer = csv.writer(csv_file)\n top_row = [\n 'IP', 'Host', 'os', 'Proto', 'Port',\n 'Service','Service_version', 'Product', 'Service FP',\n 'NSE Script ID', 'NSE Script Output', 'Notes'\n ]\n csv_writer.writerow(top_row)\n print('\\n[+] The file {} does not exist. New file created!\\n'.format(\n csv_name))\n # else:\n # # try:\n # csv_file = open(csv_name, 'w')\n\n # csv_writer = csv.writer(csv_file)\n # print('\\n[+] {} exists. Appending to file!\\n'.format(csv_name))\n\n \n for item in data:\n csv_writer.writerow(item)\n csv_file.close()",
"def make_csv(idir, dates):\n for path, dirs, files in os.walk(idir):\n for date in dates:\n # first loop over output dir\n if not path.endswith(str(date)):\n continue\n arr = path.split('/')\n oname = '%s-%s.csv' % (arr[-2], arr[-1])\n print(\"write %s\" % oname)\n with open(oname, 'w') as ostream:\n headers = None\n for ifile in files:\n if 'part-' not in ifile:\n continue\n iname = os.path.join(path, ifile)\n with open(iname) as istream:\n first_line = istream.readline()\n if not headers:\n headers = first_line\n ostream.write(headers)\n while True:\n line = istream.readline().replace('\"', '')\n if not line:\n break\n ostream.write(line)",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return",
"def main():\n master_csv = open(\"master.csv\", \"w\")\n master_csv.write(\"lat, lon, year, month, day, T_max, T_min, PrecipMM, T_ave, PrecipCM, RelHum\\n\")\n MoLS_comp_csv = open(\"MoLS_comp.csv\", \"w\")\n lat_lon_csv = open(\"lat_lon.csv\", \"w\")\n\n for csvFilename in os.listdir('.'):\n\n if not csvFilename.endswith('.csv') or csvFilename == \"master.csv\" or csvFilename == \"MoLS_comp.csv\" or csvFilename == \"lat_lon.csv\":\n continue # skip non-csv files\n\n csvFileObj = open(csvFilename)\n readerObj = csv.reader(csvFileObj)\n print(\"Currently parsing \" + str(csvFilename))\n for row in readerObj:\n if readerObj.line_num <= 8 :\n continue\n\n year = int(row[0])\n vp = float(row[8]) / 1000.0\n month, day = get_month(int(row[1]))\n T_ave = (float(row[6]) + float(row[7])) / 2\n PrecipCM = float(row[3]) / 10\n svp = .611 * math.e ** (5321 * ((1 / 273.0) - (1 / (T_ave + 273.15))))\n rh_ave = round((vp / svp) * 100, 2)\n\n # print([[csvFilename], , row[0], row[1], row[3], row[6], row[7], row[8]])\n # print(readerObj.line_num)\n # print([csvFilename.split(\"_\")[0], csvFilename.split(\"_\")[1].split(\".csv\")[0], row[0], row[1], row[3],\n # row[6], row[7], row[8]])\n\n master_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) +\n \",\" + str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n MoLS_comp_csv.write(str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n lat_lon_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) + \"\\n\")\n\n master_csv.close()\n MoLS_comp_csv.close()\n lat_lon_csv.close()",
"def managecsv(data):\n\n checkfolderdata()\n if not datafileexist(data[7]):\n createcsv(data[7])\n managecsv(data)\n else:\n addcsv(data, data[7])",
"def analysis_1_result(primary_person_df,output_folder_path):\n male_death_count_df = primary_person_df\\\n .filter(col(\"PRSN_GNDR_ID\") == \"MALE\").agg(count(\"PRSN_GNDR_ID\").alias(\"MALE_DEATH_CNT\"))\n print(\"Analysis 1: \\nTotal number of crashes (accidents) in which number of persons killed are male is :\")\n male_death_count_df.show() #Dispalying result\n write_df_to_csv(male_death_count_df,output_folder_path+\"analysis_1_result\") #Writing to csv file",
"def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")",
"def create_output(root_folder_to_save_csv):\n df = pd.DataFrame(columns=['IMAGE Name', 'Original Class Name', 'Predictions On Original Images',\n 'Predictions On Perturbed Images'])\n df.to_csv(os.path.join(root_folder_to_save_csv, 'output.csv'), index=False)",
"def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)",
"def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)",
"def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with 
open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def report(self, output_dir):",
"def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names",
"def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))",
"def csv_to_txt():\n print('csv to text')\n input_files = sys.argv[1]\n i = 0\n for filename in os.listdir(input_files):\n print(i, filename[11:-4])\n output_txt_file = ''\n current_csv_df = pd.read_csv(sys.argv[1] + filename)\n for index, row in current_csv_df.iterrows():\n if (row['task_number'] == TASK_3[0] or row['task_number'] == TASK_3[1]) and type(\n row['spoken_word']) != float:\n output_txt_file += \" \" + row['spoken_word']\n txt_file = open('jan27_memory_texts/' + filename[11:-4] + '.txt', \"a\")\n txt_file.write(output_txt_file.lstrip(' '))\n txt_file.close()\n i+=1"
] | [
"0.7004485",
"0.68498296",
"0.6444635",
"0.62651443",
"0.6250562",
"0.61392814",
"0.6122473",
"0.6085559",
"0.60484815",
"0.6020855",
"0.5994407",
"0.5986495",
"0.59810555",
"0.5963203",
"0.5939775",
"0.58972245",
"0.58890104",
"0.58844423",
"0.58789444",
"0.5876309",
"0.58672655",
"0.5845372",
"0.58284897",
"0.58254534",
"0.5825174",
"0.58238983",
"0.5811211",
"0.58105725",
"0.57763773",
"0.5769005"
] | 0.7053575 | 0 |
Changes the theme between dark and normal | def dark_theme(self):
if self.actionDark_Theme.isChecked():
QApplication.setStyle(QStyleFactory.create("Fusion"))
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColor(QPalette.Base, QColor(15, 15, 15))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, Qt.white)
palette.setColor(QPalette.ToolTipText, Qt.white)
palette.setColor(QPalette.Text, Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, Qt.white)
palette.setColor(QPalette.BrightText, Qt.red)
palette.setColor(QPalette.Highlight, QColor(0, 24, 193).lighter())
palette.setColor(QPalette.HighlightedText, Qt.black)
palette.setColor(QPalette.Disabled, QPalette.Text, Qt.darkGray)
palette.setColor(
QPalette.Disabled, QPalette.ButtonText, Qt.darkGray)
app.setPalette(palette)
return
app.setPalette(self.defaultPalette) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dark_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"darkgrid\")\n return sns.set_theme(style=\"dark\")",
"def dark_mode(app):\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(30, 30, 30))\n palette.setColor(QPalette.WindowText, QColor(225, 225, 225))\n palette.setColor(QPalette.Light, Qt.white)\n palette.setColor(QPalette.Midlight, QColor(225, 225, 225))\n palette.setColor(QPalette.Dark, QColor(65, 65, 65))\n palette.setColor(QPalette.Mid, QColor(160, 160, 160))\n palette.setColor(QPalette.BrightText, QColor(255, 51, 51))\n palette.setColor(QPalette.Button, QColor(40, 40, 40))\n palette.setColor(QPalette.Base, QColor(65, 65, 65))\n palette.setColor(QPalette.AlternateBase, QColor(50, 50, 50))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, QColor(225, 225, 225))\n palette.setColor(QPalette.ButtonText, QColor(225, 225, 225))\n palette.setColor(QPalette.Link, QColor(42, 130, 218))\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n palette.setColor(QPalette.HighlightedText, Qt.black)\n app.setPalette(palette)\n return app",
"def change_theme(self):\n # get the QApplication instance, or crash if not set\n app = QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")",
"def change_theme(self):\n # get the QApplication instance, or crash if not set\n app = QtWidgets.QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")",
"def new_with_dark_light(dark_theme, light_theme):\n theme = {\n \"dark\": Theme(dark_theme),\n \"light\": Theme(light_theme)\n }\n return theme",
"def light_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"whitegrid\")\n sns.set_theme(style=\"white\")",
"def apply_style(self, app):\n\n darkPalette = QPalette()\n\n # base\n darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90))\n darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))\n darkPalette.setColor(QPalette.Text, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))\n darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Link, QColor(56, 252, 196))\n darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))\n darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180))\n\n # disabled\n darkPalette.setColor(\n QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Text, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127)\n )\n\n app.setPalette(darkPalette)\n self._apply_base_theme(app)\n\n IconSet.current.set_color(QColor(180, 180, 180))",
"def apply_theme(self, ax):\n pass",
"def toggle_mode(mode: ThemeMode):\n with winreg.OpenKey(\n winreg.HKEY_CURRENT_USER,\n (r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Themes'\n r'\\Personalize'),\n access=winreg.KEY_ALL_ACCESS) as hkey:\n # Change mode for both apps and system theme to sync mode.\n winreg.SetValueEx(\n hkey, 'AppsUseLightTheme', 0, winreg.REG_DWORD, mode.value)\n winreg.SetValueEx(\n hkey, 'SystemUsesLightTheme', 0, winreg.REG_DWORD, mode.value)",
"def setWidget(self, widget: QtWidgets.QWidget):\n super().setWidget(widget)\n if globalstuff.theme == 'dark':\n w = self.widget()\n w.setPalette(globalstuff.textpal)\n if hasattr(w, 'TreeWidget'):\n w.TreeWidget.setStyleSheet(globalstuff.treeqss)",
"def main(themes):\n # Get toggled mode based on current system mode.\n toggled_mode = get_toggled_mode(get_current_mode())\n print('\\nSetting themes...')\n\n for theme in themes:\n # Set toggled mode.\n theme.mode = toggled_mode\n theme.toggle_callback(theme)\n if IS_WINDOWS:\n print(f'Setting system theme to: {toggled_mode.name}')\n toggle_mode(toggled_mode)\n print()",
"def set_terminal_theme(theme: ApplicationTheme):\n # Change current mode.\n path = theme.path\n try:\n with open(path, 'r', encoding='utf-8') as f:\n pass\n except FileNotFoundError:\n path = theme.windows_path\n try:\n with open(path, 'r+', encoding='utf-8') as f:\n settings_string = ''\n key = theme.keys.split(theme.settings_delimiter)[-1]\n toggled_theme_name = theme.names[theme.mode == ThemeMode.light]\n\n # Search for line with key.\n current_theme_name = ''\n for line in f:\n if not current_theme_name and key in line and any(\n name in line for name in theme.names\n ):\n # Replace current theme with toggled theme.\n # Requires that theme name is either dark or\n # light theme. Else keep \"invalid\" theme rather\n # than raising an exception.\n current_theme_name = theme.names[theme.light_name in line]\n line = line.replace(current_theme_name, toggled_theme_name)\n settings_string += line\n if current_theme_name:\n f.seek(0)\n f.truncate()\n # Write new settings to file.\n f.write(settings_string)\n\n print('Set Terminal theme to:', toggled_theme_name)\n else:\n raise ValueError(\n 'Failed to find valid current theme for Terminal.')\n except Exception as e:\n print('Failed to set Terminal theme.')\n print(repr(e))",
"def apply_style(self, app):\n\n lightPalette = QPalette()\n\n # base\n lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Button, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200))\n lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225))\n lightPalette.setColor(QPalette.Text, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Base, QColor(237, 237, 237))\n lightPalette.setColor(QPalette.Window, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224))\n lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Link, QColor(0, 162, 232))\n lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))\n lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))\n\n # disabled\n lightPalette.setColor(\n QPalette.Disabled, QPalette.WindowText, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.Text, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.ButtonText, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.Highlight, QColor(190, 190, 190)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.HighlightedText, QColor(115, 115, 115)\n )\n\n app.setPalette(lightPalette)\n\n self._apply_base_theme(app)\n IconSet.current.set_color(QColor(0, 0, 0))",
"def updateTheme(self):\n self.myUpdate(stateDict=None)",
"def theme(self, theme):\n\n self._theme = theme",
"async def update_theme(q: Q):\n\n copy_expando(q.args, q.client)\n\n if q.client.theme_dark:\n logging.info('Updating theme to dark mode')\n\n q.client.path_architecture = q.app.paths_architecture['dark']\n\n q.page['meta'].theme = 'neon'\n q.page['header'].icon_color = 'black'\n else:\n logging.info('Updating theme to light mode')\n\n q.client.path_architecture = q.app.paths_architecture['light']\n\n q.page['meta'].theme = 'light'\n q.page['header'].icon_color = '#CDDD38'\n\n q.page['misc'].items[3].toggle.value = q.client.theme_dark\n\n if q.client['#'] == 'home':\n q.page['home'].items[2].text.content = f'''<center>\n <img src=\"{q.client.path_architecture}\" width=\"540px\"></center>'''\n elif q.client['#'] == 'resources':\n q.page['code_examples'] = cards.code_examples(\n code_function=q.client.code_function,\n theme_dark=q.client.theme_dark\n )\n\n await q.page.save()",
"def _set_dark_mode(dark_mode: bool):\n cmd = [\"osascript\", \"-l\", \"JavaScript\", \"-e\"]\n if dark_mode:\n cmd += [\"Application('System Events').appearancePreferences.darkMode = true\"]\n else:\n cmd += [\"Application('System Events').appearancePreferences.darkMode = false\"]\n subprocess.run(cmd)",
"def dark_style(stylename):\n return dark_color(get_style_by_name(stylename).background_color)",
"def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")",
"def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")",
"def _apply_base_theme(self, app):\n\n app.setStyle(\"Fusion\")\n\n with open(self._STYLESHEET) as stylesheet:\n app.setStyleSheet(stylesheet.read())",
"def setup_logging_theme(handler, colors=\"light\"):\n if colors not in (\"light\", \"dark\"):\n logging.getLogger(\"delfick_logging\").warning(\n lc( \"Told to set colors to a theme we don't have\"\n , got=colors\n , have=[\"light\", \"dark\"]\n )\n )\n return\n\n # Haven't put much effort into actually working out more than just the message colour\n if colors == \"light\":\n handler._column_color['%(message)s'][logging.INFO] = ('cyan', None, False)\n else:\n handler._column_color['%(message)s'][logging.INFO] = ('blue', None, False)",
"def themes(self, themes):\n\n self._themes = themes",
"def toggle(self) -> None:\n if self.value is None:\n raise ValueError('Cannot toggle dark mode when it is set to auto.')\n self.value = not self.value",
"def theme_change(request):\n if request.method == \"POST\":\n form = ThemeForm(request.POST, instance=request.user.settings)\n if form.is_valid():\n form.save()\n return home(request, \"Theme Changed Successfully\")\n else:\n form = ThemeForm(instance=request.user.settings)\n \n ctx = _make_context(request, \"theme_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)",
"def changeTheme(id, theme):\n try:\n Dashboard.objects(id=id).update(set__theme=theme)\n except Exception as e:\n print e\n return False\n return \"Dashboard updated successfully.\"",
"def change_light(self):\n self._light_status = not self._light_status",
"def install_dark_preferences(config_path: str):\n preferences_path = os.path.join(config_path, \"preferences\")\n if not os.path.exists(preferences_path):\n logging.error(\"Please open Ghidra at least once to fully install dark mode.\")\n sys.exit(-1)\n\n # Check if the current L&f is system\n using_system = False\n with open(preferences_path, \"r\") as fp:\n for line in fp:\n if \"LastLookAndFeel=System\" in line:\n using_system = True\n break\n\n # Set the L&f to system\n if not using_system:\n with open(preferences_path, \"a\") as fp:\n fp.write(\"LastLookAndFeel=System\\n\")\n\n # Backup and modify the current tcd and tool files\n for tcd in TCD_LIST:\n tcd_path = os.path.join(config_path, \"tools\", tcd)\n backup_path = os.path.join(config_path, \"tools\", f\"{tcd}.bak\")\n try:\n shutil.copy(tcd_path, backup_path)\n browser = TCDBrowser(tcd_path)\n browser.update(preferences)\n except FileNotFoundError:\n if tcd == \"_code_browser.tcd\":\n logging.warning(\n \"Please open Ghidra at least once to fully install dark mode.\"\n )\n else:\n logging.debug(\"Could not open %s\", tcd)",
"def setUIBrightness(self, value):\n\n\t\t# print(value)\n\t\tself.col['window'] = QtGui.QColor(value, value, value)\n\t\tself.computeUIPalette()\n\t\tself.loadStyleSheet()",
"async def set_configurator_theme(self, request, theme):\n\n if theme in defaults.KERNEL_CONFIGURATOR_THEMES:\n self._configurator_theme = theme\n request.ret(READY)\n else:\n request.ret(KERNEL_CONFIGURATOR_THEME_DOES_NOT_EXIST)"
] | [
"0.78414094",
"0.74640346",
"0.74385226",
"0.7424977",
"0.73269147",
"0.7200979",
"0.70900446",
"0.6759279",
"0.6744168",
"0.666364",
"0.66361445",
"0.6613044",
"0.65831906",
"0.65280896",
"0.6456559",
"0.6409506",
"0.6269526",
"0.61850554",
"0.61629736",
"0.61629736",
"0.6108729",
"0.6106185",
"0.60140234",
"0.5984769",
"0.5792366",
"0.5767568",
"0.57145077",
"0.5666482",
"0.5618292",
"0.5594996"
] | 0.8108005 | 0 |
Check for errors in linkage | def update_linkage_error(links=None):
links = links or Linkage.objects.all()
for idx, link in enumerate(links):
link.error_check(depth=0)
update_task_info('PROGRESS', meta={'current': idx, 'total': len(links)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_errors(self) -> None:",
"def has_errors_fatal(self) -> bool:",
"def has_errors(self) -> bool:",
"def check_linking(self):\n\n # This one checks if the linking command works out of the box or\n # if any specific flag is required. For example if the linker if the\n # Intel FORTRAN compiler, then the \"-nofor_main\" is usually required.\n # This function only checks if linker works but does not automatically\n # detect the required flags\n print 'Checking loader...',\n sys.stdout.flush()\n writefile('tmpf.f',\"\"\"\n subroutine fsub()\n write(*,*)'success'\n stop\n end\\n\"\"\")\n writefile('tmpc.c',\"\"\"\n #if defined ADD_\n #define fsub fsub_\n #elif defined NOCHANGE\n #define fsub fsub\n #elif defined fcIsF2C\n #define fsub fsub_\n #elif defined UPCASE\n #define fsub FSUB\n #endif\n void main(){\n fsub();}\\n\"\"\")\n\n ccomm = self.config.cc+' '+self.config.ccflags+' '+self.mangling+' -c -o tmpc.o tmpc.c'\n fcomm = self.config.fc+' '+self.config.fcflags+' -c -o tmpf.o tmpf.f'\n lcomm = self.config.fc+' '+self.config.ldflags_fc+' '+self.config.ld_fcmain+' -o lnk tmpf.o tmpc.o'\n\n (output, error, retz) = runShellCommand(ccomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',ccomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(fcomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',fcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(lcomm)\n if retz:\n print \"\"\"\\n\\nCOMMON: in check_linking: cannot link\n Cannot link a C main program to a Fortran77 subroutine\n Make sure that the appropriate flags are passed to the linker.\"\"\"\n print 'command is: ',lcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n\n killfiles(['lnk', 'tmpf.f', 'tmpf.o', 'tmpc.c', 'tmpc.o'])\n\n print 'works'\n return 1;",
"def _checkErrors(self, landPage):\n noLicenseTags = ['Purchase a Subscription',\n 'Purchase This Content',\n 'to gain access to this content',\n 'purchaseItem',\n 'Purchase Full Text',\n 'Purchase access',\n 'Purchase PDF',\n 'Pay Per Article',\n 'Purchase this article.',\n 'Online access to the content you have requested requires one of the following',\n 'To view this item, select one of the options below',\n 'PAY PER VIEW',\n 'This article requires a subscription.',\n 'leaf-pricing-buy-now',\n 'To access this article, please choose from the options below',\n 'Buy this article',\n 'Your current credentials do not allow retrieval of the full text.',\n 'Access to the content you have requested requires one of the following:',\n 'Online access to the content you have requested requires one of the following']\n if pageContains(landPage, noLicenseTags):\n logging.info(\"generic crawler found 'No license' on \" + landPage['url'])\n raise pubGetError('No License', 'noLicense', landPage['url'])\n errTags = ['This may be the result of a broken link',\n 'please verify that the link is correct',\n 'Sorry, we could not find the page you were looking for',\n 'We are now performing maintenance',\n 'DOI cannot be found in the DOI System']\n if pageContains(landPage, errTags):\n raise pubGetError('Error Message', 'errorMessage', landPage['url'])",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success",
"def validate(self):\n valid = True\n \n # Check that link information is valid\n for ij in self.link:\n valid = valid and self.link[ij].head in self.node\n valid = valid and self.link[ij].tail in self.node\n if not valid:\n print(\"Error: Link tail/head not found: %s %s\" % (self.link[ij].tail, self.link[ij].head))\n raise utils.BadFileFormatException\n valid = valid and self.link[ij].capacity >= 0\n valid = valid and self.link[ij].length >= 0\n valid = valid and self.link[ij].freeFlowTime >= 0\n valid = valid and self.link[ij].alpha >= 0\n valid = valid and self.link[ij].beta >= 0\n valid = valid and self.link[ij].speedLimit >= 0\n valid = valid and self.link[ij].toll >= 0\n if not valid:\n print(\"Link %s has negative parameters.\" % ij)\n \n # Then check that all OD pairs are in range\n for ODpair in self.ODpair:\n (origin, destination) = (self.ODpair[ODpair].origin, self.ODpair[ODpair].destination)\n valid = valid and origin in self.node\n valid = valid and destination in self.node\n if not valid:\n print(\"Error: Origin/destination %s not found\" % ODpair)\n raise utils.BadFileFormatException\n valid = valid and self.node[origin].isZone == True\n valid = valid and self.node[destination].isZone == True\n if not valid:\n print(\"Error: Origin/destination %s does not connect two zones\" % str(ODpair))\n raise utils.BadFileFormatException\n valid = valid and self.ODpair[ODpair].demand >= 0\n if not valid:\n print(\"Error: OD pair %s has negative demand\" % ODpair)\n raise utils.BadFileFormatException\n \n # Now error-check using metadata\n if self.numNodes != None and len(self.node) != self.numNodes:\n print(\"Warning: Number of nodes implied by network file %d different than metadata value %d\" % (len(self.node), self.numNodes))\n self.numNodes = len(self.node)\n if self.numLinks != None and len(self.link) != self.numLinks:\n print(\"Warning: Number of links given in network file %d different than metadata value %d\" % (len(self.link), self.numLinks))\n 
self.numLinks = len(self.link)\n if self.numZones != None and len([i for i in self.node if self.node[i].isZone == True]) != self.numZones:\n print(\"Warning: Number of zones given in network file %d different than metadata value %d\" % (len([i for i in self.node if self.node[i].isZone == True]), self.numZones))\n self.numLinks = len(self.link)\n if self.totalDemandCheck != None:\n if self.totalDemand != self.totalDemandCheck:\n print(\"Warning: Total demand is %f compared to metadata value %f\" % ( self.totalDemand, self.totalDemandCheck))",
"def check_links(self,df,node,dd):\n errors = []\n links = self.list_links(node, dd)\n if \"core_metadata_collections\" in links:\n links.remove(\"core_metadata_collections\")\n if \"core_metadata_collections.submitter_id\" in links:\n links.remove(\"core_metadata_collections.submitter_id\")\n for link in links:\n link_col = \"{}.submitter_id\".format(link)\n if link_col not in df:\n error = \"'{}' link header not found in '{}' TSV.\".format(link_col,node)\n print(error) # this is not necessarily an error, as some links may be optional, but must have at least 1 link\n errors.append(error)\n return errors",
"def path_link_errors(self):\n return self._path_link_errors",
"def test_invalid_link(self):\r\n\r\n # Setup the peer grading module with no linked locations.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)\r\n\r\n self.assertFalse(peer_grading.use_for_single_location_local)\r\n self.assertTrue(peer_grading.use_for_single_location)",
"def _errcheck_link(value, func, args): # pylint: disable=W0613\n # The windows api returns nonzero if the call was successful\n if value != 0:\n return\n\n last_error = ctypes.windll.kernel32.GetLastError()\n # Somehow CreateSymbolicLinkW and CreateHardLinkW retuns zero\n # and the last error is 2 (The system cannot find the file specified)\n # but the link is created successfuly\n # it seems like a bug in the WinAPI\n if last_error == 0 or last_error == 2:\n return\n if last_error == 183:\n raise OSError(errno.EEXIST,\n \"Cannot create a file when that file already exists\",\n args[0])",
"def test_check_source_11(self):\n self.src1._organism_host_genus = \"Mycobacterio\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\")\n self.assertEqual(count, 0)",
"def hasErrors(self):\n return False",
"def has_error(self):\r\n return self._arm.has_error",
"def validate_dependencies(self, session, entry):",
"def check(self):\n missing = []\n for name in self.data[\"locations\"]:\n try:\n n = self.data[\"names\"][name]\n except KeyError:\n missing.append(name)\n if missing:\n raise RuntimeError(\"\\\"names\\\" list lacks:\\n \" + \"\\n \".join(missing))",
"def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.raw_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.raw_mat_path))\n if not osp.exists(self.split_new_det_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_det_mat_path))\n if not osp.exists(self.split_new_lab_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_lab_mat_path))",
"def test_check_source_10(self):\n self.src1._lab_host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def check(self):\n nonReferencedMesh = list()\n\n for mesh in pm.ls(type=\"mesh\"):\n if not pm.objExists(mesh.name() + \".grid_noCheck\"):\n if not mesh.isReferenced():\n nonReferencedMesh.append(mesh)\n\n if not nonReferencedMesh:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = nonReferencedMesh\n for mesh in nonReferencedMesh:\n self.addError(\"%s is not referenced\" % mesh.name())\n self.errorMessage = \"%s non referenced mesh(es)\" % (\n len(nonReferencedMesh))",
"def check():",
"def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)",
"def check_for_setup_error(self):\n lcfg = self.configuration\n\n self.zfssa.verify_pool(lcfg.zfssa_nfs_pool)\n self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project)\n self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n self.zfssa.verify_service('http')\n self.zfssa.verify_service('nfs')",
"def is_valid(self):\n return not self.missing_from_disk and not self.missing_dependency",
"def test_check_source_9(self):\n self.src1._host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def _default_checker(x, y):\r\n if x[0] != y[0]:\r\n raise Exception(\"Output mismatch.\",\r\n {'performlinker': x[0], 'clinker': y[0]})",
"def have_error(self):\n return (hasattr(self, \"got_error\") and\n self.got_error)",
"def has_warnings(self) -> bool:",
"def test_gcc_crash(self):\r\n self.validate((1, 10, 213, 129), (46, 10, 212, 1), 'valid',\r\n verify_grad=False)"
] | [
"0.6475067",
"0.6206964",
"0.61585003",
"0.6133443",
"0.60374117",
"0.60219735",
"0.60219735",
"0.59338003",
"0.5918931",
"0.5886056",
"0.58538455",
"0.5843536",
"0.57988787",
"0.5684941",
"0.5683721",
"0.56663525",
"0.56592804",
"0.56536925",
"0.56520957",
"0.5601067",
"0.5599835",
"0.55721325",
"0.55491537",
"0.55430126",
"0.5533608",
"0.5528278",
"0.55249304",
"0.5494273",
"0.5485145",
"0.5482972"
] | 0.6460065 | 1 |
Check for errors in workitems | def update_workitem_error(cases=None):
cases = cases or WorkItem.objects.all()
for idx, case in enumerate(cases):
case.error_check(depth=0)
update_task_info(state='PROGRESS', meta={'current': idx, 'total': len(cases)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_errors(self) -> None:",
"def has_errors(self) -> bool:",
"def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def error_check(command):\r\n\r\n # TODO\r",
"def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError('Shopping'))\n batch_job = BatchJob(affiliate_items, updater)\n\n with_message = 0\n for result in batch_job.run():\n with_message += (result.is_error and 'Shopping' in result.details)\n\n assert with_message == 4",
"def delete_error():\r\n item = core.get_all_items()\r\n for i in item:\r\n if \"Error\" in i or \"Warning\" in i:\r\n if core.does_item_exist(i):\r\n reset_error(i)",
"def test_notebook_no_errors(executed_notebook):\n for c in executed_notebook['cells']:\n _assert_cell_no_errors(c)",
"def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)",
"def check(self,item):\r\n raise AbstractError\r\n return False",
"def test_notebook_no_errors(executed_notebook):\n for c in executed_notebook[\"cells\"]:\n _assert_cell_no_errors(c)",
"def _validate_error(cls, item):\n if item.error and item.status_code not in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error: %s for job is not empty but '\n 'job status is %s' % (item.id, item.error, item.status_code))\n\n if not item.error and item.status_code in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error for job is empty but '\n 'job status is %s' % (item.id, item.status_code))",
"def lch_mw_check_failed(shell, obj):\n trade_qualifying_query_folder_name = CONFIG_PARAMS['Front_Trade_qualifying_query_folder']\n query_folder = acm.FStoredASQLQuery[trade_qualifying_query_folder_name.Text()]\n trade = obj\n if query_folder:\n if query_folder.Query().IsSatisfiedBy(trade):\n if not trade.AdditionalInfo().CCPmiddleware_id():\n message = 'Cannot book the trade in LCH portfolio.\\n\\nPlease enter Clearing ID in MarkitWire tab and try again.'\n popup_error(shell, message)\n return True\n \n return False",
"def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def check_get_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def hasErrors(self):\n return False",
"def check_validation_results():\n with open('prep/datapackage_validation.json') as report_file:\n report = json.load(report_file)\n\n tasks = report['tasks']\n assert len(tasks) == 5\n\n for task in tasks:\n\n errors = task['errors']\n\n # as a first approximation, allow up to 300 errors on the appearances file\n # this is to account for a common foreign key exception caused by the source data\n if task['resource']['name'] == 'appearances':\n errors_threshold = 300\n # for the rest of the files do nor allow errors at all\n else:\n errors_threshold = 0\n\n if len(errors) > errors_threshold:\n print(f\">={len(errors)} rows did not pass validations!\")\n return False\n else:\n return True",
"def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4",
"def has_errors_fatal(self) -> bool:",
"def check_entry(self, controller, entries, list_of_project_info, error_label):\r\n\r\n for x in range(0, len(entries)):\r\n if entries[x].get() == \"\":\r\n messagebox.showerror(\"Error\", \"Expected no empty fields\")\r\n return\r\n if not entries[2].get().isalpha():\r\n messagebox.showerror(\"Error\", \"Expected column in letter not number, e.g. 'B' \")\r\n return\r\n name_col = self.col_to_num(entries[2].get())\r\n self.write_to_indata(entries)\r\n\r\n list_error,error_present = [], []\r\n list_error = controller.start_config(entries, name_col, list_error, list_of_project_info)\r\n if len(list_error) == 0:\r\n message = \"Successfully generated all state files\"\r\n error_present.append(message)\r\n error_label.config(text=\"Successfully generated all state files\")\r\n else:\r\n for element in list_error:\r\n if element.error_type == \"1\": # error in loop_trough_row\r\n message = \"expected error in excel spreadsheet at row\" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"2\": #filname missing\r\n message = \"expected error in file \" + str(element.file_name)+ \"\\n\"\r\n elif element.error_type == \"3\": # Filename error\r\n message = \"expected error in file name at row \" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"4\": # \"Seems like error in 1:st or 3:rd line in excel sheet\"\r\n message = \"expected error in excel spreadsheet on 1:st or 3:rd row \" + \"\\n\"\r\n error_present.append(message)\r\n error_report = open(\"error_report.txt\", \"w+\")\r\n error_report.write(''.join(error_present))\r\n error_report.close()\r\n error_label.config(text=\"Error occured, check error report in \"+ entries[1].get())\r\n # error_label.config(text=(''.join(error_present)))\r",
"def _nupicHyperSearchHasErrors(hyperSearchJob):\n # TODO flesh me out\n\n # Get search ID for our latest hypersearch\n\n # Query Nupic for experiment failures in the given search\n\n return False",
"def check_errors(self, data):\n for entry in data:\n if entry.find('ERROR') != -1:\n return entry\n return False",
"def _assert_cell_no_errors(c):\n if c[\"cell_type\"] != \"code\":\n return\n errors = [\n \"Error name: {}, Error Value: {}, trace: {}\".format(\n o[\"ename\"], o[\"evalue\"], \"\\n\".join(o.get(\"traceback\"))\n )\n for o in c[\"outputs\"]\n if o[\"output_type\"] == \"error\"\n ]\n\n if errors:\n pytest.fail(\"Found notebook errors: {}\".format(\"\\n\".join(errors)))",
"def check(self):\n self.isNodes = True\n self.isFixable = True\n errorNodes = list()\n for each in pm.ls(type='unknown'):\n errorNodes.append(each)\n self.status = 'OK'\n if len(errorNodes):\n self.setErrorNodes(errorNodes)\n self.setStatus('ERROR')",
"def not_existing_error_test(self):\n client = TestClient()\n error = client.run(\"upload some_nonsense\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: No packages found matching pattern 'some_nonsense'\",\n client.user_io.out)",
"def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors",
"def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL",
"def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True",
"def test_long_items_no_input_files(mock_logger, items):\n with pytest.raises(ValueError, match=VALUE_ERROR_MSG):\n _run_preproc_function(failing_function, items, KWARGS)\n assert len(mock_logger.mock_calls) == 2\n\n # Debug call\n assert_debug_call_ok(mock_logger, items)\n assert mock_logger.debug.call_args[0][3] == \"\"\n\n # Error call\n assert_error_call_ok(mock_logger)\n error_call_args = mock_logger.error.call_args[0]\n items = list(items)\n for item in items[:4]:\n assert repr(item) in error_call_args[2]\n for item in items[4:]:\n assert repr(item) not in error_call_args[2]\n assert \"\\n(and 2 further argument(s) not shown here;\" in error_call_args[2]\n assert error_call_args[3] == \"\"",
"def errorCheck(self):\n\t\twhile 1:\n #check for bad state\n\t\t\tif epics.caget(self.error_bypass) == 1:\n\t\t\t\tout_msg=\"Bypass flag is TRUE\"\n elif epics.caget(self.error_bcs) != 1:\n out_msg=\"BCS tripped\"\n elif epics.caget(self.error_mps) != 0:\n out_msg=\"MPS tripped\"\n elif epics.caget(self.error_gaurdian) != 0:\n out_msg=\"Gaurdian tripped\"\n\t\t\n #elif epics.caget(self.error_und_tmit) < 5.0e7:\n # out_msg=\"UND Tmit Low\"\n else:\n out_msg='Everything Okay'\n\n #exit if the stop button is set\n #if not self.mi.getter.caget(\"SIOC:SYS0:ML03:AO702\"):\n\t\t\tif not epics.caget(\"SIOC:SYS0:ML03:AO702\"):\n break\n\n #set the error check message\n epics.caput (\"SIOC:SYS0:ML00:CA000\",out_msg)\n print out_msg\n\n #break out if error check is bypassed\n if (out_msg==\"Bypass flag is TRUE\"):\n break\n\n #break out if everything is okay\n if (out_msg==\"Everything Okay\"):\n epics.caput(self.error_tripped,0)\n break\n\t\t\t\t#return\n else:\n epics.caput(self.error_tripped,1)\n time.sleep(0.1)",
"def test_results_lookup_error(self, affiliate_items):\n item_pks = affiliate_items.values_list('pk', flat=True)\n\n def error_first(item):\n if item.name == \"0\":\n raise LookupError()\n\n update_function = mock.Mock(side_effect=error_first)\n batch_job = BatchJob(affiliate_items, update_function)\n\n success_count = 0\n for result in batch_job.run():\n success_count += int(not result.is_error)\n\n assert success_count == 3\n\n items = AffiliateItem.objects.filter(pk__in=item_pks)\n assert items.count() == 3"
] | [
"0.7019857",
"0.64408",
"0.6280564",
"0.6200474",
"0.61952287",
"0.61811614",
"0.6174349",
"0.6168359",
"0.6143135",
"0.613679",
"0.61158603",
"0.61091346",
"0.60561216",
"0.60122675",
"0.60075825",
"0.60042787",
"0.5979714",
"0.5968149",
"0.5967855",
"0.5961485",
"0.5925314",
"0.5916076",
"0.587586",
"0.58643216",
"0.5851943",
"0.5849281",
"0.58373934",
"0.5795353",
"0.57933736",
"0.578243"
] | 0.65584564 | 1 |
Clean all models except Error. | def clean_all_db():
for model in [
Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,
Document, Project, Framework]:
model.objects.all().delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_models(self):\n # TODO: Add the exclude parameter in the signature of the method.\n # Call full_clean with ``exclude``, so that we can exclude any models\n # fields we want from the validation.\n for element in isinstance(self.request.data, self.model) \\\n and [self.request.data] or self.request.data:\n try:\n element.full_clean()\n except ValidationError, e:\n # When a ValidationError exception e is raised by\n # ``model.clean_fields``, it has the parameter:\n # e.message_dict = {'field1': 'error string',\n # 'field2': 'error string, ...}\n # When it's raised by ``clean`` e has the parameter:\n # e.message_dict = {NON_FIELD_ERRORS: [<error string>]}\n raise exceptions.BadRequest(e.message_dict)",
"def clear_errors(self) -> None:",
"def clear_errors(self) -> None:",
"def clean(self):\n pass",
"def clean(self):",
"def clean(self):\n\n pass",
"def clean(self):\n raise NotImplementedError",
"def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()",
"def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()",
"def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()",
"def clean(self):\r\n return self.clean()",
"def clean():\n clean_files()",
"def clean_all(log_step=1000, keep_days=7, batch_size=100, max_delete=None):\n clean_errors(log_step=log_step, keep_days=keep_days, batch_size=batch_size, max_delete=max_delete)\n for J in JobRegistry:\n clean_job_model(J, log_step=log_step, keep_days=keep_days, batch_size=batch_size, max_delete=max_delete)",
"def clean_up(self):\n pass",
"def clean_up(self):\n pass",
"def clean(self):\n return",
"def cleanup(self):\n with hide(\"output\", \"warnings\", \"running\"):\n self.stop_all()\n self._execute_standard(\"rm -rf {model_repo}\".format(model_repo=MODEL_REPO))\n self._execute_root(\"docker rmi --force $(docker images -q)\", warn_only=True)\n self._execute_root(\"docker network rm clipper_nw\", warn_only=True)",
"def fix(self):\n exceptionError = ''\n for each in self.errorNodes:\n try:\n pm.delete(each)\n except exceptionError:\n print exceptionError",
"def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)",
"def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True",
"def _unload(apps, schema_editor):\n for modelname in models:\n model = apps.get_model(appname, modelname)\n model.objects.all().delete()",
"def _clean_database(self):\n # noinspection PyUnresolvedReferences\n env = self.env\n cr = env.cr\n modules_to_resolve = [\n 'ch_vendor_info',\n 'API_PDA_receiver',\n 'delivery_report_custom',\n 'myevo_base',\n 'myevo_nobutton_sending_email',\n 'myevo_web',\n 'purchase_order_custom']\n\n # Rename model module ch_vendor_info\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'ch_vendor_info'\"\"\")\n # Delete module soupese_base models that exists in old models\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_users'\"\"\")\n # Rename\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'API_PDA_receiver'\"\"\")\n\n # Rename module\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'delivery_report_custom'\"\"\")\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_base'\"\"\")\n cr.execute(\n \"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_nobutton_sending_email'\"\"\")\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_web'\"\"\")\n\n # Delete module soupese_base models that exists in old models\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_measure_scale'\"\"\")\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_pda_operation'\"\"\")\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_partner'\"\"\")\n\n # Rename module\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'purchase_order_custom'\"\"\")\n\n # Rename module_ in base\n for x in modules_to_resolve:\n cr.execute(\"\"\"\n DELETE FROM ir_model_data\n WHERE name = 'module_%s' AND module = 'base' AND model = 'ir.module.module'\"\"\", (x,))\n\n # Uninstall modules\n cr.execute(\"\"\"UPDATE 
ir_module_module SET state = 'uninstalled' WHERE name = '%s'\"\"\", (x,))\n\n # Remove vendor.information.scale table\n cr.execute(\"DROP TABLE vendor_information_scale\")\n\n # Commit finally\n cr.commit()",
"def _finalize(self):\n for model in self.models:\n model._finalize()",
"def full_clean(self):\n self._errors = ErrorDict()\n if not self.is_bound: # Stop further processing.\n return\n self.cleaned_data = {}\n if self.empty_permitted and not self.has_changed():\n self.cleaned_data = None\n return\n for name, field in self.fields.items():\n self.clean_field(name, field)\n try:\n self.cleaned_data = self.clean()\n except ValidationError, e:\n self._errors[NON_FIELD_ERRORS] = e.messages\n if self._errors:\n delattr(self, 'cleaned_data')",
"def clean(_context):",
"def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()",
"def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()",
"def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()",
"def clean_up(model_path):\n cmds = [\"rm */grad*.pickle\",\n \"rm -r checkpoints\",\n \"rm */train_len\",\n \"rm log_human_read.csv\",\n \"rm */log_human_read.csv\",\n \"rm -r best_model\",\n \"rm */*epoch*\"]\n\n for cmd in cmds:\n os.system(\"cd {} && {}\".format(model_path, cmd))",
"def clean_all(request):\n\treturn 0"
] | [
"0.74855083",
"0.67072684",
"0.67072684",
"0.6591304",
"0.6566097",
"0.6535888",
"0.6350453",
"0.62663954",
"0.62663954",
"0.62663954",
"0.624777",
"0.6182531",
"0.6175318",
"0.61041176",
"0.61041176",
"0.6100422",
"0.60844874",
"0.6070654",
"0.60535806",
"0.6028027",
"0.6025549",
"0.6023125",
"0.6018765",
"0.59943336",
"0.59925646",
"0.59904593",
"0.5990108",
"0.5967298",
"0.5963533",
"0.5960831"
] | 0.6942854 | 1 |
Tests geometry acess methods of the class GeometryAccess. | def test_access(geometry):
geometry.print_list_of_geos()
geometry.print_list_of_geos_children()
logger.info('TOP GEO:')
top_geo = geometry.get_top_geo()
top_geo.print_geo_children()
logger.info('INTERMEDIATE GEO (QUAD):')
geo = geometry.get_geo('QUAD:V1', 0)
#geo = geometry.get_top_geo()
geo.print_geo_children()
t0_sec = time()
X,Y,Z = geo.get_pixel_coords(do_tilt=True)
#X,Y = geo.get_2d_pixel_coords()
s = 'X: %s' % str(X)
s+= '\n Consumed time to get 3d pixel coordinates = %7.3f sec' % (time()-t0_sec)
s+= '\n Geometry object: %s:%d X.shape:%s' % (geo.oname, geo.oindex, str(X.shape))
logger.info(s)
logger.info('Test of print_pixel_coords() for quad:')
geometry.print_pixel_coords('QUAD:V1', 1)
logger.info('Test of print_pixel_coords() for CSPAD:')
geometry.print_pixel_coords()
s = 'Test of get_pixel_areas() for QUAD:'
A = geo.get_pixel_areas()
s+= '\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))
s+= '\n A[0,0:5,190:198]:\n' + str(A[0,0:5,190:198])
logger.info(s)
s = 'Test of get_pixel_areas() for CSPAD:'
A = top_geo.get_pixel_areas()
s+= '\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))
s+= '\n A[0,0,0:5,190:198]:\n' + str(A[0,0,0:5,190:198])
logger.info(s)
s = 'Test of get_size_geo_array()'
s+= '\n for QUAD: %d' % geo.get_size_geo_array()
s+= '\n for CSPAD: %d' % top_geo.get_size_geo_array()
logger.info(s)
s = 'Test of get_pixel_scale_size()'
s+= '\n for QUAD : %8.2f' % geo.get_pixel_scale_size()
s+= '\n for CSPAD : %8.2f' % top_geo.get_pixel_scale_size()
s+= '\n for geometry: %8.2f' % geometry.get_pixel_scale_size()
logger.info(s)
s = 'Test of get_dict_of_comments():'
d = geometry.get_dict_of_comments()
s+= '\n d[0] = %s' % str(d[0])
logger.info(s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_02_get_geometry_collection_details(self):\n geometry_collection = GeometryCollection(**self.test_data)\n geometry_collection.save()\n response = self.client.get('/api/v1/collection/%s/' % geometry_collection.pk)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_geometry():\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"inflated\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)\n\n # Test quad with sphere\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"sphere\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)",
"def test_elements_geometry():\n # GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01\"\n keys = [\"landuse\"]\n values = [\"grass\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_geodataframe()\n del client\n\n # THEN\n assert len(result.geometry) == 9",
"def test_cover_geometry_empty_geoms(tiler):\n assert not cover_geometry(tiler, geometry.Point(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiPoint(), 0) == True\n assert not cover_geometry(tiler, geometry.LineString(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiLineString(), 0) == True\n assert not cover_geometry(tiler, geometry.Polygon(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiPolygon(), 0) == True\n assert not cover_geometry(tiler, geometry.GeometryCollection(), 0) == True",
"def geometry():\n return Geometry()",
"def test_extract_geometry():\r\n file_path = 'C:/Oregon_State/Spring_2019/Soft_dev_eng/StoveOpt/tests/Stove_test_Geometry.xlsx'\r\n pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y = extract_geometry(file_path)\r\n assert pt2x == 0.1\r\n assert pt2z == 0\r\n assert pt2y == 0\r\n assert pt3x == 0\r\n assert pt3z == 0.15\r\n assert pt3y == 0\r\n assert pt4x == 0.1\r\n assert pt4z == 0.15\r\n assert pt4y == 0\r\n assert pt5x == 0.1\r\n assert pt5z == 0.16\r\n assert pt5y == 0\r\n assert pt6x == 0\r\n assert pt6z == 0.16\r\n assert pt6y == 0\r\n assert pt7x == 0\r\n assert pt7z == 0.3\r\n assert pt7y == 0\r\n assert pt8x == 0.1\r\n assert pt8z == 0.3\r\n assert pt8y == 0\r\n assert pt9x == 0.17\r\n assert pt9z == 0.3\r\n assert pt9y == 0\r\n assert pt10x == -0.07\r\n assert pt10z == 0.3\r\n assert pt10y == 0\r\n assert pt11x == -0.07\r\n assert pt11z == 0.5\r\n assert pt11y == 0\r\n assert pt12x == -.04\r\n assert pt12z == 0.5\r\n assert pt12y == 0\r\n assert pt13x == 0.14\r\n assert pt13z == 0.5\r\n assert pt13y == 0\r\n assert pt14x == 0.17\r\n assert pt14z == 0.5\r\n assert pt14y == 0\r\n assert pt15x == -0.04\r\n assert pt15z == 0.33\r\n assert pt15y == 0\r\n assert pt16x == 0.14\r\n assert pt16z == 0.33\r\n assert pt16y == 0\r\n #assert U_100x == 1\r\n #assert U_100y == 0\r\n #assert U_100z == 0\r",
"def checkGeom(geodataframe):\n for geometry in geodataframe.geometry:\n if explain_validity(geometry) != 'Valid Geometry':\n print(explain_validity(geometry))",
"def reader():\n return GeometryTestGen()",
"def test_run_spatial_function(session):\n factories.ConnectionNodeFactory()\n q = session.query(func.ST_AsGeoJSON(models.ConnectionNode.the_geom))\n q.first()",
"def test_geo_ops_smoke(geo_table):\n t = geo_table\n\n # alias for fields\n point = t.geo_point\n linestring = t.geo_linestring\n polygon = t.geo_polygon\n multipolygon = t.geo_multipolygon\n\n # test ops\n point.srid()\n point.x()\n point.y()\n\n linestring.contains(point)\n linestring.end_point()\n linestring.length()\n linestring.max_distance(point)\n linestring.point_n(1)\n linestring.start_point()\n linestring.x_max()\n linestring.x_min()\n linestring.y_max()\n linestring.y_min()\n\n polygon.area()\n polygon.perimeter()\n\n multipolygon.n_points()\n multipolygon.n_rings()",
"def test_cover_geometry_nonshapely_geom(tiler):\n with pytest.raises(ValueError):\n for tile in cover_geometry(tiler, None, 0):\n pass",
"def test_get_xy_space():\n pass",
"def test_DistanceMatrices_getter(self):\r\n self.assertEqual(self.empty_dms.DistanceMatrices, [])\r\n self.assertEqual(self.single_dms.DistanceMatrices, [self.overview_dm])\r\n self.assertEqual(self.double_dms.DistanceMatrices,\r\n [self.overview_dm, self.single_ele_dm])",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def spatial(self):",
"def get_geometry(self, selection_name):",
"def test_polygonize():\n # A collection with one non-zero-area Polygon is returned as a Polygon.\n geom1 = GeometryCollection([POLY, ZERO_POLY])\n result1 = polygonize(geom1)\n assert result1.geom_type == \"Polygon\"\n assert result1.area == 1.0\n\n # A collection with multiple non-zero-area polygons is returned as a MultiPolygon.\n geom2 = GeometryCollection([POLY, POLY])\n result2 = polygonize(geom2)\n assert result2.geom_type == \"MultiPolygon\"\n assert result2.area == 2.0\n\n # Zero-area geometries are not permitted.\n with pytest.raises(ValueError) as err:\n _ = polygonize(ZERO_POLY)\n assert err.match(\"Geometry has zero area\")",
"def test_01_get_geometry_collection_list(self):\n response = self.client.get(reverse('collection-lc'), format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def ground_contact_geoms(self):\n raise NotImplementedError",
"def test_path_coordinates():\n\t# check that coordinates array is not empty\n\ttest_coordinates, _ = path_instance.generate_random_path(trial_samples)\n\tassert test_coordinates.any() != 0.\n\t# check that generated particle has enough samples to go close to panes\n\t# assert\n\t# max(map(lambda p: np.linalg.norm(p.center - geom_instance.source_position), pane_instance.z_offset))",
"def test_reading_and_writing_of_vector_polygon_data(self):\n\n # Read and verify test data\n vectorname = 'kecamatan_jakarta_osm.shp'\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n attributes = layer.get_data()\n\n assert layer.is_polygon_data\n\n # Check basic data integrity\n N = len(layer)\n\n assert len(geometry) == N\n assert len(attributes) == N\n assert len(attributes[0]) == 2\n\n assert FEATURE_COUNTS[vectorname] == N\n assert isinstance(layer.get_name(), basestring)\n\n # Check projection\n wkt = layer.get_projection(proj4=False)\n assert wkt.startswith('GEOGCS')\n\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check each polygon\n for i in range(N):\n geom = geometry[i]\n n = geom.shape[0]\n assert n >= 2\n assert geom.shape[1] == 2\n\n # Check that polygon is closed\n assert numpy.allclose(geom[0], geom[-1], rtol=0)\n\n # But that not all points are the same\n max_dist = 0\n for j in range(n):\n d = numpy.sum((geom[j] - geom[0]) ** 2) / n\n if d > max_dist:\n max_dist = d\n assert max_dist > 0\n\n # Check integrity of each feature\n expected_features = {13: {'KAB_NAME': 'JAKARTA PUSAT',\n 'KEC_NAME': 'SAWAH BESAR'},\n 20: {'KAB_NAME': 'JAKARTA SELATAN',\n 'KEC_NAME': 'MAMPANG PRAPATAN'}}\n\n for i in range(N):\n # Consistency with attributes read manually with qgis\n\n if i in expected_features:\n att = attributes[i]\n exp = expected_features[i]\n\n for key in exp:\n msg = ('Expected attribute %s was not found in feature %i'\n % (key, i))\n assert key in att, msg\n\n a = att[key]\n e = exp[key]\n msg = 'Got %s: \"%s\" but expected \"%s\"' % (key, a, e)\n assert a == e, msg\n\n # Write data back to file\n # FIXME (Ole): I would like to use gml here, but OGR does not\n # store the spatial reference! 
Ticket #18\n out_filename = unique_filename(suffix='.shp')\n Vector(geometry=geometry, data=attributes, projection=wkt,\n geometry_type='polygon').write_to_file(out_filename)\n\n # Read again and check\n layer = read_layer(out_filename)\n assert layer.is_polygon_data\n geometry_new = layer.get_geometry()\n attributes_new = layer.get_data()\n\n N = len(layer)\n assert len(geometry_new) == N\n assert len(attributes_new) == N\n\n for i in range(N):\n assert numpy.allclose(geometry[i],\n geometry_new[i],\n rtol=1.0e-6) # OGR works in single precision\n\n assert len(attributes_new[i]) == 2\n for key in attributes_new[i]:\n assert attributes_new[i][key] == attributes[i][key]",
"def is_geometry(geometry, **kwargs):\n return lib.is_geometry(geometry, **kwargs)",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_query(config):\n\n p = PostgreSQLProvider(config)\n feature_collection = p.query()\n assert feature_collection.get('type', None) == 'FeatureCollection'\n features = feature_collection.get('features', None)\n assert features is not None\n feature = features[0]\n properties = feature.get('properties', None)\n assert properties is not None\n geometry = feature.get('geometry', None)\n assert geometry is not None",
"def test_coord_preceding_fs(self):",
"def test_get_coord_by_attr_valid():\n pass",
"def test_query_empty_geometry():\n empty = Polygon()\n point = Point(1, 0.5)\n geoms = [empty, point]\n tree = STRtree(geoms)\n query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])\n results = tree.query(query)\n assert len(results) == 1\n assert results[0] == point",
"def test_vector_class_geometry_types(self):\n\n # So far the admissible classes are Point, Line and Polygon\n tmp_filename = unique_filename(suffix='.shp')\n\n # Check that one single polygon works\n P = numpy.array([[106.79, -6.23],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21]])\n v = Vector(geometry=[P])\n assert v.is_polygon_data\n assert len(v) == 1\n\n v_ref = Vector(geometry=[P], geometry_type='polygon')\n assert v_ref.is_polygon_data\n assert len(v_ref) == 1\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n for i in range(len(v_ref)):\n x = v_ref.get_geometry()[i]\n y = v_file.get_geometry()[i]\n msg = 'Read geometry %s, but expected %s' % (y, x)\n assert numpy.allclose(x, y), msg\n\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_polygon_data\n assert v_file.geometry_type == 3\n\n # Then a more complex dataset\n test_data = [numpy.array([[122.226889, -8.625599],\n [122.227299, -8.624500],\n [122.227409, -8.624221],\n [122.227536, -8.624059]]),\n numpy.array([[122.237129, -8.628637],\n [122.233170, -8.627332],\n [122.231621, -8.626837],\n [122.231021, -8.626557]]),\n numpy.array([[122.247938, -8.632926],\n [122.247940, -8.633560],\n [122.247390, -8.636220]]),\n numpy.array([[122.22, -8.6256],\n [122.23, -8.6245],\n [122.24, -8.6242],\n [122.22, -8.6240]]),\n numpy.array([[122.24, -8.63],\n [122.23, -8.63],\n [122.23, -8.62],\n [122.23, -8.61]]),\n numpy.array([[122.25, -8.63],\n [122.24, -8.633],\n [122.23, -8.64]])]\n\n # Point data\n v_ref = Vector(geometry=test_data[0])\n assert v_ref.is_point_data\n assert v_ref.geometry_type == 1\n data_bbox = v_ref.get_bounding_box()\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_point_data\n assert v_file.geometry_type == 1\n assert numpy.allclose(v_file.get_bounding_box(), data_bbox,\n rtol=1.0e-12, atol=1.0e-12)\n\n v = Vector(geometry=test_data[0], 
geometry_type='point')\n assert v.is_point_data\n assert v_ref == v\n\n v = Vector(geometry=test_data[0], geometry_type=1)\n assert v.is_point_data\n assert v_ref == v\n\n # Line data\n v_ref = Vector(geometry=test_data, geometry_type='line')\n assert v_ref.is_line_data\n assert v_ref.geometry_type == 2\n data_bbox = v_ref.get_bounding_box()\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_line_data\n assert v_file.geometry_type == 2\n assert numpy.allclose(v_file.get_bounding_box(), data_bbox,\n rtol=1.0e-12, atol=1.0e-12)\n\n v = Vector(geometry=test_data, geometry_type=2)\n assert v == v_ref\n\n # Polygon data\n v_ref = Vector(geometry=test_data)\n assert v_ref.is_polygon_data\n assert v_ref.geometry_type == 3\n data_bbox = v_ref.get_bounding_box()\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_ref == v_file\n assert v_file.is_polygon_data\n assert v_file.geometry_type == 3\n assert numpy.allclose(v_file.get_bounding_box(), data_bbox,\n rtol=1.0e-12, atol=1.0e-12)\n\n v = Vector(geometry=test_data, geometry_type='polygon')\n assert v == v_ref\n\n v = Vector(geometry=test_data, geometry_type=3)\n assert v == v_ref",
"def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json, self.test_project.get_aoi_geometry_as_geojson())"
] | [
"0.6327447",
"0.62089115",
"0.61037767",
"0.60280645",
"0.59578305",
"0.58143765",
"0.5794815",
"0.5771627",
"0.57589114",
"0.5756427",
"0.5716527",
"0.56913483",
"0.56528866",
"0.5630486",
"0.5605773",
"0.55709183",
"0.55142444",
"0.54942834",
"0.5490078",
"0.5443508",
"0.54096764",
"0.54051787",
"0.540056",
"0.540056",
"0.539246",
"0.53877234",
"0.53818214",
"0.53774446",
"0.5373149",
"0.53719753"
] | 0.7388955 | 0 |
Test cspad2x2 geometry table. | def test_cspad2x2():
basedir = '/reg/g/psdm/detector/alignment/cspad2x2/calib-cspad2x2-01-2013-02-13/'
fname_geometry = basedir + 'calib/CsPad2x2::CalibV1/MecTargetChamber.0:Cspad2x2.1/geometry/0-end.data'
fname_data = basedir + 'cspad2x2.1-ndarr-ave-meca6113-r0028.dat'
geometry = GeometryAccess(fname_geometry, pbits=0o377, use_wide_pix_center=False)
amp_range = (0,15000)
# get pixel coordinate index arrays:
#xyc = xc, yc = 1000, 1000
#rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)
rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)
root, ext = os.path.splitext(fname_data)
arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)
arr.shape= (185,388,2)
logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))
img = img_from_pixel_arrays(rows,cols,W=arr)
axim = gg.plotImageLarge(img,amp_range=amp_range)
gg.move(500,10)
gg.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_access(geometry):\n geometry.print_list_of_geos()\n geometry.print_list_of_geos_children()\n\n logger.info('TOP GEO:')\n top_geo = geometry.get_top_geo()\n top_geo.print_geo_children()\n\n logger.info('INTERMEDIATE GEO (QUAD):')\n geo = geometry.get_geo('QUAD:V1', 0)\n #geo = geometry.get_top_geo()\n geo.print_geo_children()\n\n t0_sec = time()\n X,Y,Z = geo.get_pixel_coords(do_tilt=True)\n #X,Y = geo.get_2d_pixel_coords()\n s = 'X: %s' % str(X)\n s+= '\\n Consumed time to get 3d pixel coordinates = %7.3f sec' % (time()-t0_sec)\n s+= '\\n Geometry object: %s:%d X.shape:%s' % (geo.oname, geo.oindex, str(X.shape))\n logger.info(s)\n\n logger.info('Test of print_pixel_coords() for quad:')\n geometry.print_pixel_coords('QUAD:V1', 1)\n logger.info('Test of print_pixel_coords() for CSPAD:')\n geometry.print_pixel_coords()\n\n s = 'Test of get_pixel_areas() for QUAD:'\n A = geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0:5,190:198]:\\n' + str(A[0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_pixel_areas() for CSPAD:'\n A = top_geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0,0:5,190:198]:\\n' + str(A[0,0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_size_geo_array()'\n s+= '\\n for QUAD: %d' % geo.get_size_geo_array()\n s+= '\\n for CSPAD: %d' % top_geo.get_size_geo_array()\n logger.info(s)\n\n s = 'Test of get_pixel_scale_size()'\n s+= '\\n for QUAD : %8.2f' % geo.get_pixel_scale_size()\n s+= '\\n for CSPAD : %8.2f' % top_geo.get_pixel_scale_size()\n s+= '\\n for geometry: %8.2f' % geometry.get_pixel_scale_size()\n logger.info(s)\n\n s = 'Test of get_dict_of_comments():'\n d = geometry.get_dict_of_comments()\n s+= '\\n d[0] = %s' % str(d[0])\n logger.info(s)",
"def test_extract_geometry():\r\n file_path = 'C:/Oregon_State/Spring_2019/Soft_dev_eng/StoveOpt/tests/Stove_test_Geometry.xlsx'\r\n pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y = extract_geometry(file_path)\r\n assert pt2x == 0.1\r\n assert pt2z == 0\r\n assert pt2y == 0\r\n assert pt3x == 0\r\n assert pt3z == 0.15\r\n assert pt3y == 0\r\n assert pt4x == 0.1\r\n assert pt4z == 0.15\r\n assert pt4y == 0\r\n assert pt5x == 0.1\r\n assert pt5z == 0.16\r\n assert pt5y == 0\r\n assert pt6x == 0\r\n assert pt6z == 0.16\r\n assert pt6y == 0\r\n assert pt7x == 0\r\n assert pt7z == 0.3\r\n assert pt7y == 0\r\n assert pt8x == 0.1\r\n assert pt8z == 0.3\r\n assert pt8y == 0\r\n assert pt9x == 0.17\r\n assert pt9z == 0.3\r\n assert pt9y == 0\r\n assert pt10x == -0.07\r\n assert pt10z == 0.3\r\n assert pt10y == 0\r\n assert pt11x == -0.07\r\n assert pt11z == 0.5\r\n assert pt11y == 0\r\n assert pt12x == -.04\r\n assert pt12z == 0.5\r\n assert pt12y == 0\r\n assert pt13x == 0.14\r\n assert pt13z == 0.5\r\n assert pt13y == 0\r\n assert pt14x == 0.17\r\n assert pt14z == 0.5\r\n assert pt14y == 0\r\n assert pt15x == -0.04\r\n assert pt15z == 0.33\r\n assert pt15y == 0\r\n assert pt16x == 0.14\r\n assert pt16z == 0.33\r\n assert pt16y == 0\r\n #assert U_100x == 1\r\n #assert U_100y == 0\r\n #assert U_100z == 0\r",
"def test_plot_cspad(geometry, fname_data, amp_range=(0,0.5)):\n #rad1 = 93\n #rad2 = 146\n rad1 = 655\n rad2 = 670\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 500, 500# None\n\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=None)\n rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n\n ixo, iyo = geometry.point_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n logger.info('Detector origin indexes ixo:%d iyo:%d' % (ixo, iyo))\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n arr.shape= (4,8,185,388)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n\n arr.shape = rows.shape\n img = img_from_pixel_arrays(rows, cols, W=arr)\n\n rcc_ring = (iyo, ixo)\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.drawCircle(axim, rcc_ring, rad1, linewidth=1, color='w', fill=False)\n gg.drawCircle(axim, rcc_ring, rad2, linewidth=1, color='w', fill=False)\n gg.drawCenter(axim, rcc_ring, rad1, linewidth=1, color='w')\n gg.move(500,10)\n gg.show()",
"def test_elements_geometry():\n # GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01\"\n keys = [\"landuse\"]\n values = [\"grass\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_geodataframe()\n del client\n\n # THEN\n assert len(result.geometry) == 9",
"def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()",
"def test_get_xy_space():\n pass",
"def test_geometry():\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"inflated\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)\n\n # Test quad with sphere\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"sphere\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)",
"def test_under_11km():\n z = np.array([500.0, 2500.0, 6500.0, 9000.0, 11000.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([500.0, 2499.0, 6493.0, 8987.0, 10981.0])\n expected_T = np.array([284.900, 271.906, 245.943, 229.733, 216.774])\n expected_p = np.array([95461.0, 74691.0, 44075.0, 30800.0, 22699.0])\n expected_rho = np.array([1.1673, 0.95695, 0.62431, 0.46706, 0.36480])\n\n h, T, p, rho = coesa.table(h)\n \n assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=3)\n assert_array_almost_equal(p, expected_p, decimal=0)\n assert_array_almost_equal(rho, expected_rho, decimal=4)",
"def test_1_square_attributes(self):\r\n self.assertEqual(self.S0.width, 2)\r\n self.assertEqual(self.S0.height, 2)\r\n self.assertEqual(self.S0.x, 0)\r\n self.assertEqual(self.S0.y, 0)",
"def test_under_86km():\n z = np.array([50000.0, 70000.0, 86000.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([49610.0, 69238., 84852.0])\n expected_T = np.array([270.65, 219.585, 186.87])\n expected_p = np.array([79.779, 5.2209, 0.37338])\n expected_rho = np.array([0.0010269, 0.000082829, 0.000006958])\n\n h, T, p, rho = coesa.table(h)\n \n assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=2)\n assert_array_almost_equal(p, expected_p, decimal=3)\n assert_array_almost_equal(rho, expected_rho, decimal=7)",
"def test_DFT_rect_adj():\n test_DFT_rect(centering='ADJUSTIBLE', outname='DFT1Radj_')",
"def test_qcschema_round_trip(self):\n\n # get a molecule qcschema\n import qcportal as ptl\n\n client = ptl.FractalClient()\n ds = client.get_collection(\"OptimizationDataset\", \"SMIRNOFF Coverage Set 1\")\n # grab an entry from the optimization data set\n entry = ds.get_entry(\"coc(o)oc-0\")\n # now make the molecule from the record instance with the geometry\n mol = Molecule.from_qcschema(entry, client)\n # now grab the initial molecule record\n qca_mol = client.query_molecules(id=entry.initial_molecule)[0]\n # mow make sure the majority of the qcschema attributes are the same\n # note we can not compare the full dict due to qcelemental differences\n qcschema = mol.to_qcschema()\n assert qcschema.atom_labels.tolist() == qca_mol.atom_labels.tolist()\n assert qcschema.symbols.tolist() == qca_mol.symbols.tolist()\n # due to conversion useing different programs there is a slight difference here\n assert qcschema.geometry.flatten().tolist() == pytest.approx(\n qca_mol.geometry.flatten().tolist(), rel=1.0e-5\n )\n assert qcschema.connectivity == qca_mol.connectivity\n assert qcschema.atomic_numbers.tolist() == qca_mol.atomic_numbers.tolist()\n assert qcschema.fragment_charges == qca_mol.fragment_charges\n assert qcschema.fragment_multiplicities == qca_mol.fragment_multiplicities\n assert qcschema.fragments[0].tolist() == qca_mol.fragments[0].tolist()\n assert qcschema.mass_numbers.tolist() == qca_mol.mass_numbers.tolist()\n assert qcschema.name == qca_mol.name\n assert qcschema.masses.all() == qca_mol.masses.all()\n assert qcschema.molecular_charge == qca_mol.molecular_charge\n assert qcschema.molecular_multiplicity == qca_mol.molecular_multiplicity\n assert qcschema.real.all() == qca_mol.real.all()",
"def test_create_field_3(self):\n # Dimension coordinates\n data = numpy.arange(9.0) + 20\n data[-1] = 34\n dim0 = cfdm.DimensionCoordinate(data=cfdm.Data(data))\n dim0.set_property(\"standard_name\", \"grid_longitude\")\n dim0.set_property(\"units\", \"degrees\")\n\n array = dim0.data.array\n\n array = numpy.array([array - 0.5, array + 0.5]).transpose((1, 0))\n array[-2, 1] = 30\n array[-1, :] = [30, 36]\n dim0.set_bounds(cfdm.Bounds(data=cfdm.Data(array)))\n\n dim1 = cfdm.DimensionCoordinate(data=cfdm.Data(numpy.arange(10.0)))\n dim1.set_property(\"standard_name\", \"grid_latitude\")\n dim1.set_property(\"units\", \"degrees\")\n\n dim2 = cfdm.DimensionCoordinate(\n data=cfdm.Data([1.5]),\n bounds=cfdm.Bounds(data=cfdm.Data([[1, 2.0]])),\n )\n dim2.set_property(\n \"standard_name\", \"atmosphere_hybrid_height_coordinate\"\n )\n dim2.set_property(\"computed_standard_name\", \"altitude\")\n\n dim3 = cfdm.DimensionCoordinate(data=cfdm.Data(numpy.array([15.0])))\n dim3.set_property(\"standard_name\", \"time\")\n dim3.set_property(\"units\", \"days since 2004-06-01\")\n\n dim3.set_bounds(cfdm.Bounds(data=cfdm.Data([[0, 30.0]])))\n\n # dim3.set_geometry('climatology')\n\n # Auxiliary coordinates\n ak = cfdm.DomainAncillary(data=cfdm.Data([10.0]))\n ak.set_property(\"units\", \"m\")\n ak.set_bounds(cfdm.Bounds(data=cfdm.Data([[5, 15.0]])))\n\n bk = cfdm.DomainAncillary(data=cfdm.Data([20.0]))\n bk.set_bounds(cfdm.Bounds(data=cfdm.Data([[14, 26.0]])))\n\n aux2 = cfdm.AuxiliaryCoordinate(\n data=cfdm.Data(numpy.arange(-45, 45, dtype=\"int32\").reshape(10, 9))\n )\n aux2.set_property(\"units\", \"degree_N\")\n aux2.set_property(\"standard_name\", \"latitude\")\n\n aux3 = cfdm.AuxiliaryCoordinate(\n data=cfdm.Data(numpy.arange(60, 150, dtype=\"int32\").reshape(9, 10))\n )\n aux3.set_property(\"standard_name\", \"longitude\")\n aux3.set_property(\"units\", \"degreeE\")\n\n array = numpy.ma.array(\n [\n \"alpha\",\n \"beta\",\n \"gamma\",\n \"delta\",\n \"epsilon\",\n 
\"zeta\",\n \"eta\",\n \"theta\",\n \"iota\",\n \"kappa\",\n ],\n )\n array[0] = numpy.ma.masked\n aux4 = cfdm.AuxiliaryCoordinate(data=cfdm.Data(array))\n aux4.set_property(\"standard_name\", \"greek_letters\")\n\n # Cell measures\n msr0 = cfdm.CellMeasure(\n data=cfdm.Data(1 + numpy.arange(90.0).reshape(9, 10) * 1234)\n )\n msr0.set_measure(\"area\")\n msr0.set_property(\"units\", \"km2\")\n\n # Data\n data = cfdm.Data(numpy.arange(90.0).reshape(10, 9))\n\n properties = {\"units\": \"m s-1\"}\n\n f = cfdm.Field(properties=properties)\n f.set_property(\"standard_name\", \"eastward_wind\")\n\n axisX = f.set_construct(cfdm.DomainAxis(9))\n axisY = f.set_construct(cfdm.DomainAxis(10))\n axisZ = f.set_construct(cfdm.DomainAxis(1))\n axisT = f.set_construct(cfdm.DomainAxis(1))\n\n f.set_data(data, axes=[axisY, axisX])\n\n x = f.set_construct(dim0, axes=[axisX])\n y = f.set_construct(dim1, axes=[axisY])\n z = f.set_construct(dim2, axes=[axisZ])\n f.set_construct(dim3, axes=[axisT])\n\n lat = f.set_construct(aux2, axes=[axisY, axisX])\n lon = f.set_construct(aux3, axes=[axisX, axisY])\n f.set_construct(aux4, axes=[axisY])\n\n ak = f.set_construct(ak, axes=[axisZ])\n bk = f.set_construct(bk, axes=[axisZ])\n\n # Coordinate references\n # ref0 = cfdm.CoordinateReference(\n # parameters={'grid_mapping_name': 'rotated_latitude_longitude',\n # 'grid_north_pole_latitude': 38.0,\n # 'grid_north_pole_longitude': 190.0,\n # 'earth_radius': 6371007,},\n # coordinates=[x, y, lat, lon]\n # )\n\n coordinate_conversion = cfdm.CoordinateConversion(\n parameters={\n \"grid_mapping_name\": \"rotated_latitude_longitude\",\n \"grid_north_pole_latitude\": 38.0,\n \"grid_north_pole_longitude\": 190.0,\n }\n )\n\n datum = cfdm.Datum(parameters={\"earth_radius\": 6371007})\n\n ref0 = cfdm.CoordinateReference(\n coordinate_conversion=coordinate_conversion,\n datum=datum,\n coordinates=[x, y, lat, lon],\n )\n\n f.set_construct(msr0, axes=[axisX, axisY])\n\n f.set_construct(ref0)\n\n orog = 
cfdm.DomainAncillary(data=f.get_data())\n orog.set_property(\"standard_name\", \"surface_altitude\")\n orog.set_property(\"units\", \"m\")\n orog = f.set_construct(orog, axes=[axisY, axisX])\n\n datum1 = cfdm.Datum({\"earth_radius\": 6371007})\n\n coordinate_conversion1 = cfdm.CoordinateConversion(\n parameters={\n \"standard_name\": \"atmosphere_hybrid_height_coordinate\",\n \"computed_standard_name\": \"altitude\",\n },\n domain_ancillaries={\"orog\": orog, \"a\": ak, \"b\": bk},\n )\n\n ref1 = cfdm.CoordinateReference(\n datum=datum1,\n coordinate_conversion=coordinate_conversion1,\n coordinates=[z],\n )\n\n ref1 = f.set_construct(ref1)\n\n # Field ancillary variables\n # g = f.transpose([1, 0])\n g = f.copy()\n # g.standard_name = 'ancillary0'\n # g *= 0.01\n anc = cfdm.FieldAncillary(data=g.get_data())\n anc.standard_name = \"ancillaryA\"\n f.set_construct(anc, axes=[axisY, axisX])\n\n g = f[0]\n g = g.squeeze()\n # g.standard_name = 'ancillary2'\n # g *= 0.001\n anc = cfdm.FieldAncillary(data=g.get_data())\n anc.standard_name = \"ancillaryB\"\n f.set_construct(anc, axes=[axisX])\n\n g = f[..., 0]\n g = g.squeeze()\n # g.standard_name = 'ancillary3'\n # g *= 0.001\n anc = cfdm.FieldAncillary(data=g.get_data())\n anc.standard_name = \"ancillaryC\"\n f.set_construct(anc, axes=[axisY])\n\n f.set_property(\"flag_values\", numpy.array([1, 2, 4], \"int32\"))\n f.set_property(\"flag_meanings\", \"a bb ccc\")\n f.set_property(\"flag_masks\", [2, 1, 0])\n\n cm0 = cfdm.CellMethod(\n axes=[axisX],\n method=\"mean\",\n qualifiers={\"interval\": [cfdm.Data(1, \"day\")], \"comment\": \"ok\"},\n )\n\n cm1 = cfdm.CellMethod(\n axes=[axisY], method=\"maximum\", qualifiers={\"where\": \"sea\"}\n )\n\n cm2 = cfdm.CellMethod(\n axes=[axisT], method=\"maximum\", qualifiers={\"within\": \"years\"}\n )\n\n cm3 = cfdm.CellMethod(\n axes=[axisT], method=\"minimum\", qualifiers={\"over\": \"years\"}\n )\n\n f.set_construct(cm0)\n f.set_construct(cm1)\n f.set_construct(cm2)\n 
f.set_construct(cm3)\n\n cfdm.write(f, self.filename, fmt=\"NETCDF3_CLASSIC\", verbose=verbose)\n\n g = cfdm.read(self.filename, verbose=verbose)\n\n self.assertEqual(\n len(g), 1, f\"Read produced too many fields: {len(g)} != 1\"\n )\n\n g = g[0].squeeze()\n\n self.assertEqual(\n sorted(f.constructs),\n sorted(g.constructs),\n f\"\\n\\nf (created in memory)\"\n f\"\\n{f.constructs}\"\n f\"\\n\\n{f.constructs.items()}\"\n f\"\\n\\ng (read from disk)\"\n f\"\\n{g.constructs}\"\n f\"\\n\\n{g.constructs.items()}\",\n )\n\n self.assertTrue(\n f.equals(f.copy(), verbose=verbose),\n \"Field f not equal to a copy of itself\",\n )\n\n self.assertTrue(\n g.equals(g.copy(), verbose=verbose),\n \"Field g not equal to a copy of itself\",\n )\n\n self.assertTrue(\n g.equals(f, verbose=verbose),\n \"Field not equal to itself read back in\",\n )\n\n x = g.dump(display=False)\n x = f.dump(display=False)\n\n g = cfdm.read(\n self.filename,\n verbose=verbose,\n extra=[\"domain_ancillary\"],\n warnings=warnings,\n )",
"def test_qcschema_round_trip(self):\n\n # get a molecule qcschema\n import qcportal as ptl\n\n client = ptl.FractalClient()\n ds = client.get_collection(\"OptimizationDataset\", \"SMIRNOFF Coverage Set 1\")\n # grab an entry from the optimization data set\n entry = ds.get_entry(\"coc(o)oc-0\")\n # now make the molecule from the record instance with the geometry\n mol = Molecule.from_qcschema(entry, client)\n # now grab the initial molecule record\n qca_mol = client.query_molecules(id=entry.initial_molecule)[0]\n # mow make sure the majority of the qcschema attributes are the same\n # note we can not compare the full dict due to qcelemental differences\n qcschema = mol.to_qcschema()\n assert qcschema.atom_labels.tolist() == qca_mol.atom_labels.tolist()\n assert qcschema.symbols.tolist() == qca_mol.symbols.tolist()\n # due to conversion using different programs there is a slight difference here\n assert qcschema.geometry.flatten().tolist() == pytest.approx(\n qca_mol.geometry.flatten().tolist(), rel=1.0e-5\n )\n assert qcschema.connectivity == qca_mol.connectivity\n assert qcschema.atomic_numbers.tolist() == qca_mol.atomic_numbers.tolist()\n assert qcschema.fragment_charges == qca_mol.fragment_charges\n assert qcschema.fragment_multiplicities == qca_mol.fragment_multiplicities\n assert qcschema.fragments[0].tolist() == qca_mol.fragments[0].tolist()\n assert qcschema.mass_numbers.tolist() == qca_mol.mass_numbers.tolist()\n assert qcschema.name == qca_mol.name\n assert qcschema.masses.all() == qca_mol.masses.all()\n assert qcschema.molecular_charge == qca_mol.molecular_charge\n assert qcschema.molecular_multiplicity == qca_mol.molecular_multiplicity\n assert qcschema.real.all() == qca_mol.real.all()",
"def test_c0q1(self):\n self.check_c0q1(test_hexMesh_3x3=False,use_petsc=True, name=\"_proteusMesh_\")",
"def test_geo_ops_smoke(geo_table):\n t = geo_table\n\n # alias for fields\n point = t.geo_point\n linestring = t.geo_linestring\n polygon = t.geo_polygon\n multipolygon = t.geo_multipolygon\n\n # test ops\n point.srid()\n point.x()\n point.y()\n\n linestring.contains(point)\n linestring.end_point()\n linestring.length()\n linestring.max_distance(point)\n linestring.point_n(1)\n linestring.start_point()\n linestring.x_max()\n linestring.x_min()\n linestring.y_max()\n linestring.y_min()\n\n polygon.area()\n polygon.perimeter()\n\n multipolygon.n_points()\n multipolygon.n_rings()",
"def test_cover_geometry_nonshapely_geom(tiler):\n with pytest.raises(ValueError):\n for tile in cover_geometry(tiler, None, 0):\n pass",
"def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')",
"def test_rebincpds_geom(self):\n command = '{0} -r 1.03'.format(\n os.path.join(self.datadir, 'monol_test_E3-50_cpds') +\n HEN_FILE_EXTENSION)\n hen.rebin.main(command.split())\n os.path.exists(os.path.join(self.datadir,\n 'monol_test_E3-50_cpds_rebin1.03' +\n HEN_FILE_EXTENSION))",
"def test_coord_preceding_fs(self):",
"def test_run_spatial_function(session):\n factories.ConnectionNodeFactory()\n q = session.query(func.ST_AsGeoJSON(models.ConnectionNode.the_geom))\n q.first()",
"def test_cover_geometry_poly_w_hole1(tiler, poly_w_hole):\n tiles = [tile for tile in cover_geometry(tiler, poly_w_hole, 7)]\n assert len(tiles) == 11\n assert set(tiles) == set([(72, 22, 7), (74, 21, 7), (75, 22, 7), (73, 20, 7), (74, 22, 7), (73, 22, 7), (74, 20, 7), (73, 21, 7), (75, 21, 7), (72, 21, 7), (72, 20, 7)])",
"def test_generalized_banana_polygon_is_valid():\n park = query_row(db_conf, 'osm_landusages', 7101)\n # geometry is not valid\n assert not park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen0', 7101)\n # but simplified geometies are valid\n assert park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen1', 7101)\n assert park['geometry'].is_valid, park",
"def test_single_quadrant(self):",
"def init_grid_geometry(self):\n self.vc = self.grid.cells_center() # circumcenters\n self.ec = self.grid.edges_center()\n \n self.c1 = self.grid.edges['cells'][:,0]\n self.c2 = self.grid.edges['cells'][:,1]\n\n # distance between cell centers\n self.d_j = utils.mag( self.vc[self.c1] - self.vc[self.c2] )\n bdry=self.c2<0\n # grid has a few places where vc is coincident with outer boundary, thanks\n # to janet\n self.d_j[bdry] = 2*utils.mag( self.vc[self.c1[bdry]] - self.ec[bdry] ).clip(self.d_j_min,np.inf)\n self.l_j = self.grid.edges_length()\n\n self.normal_j = self.grid.edges_normals()\n self.area_c = self.grid.cells_area()\n\n self.K_j = 100*np.ones(self.grid.Nedges())\n\n j_valid=~self.grid.edges['deleted']\n\n print(\"Checking finite geometry\")\n assert np.all( np.isfinite(self.d_j[j_valid]))\n assert np.all( np.isfinite(self.l_j[j_valid]))\n assert np.all( np.isfinite(self.area_c))\n assert np.all( np.isfinite(self.normal_j[j_valid]))\n assert np.all( self.d_j[j_valid] > 0 )\n assert np.all( self.l_j[j_valid] > 0 )\n assert np.all( self.area_c > 0 )",
"def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_grid(self):\n self.testpzz.grid()\n rows = len(self.testpzz.board)\n cols = len(self.testpzz.board[0])\n # Success\n self.assertEqual(self.ROWS, rows)\n self.assertEqual(self.COLS, cols)",
"def test_pointnum1():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(0, 0), radius=300, thickness=10)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13",
"def test_sky_coord_classic(self):\n sc_cen = SkyCoord(SCS_CENTER)\n tab = conesearch.conesearch(\n sc_cen, SCS_RADIUS, catalog_db=self.url, verbose=self.verbose)\n assert len(tab) > 0",
"def test_constants():\n c = pycvm.Constants()\n assert c.get_number_of_layers() == 3, 'number of layers is wrong'\n assert c.get_utm_zone() == 10, 'utm zone is wrong' \n \n assert c.get_number_of_grid_points_in_x(pycvm.LayerIdentifier.top) == 3271, 'nx top wrong'\n assert c.get_number_of_grid_points_in_x(pycvm.LayerIdentifier.middle) == 2181, 'nx middle wrong'\n assert c.get_number_of_grid_points_in_x(pycvm.LayerIdentifier.bottom) == 727, 'nx bottom wrong'\n\n assert c.get_number_of_grid_points_in_y(pycvm.LayerIdentifier.top) == 5367, 'ny top wrong'\n assert c.get_number_of_grid_points_in_y(pycvm.LayerIdentifier.middle) == 3578, 'ny middle wrong'\n assert c.get_number_of_grid_points_in_y(pycvm.LayerIdentifier.bottom) == 1193, 'ny bottom wrong'\n\n assert c.get_number_of_grid_points_in_z(pycvm.LayerIdentifier.top) == 13, 'nz top wrong'\n assert c.get_number_of_grid_points_in_z(pycvm.LayerIdentifier.middle) == 29, 'nz middle wrong'\n assert c.get_number_of_grid_points_in_z(pycvm.LayerIdentifier.bottom) == 55, 'nz bottom wrong'\n\n assert abs(c.get_grid_spacing_in_x(pycvm.LayerIdentifier.top) - 200) < 1.e-13, 'dx top wrong'\n assert abs(c.get_grid_spacing_in_x(pycvm.LayerIdentifier.middle) - 300) < 1.e-13, 'dx middle wrong'\n assert abs(c.get_grid_spacing_in_x(pycvm.LayerIdentifier.bottom) - 900) < 1.e-13, 'dx bottom wrong'\n\n assert abs(c.get_grid_spacing_in_y(pycvm.LayerIdentifier.top) - 200) < 1.e-13, 'dy top wrong'\n assert abs(c.get_grid_spacing_in_y(pycvm.LayerIdentifier.middle) - 300) < 1.e-13, 'dy middle wrong'\n assert abs(c.get_grid_spacing_in_y(pycvm.LayerIdentifier.bottom) - 900) < 1.e-13, 'dy bottom wrong'\n\n assert abs(c.get_grid_spacing_in_z(pycvm.LayerIdentifier.top) - 100) < 1.e-13, 'dz top wrong'\n assert abs(c.get_grid_spacing_in_z(pycvm.LayerIdentifier.middle) - 300) < 1.e-13, 'dz middle wrong'\n assert abs(c.get_grid_spacing_in_z(pycvm.LayerIdentifier.bottom) - 900) < 1.e-13, 'dz bottom wrong'\n\n \"\"\" \n auto mLCM = static_cast<double> 
(std::lcm(std::lcm(200, 300), 900));\n EXPECT_NEAR(c.getLeastCommonMultipleOfGridSpacingsInXAndY(), mLCM, 1.e-12);\n \"\"\"\n print(\"Passed constants test\")"
] | [
"0.63995576",
"0.6172929",
"0.59261703",
"0.5890235",
"0.5848824",
"0.5845203",
"0.578988",
"0.57143784",
"0.56967634",
"0.5629314",
"0.56118125",
"0.56068027",
"0.5600486",
"0.55879253",
"0.5568533",
"0.5525954",
"0.5519003",
"0.54754615",
"0.546403",
"0.54613346",
"0.5454193",
"0.5444123",
"0.54269636",
"0.541364",
"0.5385801",
"0.5385707",
"0.5376822",
"0.53586966",
"0.53371334",
"0.5335427"
] | 0.63004273 | 1 |
Updates the state with new info from the packet.\n packet Packet with info to update from. | def update(self, packet):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, packet.LQI, packet.RSSI), None])\n\t\tfields.append(['PHY fields', '', None])\n\t\tphy = len(fields) - 1\n\t\tfields.append(['Frame length', len(packet.load), phy])\n\t\t\n\t\tfields.append(['MAC fields', '', None])\n\t\tmac = len(fields) - 1\n\t\tfields.append(['Frame control', packet.frameControl, mac])\n\t\tfields.append(['Frame Type', packet.frameType, mac])\n\t\tfields.append(['Security enabled', packet.securityEnabled, mac])\n\t\tfields.append(['Frame pending', packet.framePending, mac])\n\t\tfields.append(['Ack. request', packet.ackRequest, mac])\n\t\tfields.append(['Intra-PAN', packet.intraPAN, mac])\n\t\tfields.append(['Dest. addressing mode', packet.dstAddrMode, mac])\n\t\tfields.append(['Source addressing mode', packet.srcAddrMode, mac])\n\t\tfields.append(['Sequence number', packet.seqNumber, mac])\n\t\t\n\t\tif hasattr(packet, 'dstPANID'):\n\t\t\tfields.append(['Destination PAN-ID', packet.dstPANID, mac])\n\t\t\n\t\tif hasattr(packet, 'dstAddr'):\n\t\t\tfields.append(['Destination address', packet.dstAddr, mac])\n\t\t\n\t\tif hasattr(packet, 'srcPANID'):\n\t\t\tfields.append(['Source PAN-ID', packet.srcPANID, mac])\n\t\t\t\n\t\tif hasattr(packet, 'srcAddr'):\n\t\t\tfields.append(['Source address', packet.srcAddr, mac])\n\t\t\t\n\t\tif hasattr(packet, 'payload'):\n\t\t\tfields.append(['Payload', packet.payload, mac])\n\t\t\n\t\tif hasattr(packet, 'commandType'):\n\t\t\tfields.append(['Command type', packet.commandType, mac])\n\t\t\n\t\tif hasattr(packet, 'commandPayload'):\n\t\t\tfields.append(['Command payload', packet.commandPayload, mac])\n\t\t\n\t\tif hasattr(packet, 
'superFrameSpec'):\n\t\t\tfields.append(['Superframe specification', packet.superFrameSpec, mac])\n\t\t\tsfs = len(fields) - 1\n\t\t\tfields.append(['Beacon order', packet.beaconOrder, sfs])\n\t\t\tfields.append(['Superframe order', packet.superFrameOrder, sfs])\n\t\t\tfields.append(['finalCAPSlot', packet.finalCAPSlot, sfs])\n\t\t\tfields.append(['Batt. life extension', packet.battLifeExt, sfs])\n\t\t\tfields.append(['PAN Coordinator', packet.PANCoord, sfs])\n\t\t\tfields.append(['Association permit', packet.assocPermit, sfs])\n\t\t\n\t\tif hasattr(packet, 'GTS'):\n\t\t\tfields.append(['GTS specification', packet.GTS, mac])\n\t\t\tgts = len(fields) - 1\n\t\t\tfields.append(['GTS descriptor count', packet.GTSDescrCount, gts])\n\t\t\tfields.append(['GTS permit', packet.GTSPermit, gts])\n\t\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\t\tfields.append(['GTS directions', packet.GTSDirections, gts])\n\t\t\t\tfields.append(['GTS descriptors list', '', gts])\n\t\t\t\tdscList = len(fields) - 1\n\t\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\t\tfields.append(['Descriptor #'+str(i), '', dscList])\n\t\t\t\t\td = len(fields) - 1\n\t\t\t\t\tfields.append(['Device short address', packet.GTSDescriptors[i].deviceShortAddr, d])\n\t\t\t\t\tfields.append(['GTS starting slot', packet.GTSDescriptors[i].GTSStartingSlot, d])\n\t\t\t\t\tfields.append(['GTS length', packet.GTSDescriptors[i].GTSLength, d])\n\t\t\t\n\t\t\tfields.append(['Pending addresses list', '', gts])\n\t\t\tpnd = len(fields) - 1\n\t\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 0:\n\t\t\t\tfor i in xrange(int(self.numShortAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Short addr. #%i' % i, packet.shortAddrPndList[i], pnd])\n\n\t\t\t\tfor i in xrange(int(self.numLongAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Long addr. 
#%i' % i, packet.longAddrPndList[i], pnd])\n\t\t\n\t\tif hasattr(packet, 'bcnPayload'):\n\t\t\tfields.append(['Beacon payload', packet.bcnPayload, mac])\n\t\t\n\t\tself.beginInsertRows(QModelIndex(), 0, len(self.fields)+1)\n\t\tself.endInsertRows()\n\t\tfor field in fields:\n\t\t\tprint field",
"def update_table(self, packet):\n # add packet to list of updates (cache for later)\n self.updates.append(packet)\n packetMessage = packet[MESG]\n # if we don't need to coalesce, add entry to forwarding table\n if not self.coalesce(packet):\n # add a new entry into forwarding table\n # storing network, netmask, peer, localPref,\n # selfOrigin, AS Path, and Origin information\n self.forwarding_table.append({\n # SRCE\n SRCE: packet[SRCE],\n # DEST\n DEST: packet[DEST],\n # NTWK\n NTWK: packetMessage[NTWK],\n # NMSK\n NMSK: packetMessage[NMSK],\n # PEER IP\n PEER: packet[SRCE],\n # Local Pref\n LPRF: packetMessage[LPRF],\n # Self Origin\n SORG: packetMessage[SORG],\n # AS Path\n APTH: packetMessage[APTH],\n # Origin\n ORIG: packetMessage[ORIG],\n # CIDR Prefix Length\n \"CIDR\": self.get_prefix(packetMessage)\n })",
"def notify(self, packet):\n\t\tself.update_listeners(packet)",
"def update(self, new_gameStateData):\r\n pass",
"def update_to_state(self, game_state):\n pass",
"def update(self):\n self._state = 23",
"def process(self, packet):\n pass",
"def update(self):\n try:\n self._state = self.pushbullet.data[self._element]\n self._state_attributes = self.pushbullet.data\n except (KeyError, TypeError):\n pass",
"def tx_packet(self, packet):\r\n self.tx_cmd(packet.fctype, packet.nto, packet.narg1, packet.narg2, packet.smessage)",
"def updatePacketDetails(treeWidget, packet):\n\t#first remove all content\n\tfontBold = QFont()\n\tfontBold.setBold(True)\n\t\n\ttreeWidget.clear()\n\t\n\tt = QTreeWidgetItem(treeWidget)\n\tt.setText(0, 'Reception time')\n\tt.setText(1, '%s:%s:%s.%s' % tuple(packet.time))\n\t\n\tif packet.isInvalid:\n\t\t\treturn\n\t\n\ttrInfo = QTreeWidgetItem(treeWidget)\n\ttrInfo.setText(0, 'Transmission info')\n\ttrInfo.setText(1, 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, packet.LQI, packet.RSSI))\n\t\n\tPHY = QTreeWidgetItem(treeWidget)\n\tPHY.setText(0, 'PHY fields')\n\tPHY.setFont(0, fontBold)\n\n\tframeLength = QTreeWidgetItem(PHY)\n\tframeLength.setText(0, 'Frame length')\n\tframeLength.setText(1, '%i' % len(packet.load))\n\t\n\tMAC = QTreeWidgetItem(treeWidget)\n\tMAC.setText(0, 'MAC fields')\n\tMAC.setFont(0, fontBold)\n\t\n\tframeControl = QTreeWidgetItem(MAC)\n\tframeControl.setText(0, 'Frame control')\n\tframeControl.setText(1, packet.frameControl)\n\t\n\tframeType = QTreeWidgetItem(frameControl)\n\tframeType.setText(0, 'Frame Type')\n\tframeType.setText(1, packet.frameType)\n\t\n\tsecurityEnabled = QTreeWidgetItem(frameControl)\n\tsecurityEnabled.setText(0, 'Security enabled')\n\tsecurityEnabled.setText(1, packet.securityEnabled)\n\t\n\tframePending = QTreeWidgetItem(frameControl)\n\tframePending.setText(0, 'Frame pending')\n\tframePending.setText(1, packet.framePending)\n\t\n\tackRequest = QTreeWidgetItem(frameControl)\n\tackRequest.setText(0, 'Ack. request')\n\tackRequest.setText(1, packet.ackRequest)\n\t\n\tintraPAN = QTreeWidgetItem(frameControl)\n\tintraPAN.setText(0, 'Intra-PAN')\n\tintraPAN.setText(1, packet.intraPAN)\n\t\n\tdstAddrMode = QTreeWidgetItem(frameControl)\n\tdstAddrMode.setText(0, 'Dest. 
addressing mode')\n\tdstAddrMode.setText(1, packet.dstAddrMode)\n\t\n\tsrcAddrMode = QTreeWidgetItem(frameControl)\n\tsrcAddrMode.setText(0, 'Source addressing mode')\n\tsrcAddrMode.setText(1, packet.srcAddrMode)\n\t\n\tseqNumber = QTreeWidgetItem(MAC)\n\tseqNumber.setText(0, 'Sequence number')\n\tseqNumber.setText(1, packet.seqNumber)\n\t\n\tif hasattr(packet, 'dstPANID'):\n\t\tdstPANID = QTreeWidgetItem(MAC)\n\t\tdstPANID.setText(0, 'Destination PAN-ID')\n\t\tdstPANID.setText(1, packet.dstPANID)\n\t\n\tif hasattr(packet, 'dstAddr'):\n\t\tdstAddr = QTreeWidgetItem(MAC)\n\t\tdstAddr.setText(0, 'Destination address')\n\t\tdstAddr.setText(1, packet.dstAddr)\n\t\n\tif hasattr(packet, 'srcPANID'):\n\t\tsrcPANID = QTreeWidgetItem(MAC)\n\t\tsrcPANID.setText(0, 'Source PAN-ID')\n\t\tsrcPANID.setText(1, packet.srcPANID)\n\t\t\n\tif hasattr(packet, 'srcAddr'):\n\t\tsrcAddr = QTreeWidgetItem(MAC)\n\t\tsrcAddr.setText(0, 'Source address')\n\t\tsrcAddr.setText(1, packet.srcAddr)\n\t\t\n\tif hasattr(packet, 'payload'):\n\t\tpayload = QTreeWidgetItem(MAC)\n\t\tpayload.setText(0, 'Payload')\n\t\tpayload.setText(1, packet.payload)\n\t\n\tif hasattr(packet, 'commandType'):\n\t\tcommandType = QTreeWidgetItem(MAC)\n\t\tcommandType.setText(0, 'Command type')\n\t\tcommandType.setText(1, packet.commandType)\n\t\n\tif hasattr(packet, 'commandPayload'):\n\t\tcommandPayload = QTreeWidgetItem(MAC)\n\t\tcommandPayload.setText(0, 'Command payload')\n\t\tcommandPayload.setText(1, packet.commandPayload)\n\t\n\tif hasattr(packet, 'superFrameSpec'):\n\t\tsuperFrameSpec = QTreeWidgetItem(MAC)\n\t\tsuperFrameSpec.setText(0, 'Superframe specification')\n\t\tsuperFrameSpec.setText(1, packet.superFrameSpec)\n\t\n\t\tbeaconOrder = QTreeWidgetItem(superFrameSpec)\n\t\tbeaconOrder.setText(0, 'Beacon order')\n\t\tbeaconOrder.setText(1, packet.beaconOrder)\n\t\n\t\tsuperFrameOrder = QTreeWidgetItem(superFrameSpec)\n\t\tsuperFrameOrder.setText(0, 'Superframe order')\n\t\tsuperFrameOrder.setText(1, 
packet.superFrameOrder)\n\t\n\t\tfinalCAPSlot = QTreeWidgetItem(superFrameSpec)\n\t\tfinalCAPSlot.setText(0, 'finalCAPSlot')\n\t\tfinalCAPSlot.setText(1, packet.finalCAPSlot)\n\t\n\t\tbattLifeExt = QTreeWidgetItem(superFrameSpec)\n\t\tbattLifeExt.setText(0, 'Batt. life extension')\n\t\tbattLifeExt.setText(1, packet.battLifeExt)\n\t\n\t\tPANCoord = QTreeWidgetItem(superFrameSpec)\n\t\tPANCoord.setText(0, 'PAN Coordinator')\n\t\tPANCoord.setText(1, packet.PANCoord)\n\t\n\t\tassocPermit = QTreeWidgetItem(superFrameSpec)\n\t\tassocPermit.setText(0, 'Association permit')\n\t\tassocPermit.setText(1, packet.assocPermit)\n\t\n\tif hasattr(packet, 'GTS'):\n\t\tGTS = QTreeWidgetItem(MAC)\n\t\tGTS.setText(0, 'GTS specification')\n\t\tGTS.setText(1, packet.GTS)\n\t\n\t\tGTSDescrCount = QTreeWidgetItem(GTS)\n\t\tGTSDescrCount.setText(0, 'GTS descriptor count')\n\t\tGTSDescrCount.setText(1, packet.GTSDescrCount)\n\t\t\n\t\tGTSPermit = QTreeWidgetItem(GTS)\n\t\tGTSPermit.setText(0, 'GTS permit')\n\t\tGTSPermit.setText(1, packet.GTSPermit)\n\t\t\n\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\tGTSDirections = QTreeWidgetItem(GTS)\n\t\t\tGTSDirections.setText(0, 'GTS directions')\n\t\t\tGTSDirections.setText(1, packet.GTSDirections)\n\t\t\n\t\t\tGTSDescriptors = QTreeWidgetItem(GTS)\n\t\t\tGTSDescriptors.setText(0, 'GTS descriptors list')\n\t\t\tdescriptors = []\n\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\tdescriptor = [QTreeWidgetItem(GTSDescriptors)] * 3\n\t\t\t\tdescriptor[0].setText(0, 'Device short address')\n\t\t\t\tdescriptor[0].setText(1, packet.GTSDescriptors[i].deviceShortAddr)\n\t\t\t\tdescriptor[1].setText(0, 'GTS starting slot')\n\t\t\t\tdescriptor[1].setText(1, packet.GTSDescriptors[i].GTSStartingSlot)\n\t\t\t\tdescriptor[2].setText(0, 'GTS length')\n\t\t\t\tdescriptor[3].setText(1, packet.GTSDescriptors[i].GTSLength)\n\t\t\t\tdescriptors.append(descriptor)\n\t\t\n\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 
0:\n\t\t\tpendingAddr = QTreeWidgetItem(MAC)\n\t\t\tpendingAddr.setText(0, 'Pending addresses list')\n\t\t\tpndShort = []\n\t\t\tpndLong = []\n\t\t\tfor i in xrange(int(packet.numShortAddrPnd, 16)):\n\t\t\t\tpndShort.append(QTreeWidgetItem(pendingAddr))\n\t\t\t\tpndShort[i].setText(0, 'Pending short addr. #%i' % i)\n\t\t\t\tpndShort[i].setText(1, packet.shortAddrPndList[i])\n\t\t\tfor i in xrange(int(packet.numLongAddrPnd, 16)):\n\t\t\t\tpndLong.append(QTreeWidgetItem(pendingAddr))\n\t\t\t\tpndLong[i].setText(0, 'Pending long addr. #%i' % i)\n\t\t\t\tpndLong[i].setText(1, packet.longAddrPndList[i])\n\t\t\t\n\tif hasattr(packet, 'bcnPayload'):\n\t\tbcnPayload = QTreeWidgetItem(MAC)\n\t\tbcnPayload.setText(0, 'Beacon payload')\n\t\tbcnPayload.setText(1, packet.bcnPayload)",
"def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)",
"def update_state(self, dstate):\n pass",
"def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)",
"def handle_packet(self, packet):\n if self.compression:\n compression_len, packet = ParseVarInt(packet, consume=True)\n\n # if we have compressed data decompress it\n if compression_len != 0:\n packet = zlib.decompress(bytearray(packet))\n\n packet_id, packet = ParseVarInt(packet, consume=True)\n try:\n packet_id = str(self.state(packet_id))\n except ValueError:\n # print(\"Unknown packet ID %s for state %s\" % (hex(packet_id), self.state))\n pass\n\n try:\n func = getattr(self, \"handle_\" + packet_id.split(\".\")[1])\n packet = func(packet=packet)\n assert len(packet) == 0\n except AttributeError:\n # print(\"Unknown packet: %s\" % packet)\n pass",
"def update(self):\n self.data_service.update()\n attr = self.data_service.attributes.get(self._json_key)\n self._state = attr[\"soc\"]",
"def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)",
"def touch_packet (self, byte_count, now=None):\n if now is None: now = time.time()\n self.byte_count += byte_count\n self.packet_count += 1\n self.last_touched = now",
"def update(self):\n self._state = self._state",
"def handle_packet(cls, packet: scapypacket):\n pass",
"def update(self):\n self._state = get_local_ip()",
"def set_pkt(self, pkt):\n self.pkt = pkt",
"def change_state(self, timestamp, state):\n\t\tself.timestamp = timestamp\n\t\tself.state = state",
"def _do_some_logic(self, packet):\n\n\n pass",
"def _update_state(self) -> None:\n raise NotImplementedError(\"\")",
"def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes",
"def update(self, t):\n self.state.send(t)",
"def new_loop_packet(self, event):\n # packet has traveled back in time\n if self.end_ts > event.packet['dateTime']:\n self.logger.error(\"Service ignoring packet has dateTime of %f which is prior to previous packet %f\"\n %(event.packet['dateTime'], self.end_ts))\n else:\n start_ts = self.end_ts\n self.end_ts = event.packet['dateTime']\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. therefore use subscribed?\n self.logger.debug(\"Service packet prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))\n target_data = self.subscriber.get_accumulated_data(topic,\n start_ts, self.end_ts, event.packet['usUnits'])\n event.packet.update(target_data)\n self.logger.debug(\"Service packet after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))",
"def update(self):\r\n self._state = self._dev.state",
"def update(self, new_gameStateData):\r\n self.data = new_gameStateData\r\n self._refresh()",
"def update(self):\n self._state = read_input(self._port)"
] | [
"0.6572827",
"0.6166308",
"0.5936583",
"0.59047705",
"0.5738875",
"0.5675512",
"0.56577075",
"0.56325054",
"0.56124914",
"0.5608003",
"0.55850834",
"0.5570383",
"0.5556657",
"0.5554677",
"0.55363494",
"0.55109787",
"0.5457843",
"0.5445314",
"0.5434475",
"0.53559756",
"0.5328599",
"0.53208476",
"0.53193325",
"0.5312385",
"0.53003645",
"0.5285184",
"0.5282576",
"0.52769715",
"0.52712667",
"0.5268619"
] | 0.7768925 | 0 |
Override _search to order the results, according to some employee. The order is the following limit (limited leaves first, such as Legal Leaves) virtual remaining leaves (higher the better, so using reverse on sorted) This override is necessary because those fields are not stored and depends on an employee_id given in context. This sort will be done when there is an employee_id in context and that no other order has been given to the method. | def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
leave_ids = super (HolidaysType, self)._search (args, offset=offset, limit=limit, order=order, count=count,
access_rights_uid=access_rights_uid)
if not count and not order and self._context.get ('employee_id'):
leaves = self.browse (leave_ids)
sort_key = lambda l: (not l.limit, l.virtual_remaining_leaves)
return leaves.sorted (key=sort_key, reverse=True).ids
return leave_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n if 'emp_hours' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('emp.luggage_transfer.hours'),\n context.get('emp_hours'), [\"employee\"], context)\n args.append(('id', 'not in', [isinstance(d['employee'], tuple) and d['employee'][0] or d['employee'] for d in emp_ids]))\n if 'mission_line' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('mission_line'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n if 'illness' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.illness'),\n context.get('illness'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n\n if 'same' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('same'), [\"employee_id\"], context)\n args.append(('id', 'in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n \n if 'alternative_setting_id' in context:\n old_ids = super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context={}, limit=limit)\n\n alternative_setting_id = context.get('alternative_setting_id')\n setting_obj = self.pool.get('hr.alternative.setting')\n alternative_setting_id = setting_obj.browse(cr, uid, alternative_setting_id)\n degrees_ids = [\n x.id for x in alternative_setting_id.degrees_ids]\n degrees_ids += degrees_ids\n degrees_ids = tuple(degrees_ids)\n\n departments_ids = [\n x.id for x in alternative_setting_id.departments_ids]\n departments_ids += departments_ids\n departments_ids = tuple(departments_ids)\n\n 
ex_employees_ids = [\n x.id for x in alternative_setting_id.employees_ids]\n ex_employees_ids += ex_employees_ids\n ex_employees_ids = tuple(ex_employees_ids)\n\n\n old_ids_tuple = [x[0] for x in old_ids] + [x[0] for x in old_ids]\n old_ids_tuple = tuple(old_ids_tuple)\n\n accessed_ids = self.search(cr, uid, [])\n accessed_ids += accessed_ids\n accessed_ids = tuple(accessed_ids)\n\n if not old_ids_tuple:\n old_ids_tuple = (0,0)\n \n if not departments_ids:\n departments_ids = (0,0)\n cr.execute(\n ''' Select emp.id,(SELECT MAX(date) as max_date\n FROM hr_alternative_process_line\n WHERE employee_id=emp.id and state='confirmed')date\n from hr_employee emp\n where emp.degree_id in %s \n and emp.department_id not in %s \n and emp.state = 'approved' \n and emp.payroll_state = 'khartoum' \n and emp.id in %s \n and emp.gender='male' \n and emp.id in %s \n and emp.id not in %s \n order by date NULLS LAST''', (degrees_ids,departments_ids,old_ids_tuple,accessed_ids,ex_employees_ids))\n history = cr.dictfetchall()\n new_ids = []\n while True:\n try:\n new_ids.append( history.pop()['id'] )\n except:\n break\n\n temp = dict(old_ids)\n old_ids = [x for x in old_ids if x[0] in new_ids]\n #new_ids = [x for x in new_ids if x in accessed_ids]\n #print \"..........................temp\",new_ids\n #print \"......................\",[(x, temp.get(x,False) ) for x in new_ids]\n #print \"......................\",sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n return sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n\n return super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)",
"def search(self, cr, uid, args, offset=0, limit=None, order=None, \n context=None, count=False):\n if context is None:\n context = {}\n if 'alternative_setting_id' in context:\n alternative_setting_id = context.get('alternative_setting_id')\n setting_obj = self.pool.get('hr.alternative.setting')\n alternative_setting_id = setting_obj.browse(cr, uid, alternative_setting_id)\n degrees_ids = [\n x.id for x in alternative_setting_id.degrees_ids]\n degrees_ids += degrees_ids\n degrees_ids = tuple(degrees_ids)\n\n departments_ids = [\n x.id for x in alternative_setting_id.departments_ids]\n departments_ids += departments_ids\n departments_ids = tuple(departments_ids)\n\n ex_employees_ids = [\n x.id for x in alternative_setting_id.employees_ids]\n ex_employees_ids += ex_employees_ids\n ex_employees_ids = tuple(ex_employees_ids)\n\n search_ids = super(hr_employee, self).search(cr, uid, args, offset, limit, order, context={}, count=count)\n search_ids += search_ids\n search_ids = tuple(search_ids)\n if not search_ids:\n search_ids = (0,0)\n \n if not departments_ids:\n departments_ids = (0,0)\n cr.execute(\n ''' Select emp.id,(SELECT MAX(date) as max_date\n FROM hr_alternative_process_line\n WHERE employee_id=emp.id and state='confirmed')date\n from hr_employee emp\n where emp.degree_id in %s \n and emp.department_id not in %s \n and emp.state = 'approved' \n and emp.payroll_state = 'khartoum' \n and emp.id in %s \n and emp.gender='male' \n and emp.id not in %s \n order by date NULLS LAST''', (degrees_ids,departments_ids,search_ids,ex_employees_ids))\n history = cr.dictfetchall()\n new_ids = []\n while True:\n try:\n new_ids.append( history.pop()['id'] )\n except:\n break\n #new_ids = list(reversed( new_ids ) )\n # search_ids = super(hr_employee, self).search(cr, uid, args, offset, limit, order, context=context, count=count)\n # new_ids = new_ids[offset:limit]\n # if search_ids:\n # new_ids = [x for x in new_ids if x in search_ids]\n # return new_ids[offset:limit]\n return 
new_ids[offset:limit]\n return super(hr_employee, self).search(cr, uid, args, offset, limit, order, context=context, count=count)",
"def filter_employees(self, searchQuery=\"\", researchGroup=\"\", promotor=0, ):\n from Employee import Employee\n try:\n cursor = self.dbconnect.get_cursor()\n\n sql = 'select * from employee e INNER JOIN researchGroup r ON r.groupID=e.researchGroup WHERE ' \\\n 'e.name LIKE %(searchQueryQ)s'\n\n if researchGroup != \"\":\n sql += \"AND r.name = %(researchGroupQ)s\"\n\n if promotor == 1:\n sql += 'AND e.promotor = TRUE'\n if promotor == 2:\n sql += 'AND e.promotor = FALSE'\n\n cursor.execute(sql, dict(searchQueryQ=\"%\" + searchQuery + \"%\", researchGroupQ=researchGroup))\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees\n except:\n self.dbconnect.rollback()\n raise Exception('unable to filter employees')",
"def customize_search_results(self):\n adjust_date_range(self.driver, self.date_range)\n adjust_salary_range(self.driver, self.salary_range)\n # adjust_search_radius(self.driver, self.search_radius) # deprecated\n # scroll to top of page so the sorting menu is in view\n self.driver.execute_script(\"window.scrollTo(0, 0);\")\n sort_results_by(self.driver, self.sort_by)",
"def get_queryset(self):\n search_str = self.request.GET.get('search')\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(name__icontains=search_str)\n b = Q(administrator__first_name__icontains = search_str)\n c = Q(administrator__last_name__icontains = search_str)\n d = Q(administrator__username__icontains = search_str)\n e = Q(types__name__icontains = search_str)\n f = Q(description__icontains = search_str)\n objects = Organization.objects.filter(a | b | c | d | e | f).distinct()\n\n else: # SORTING BY COL_NM\n if col_nm in ['name', 'description'] :\n objects = Organization.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n elif col_nm =='administrator__first_name':\n objects=Organization.objects.filter().order_by(col_nm)\n if sort_order == \"DESC\":\n objects = objects.reverse()\n else:\n objects=Organization.objects.extra(select=\n {'name':'lower(name)'}).order_by('name')\n\n\n return objects",
"def searchsorted(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.searchsorted)(self, **kwargs)",
"def search(Resource=None, SearchExpression=None, SortBy=None, SortOrder=None, NextToken=None, MaxResults=None):\n pass",
"def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )",
"def search(self, query, maxhits=100):",
"def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n\n if 'default_spare_ok' in context:\n ids = []\n ids = self.search(cr, uid, [('name', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('e_name', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('t_number', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('default_code', operator, name)]+ args, limit=limit, context=context)\n ids = list(set(ids))\n args.append(('id','in',ids))\n\n if 'spares_ids' in context:\n idss = []\n product_ids = resolve_o2m_operations(cr, uid, self.pool.get('maintenance.spare'),\n context.get('spares_ids'), [\"product_id\"], context)\n args.append(('id', 'not in', [isinstance(\n d['product_id'], tuple) and d['product_id'][0] or d['product_id'] for d in product_ids]))\n\n '''if 'vehicle_id' in context and context['vehicle_id']:\n vehicle_category = self.pool.get('fleet.vehicle').browse(cr, uid, context['vehicle_id'], context).type.id\n if vehicle_category:\n #idss = self.search(cr, uid, [('vehicle_category','=',vehicle_category)])\n idss = self.search(cr, uid, [('vehicle_category_ids','in',[vehicle_category])])'''\n #args.append(('id','in',idss))\n if ids :\n result = self.name_get(cr, uid, ids, context=context)\n return result\n else:\n return []\n else:\n return super(product_product, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)",
"def test_relatedfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)",
"def get_queryset(self):\n\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects",
"def search(self,\n model,\n resume: bool = False,\n target_metric=None,\n mode: str = 'best',\n n_parallels=1,\n acceleration=False,\n input_sample=None,\n **kwargs):\n if not check_hpo_status(self.hposearcher):\n return None\n Trainer._log_api_event(\"search\")\n\n return self.hposearcher.search(model,\n resume=resume,\n target_metric=target_metric,\n mode=mode,\n n_parallels=n_parallels,\n acceleration=acceleration,\n input_sample=input_sample,\n **kwargs)",
"def _search(self, searcher, whoosh_query, sortedby=None, **kwargs):\n\n if isinstance(sortedby, sorting.ScoreFacet):\n # Score sorting is default behaviour. Avoid overhead\n sortedby = None\n else:\n assert sortedby is None or isinstance(sortedby, sorting.FieldFacet), \"%s supports sorting by Field or Score only\" % self.__class__.__name__\n\n return searcher.search(whoosh_query, sortedby=sortedby, **kwargs)",
"def abstract_search(self, model, params):\n domain = []\n\n for key, value in params.items():\n self.check_field_existence(model, key)\n\n # we change the operator according to the field type or name\n if key == 'name':\n domain.append((key, 'ilike', value))\n elif type(value) is list:\n domain.append((key, 'in', value))\n elif key == 'active' and value == False:\n domain.append((key, '!=', True))\n else:\n domain.append((key, '=', value))\n\n return self.env[model].sudo().search(domain)",
"def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)",
"def search(self, where=\"\", order_by=[], limit=None, parameters={}, namespaces=[]):\n return self._backend.search(where, order_by, limit, parameters, namespaces)",
"def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if args is None:\n args = []\n if context is None:\n context = {}\n if not context.get('closed',False):\n args.append(('state', '=', 'draft'))\n return super(account_period, self).name_search(cr, uid, name, args=args, operator='ilike', context=context, limit=limit)",
"def employee_list_group_by_badges(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n badge_list = EmployeeBadge.objects.filter(\n Q(badge__name__icontains=search_term)).values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n else:\n badge_list = EmployeeBadge.objects.all().values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(badge_list, request)\n serializer = EmployeeBadgeListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def do_search(self, **criteria):\n return self.app.get(url(controller='dex_search',\n action='move_search',\n **criteria))",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')",
"def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret",
"def test_search_employee_returns_the_correct_menu(self):\n # add some employees to the database\n test_employees = [\n {'id': 1, 'name': \"Test Employee 1\"},\n {'id': 2, 'name': \"Test Employee 2\"}\n ]\n for employee in test_employees:\n e = db_manager.Employee.get_or_create(name=employee['name'])\n # give each employee an associated logentry\n db_manager.LogEntry.create(\n employee=e[0],\n date=datetime.date(2018, 1, 2),\n task_name='Test task {}'.format(employee['id']),\n duration=employee['id'],\n notes='Note'\n )\n user_input = '1'\n with patch('builtins.input', side_effect=user_input):\n result = self.menu.search_employee()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)",
"def get_query(self,q,request):\n kwargs = { \"%s__icontains\" % search_field : q }\n return model.objects.filter(**kwargs).order_by(search_field)",
"def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects",
"def concrete_search(self, limit):\n frontier = Frontier_SortedList.Frontier_SortedList()\n closed = {}\n initial_node = TreeNode.TreeNode(\n id=0,\n state=self.problem.initial_state,\n cost=0,\n node_depth=0,\n f=None,\n parent=None,\n last_action=None,\n )\n initial_node.f = self.__f_strategy(initial_node)\n id = 1\n frontier.insert(initial_node)\n solution = False\n while not solution and not frontier.is_empty():\n actual_node = frontier.remove()\n pruned = False\n if self.problem.is_goal(actual_node.state):\n solution = True\n else:\n if self.pruning == 1:\n pruned = self.check_node_pruning_1st_prune(actual_node, closed)\n if not pruned:\n closed[actual_node.state.create_md5()] = abs(actual_node.f)\n\n if self.pruning in [0, 1]:\n if not pruned:\n if actual_node.node_depth < limit:\n frontier, id = self.expand_node(id, actual_node, frontier)\n\n if self.pruning == 2:\n if actual_node.node_depth < limit:\n list_nodes, id = self.expand_node_2nd_prune(id, actual_node)\n for node in list_nodes:\n md5 = node.state.create_md5()\n if md5 not in closed or closed[md5] > abs(node.f):\n closed[md5] = abs(node.f)\n frontier.insert(node)\n if solution:\n return self.solution(actual_node)\n else:\n return None",
"def order_assessment_part_results(self, assessment_part_search_order):\n raise errors.Unimplemented()",
"def search(self, name=None, first_name=None, last_name=None, email=None,\r\n phone=None, company=None, twitter=None, labels=None,\r\n case_id=None, subject=None, description=None,\r\n status=None, priority=None, assigned_group=None,\r\n assigned_user=None, channels=None, notes=None, attachments=None,\r\n created=None, updated=None, since_created_at=None,\r\n max_created_at=None, since_updated_at=None, max_updated_at=None,\r\n since_id=None, max_id=None, per_page=None, page=None,\r\n embed=None, fields=None, **case_custom_fields):\r\n store = locals()\r\n store.update(store.pop('case_custom_fields'))\r\n\r\n params = base.get_params(None, store)\r\n url = '{0}/{1}'.format(self.get_url(), 'search')\r\n return http.Request('GET', url, params), parsers.parse_json"
] | [
"0.64379",
"0.57039595",
"0.52205575",
"0.51754266",
"0.5063366",
"0.5011705",
"0.5009542",
"0.49143377",
"0.48910806",
"0.4789285",
"0.4787127",
"0.47528285",
"0.47489092",
"0.46934026",
"0.4679221",
"0.4671263",
"0.46634984",
"0.4649555",
"0.4624436",
"0.46154812",
"0.46114376",
"0.46076193",
"0.45990518",
"0.45978326",
"0.45873576",
"0.45772067",
"0.4570625",
"0.4570181",
"0.45635104",
"0.45550552"
] | 0.64316547 | 1 |
If there are no date set for date_to, automatically set one day later than the date_from. Also update the number_of_days. | def _onchange_date_from(self):
date_from = self.date_from
date_to = self.date_to
self.compute_valid_leaves_for_employee(date_from, date_to)
# policy_id = self.env['leaves.policy'].sudo().search(
# [('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])
# if date_from and not date_to:
# date_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)
# self.date_to = str(date_to_with_delta)
# number_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)
# self.number_of_days_temp = number_of_day
# # Compute and update the number of days
# if (date_to and date_from) and (date_from <= date_to):
# if policy_id:
# for val in policy_id:
# number_of_days = 0
# if val.weekends_leave_period == 'dont_count':
# num_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)
# date_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')
# date_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')
#
# # Logic of Public Holidays when week offs count as holidays is True 2019-11-19
# emp_shift = self.employee_id.resource_calendar_ids
# global_leaves = emp_shift.global_leave_ids
# # List to store the global leaves
# public_holidays = []
# for holiday in global_leaves:
# public_holidays.append((holiday.date_from, holiday.date_to))
#
# # Public holidays between leave period
# leave_period_dates = []
# start_date = date_from1.date()
# end_date = date_to1.date()
# delta = end_date - start_date
# for i in range(delta.days + 1):
# day = start_date + timedelta(days=i)
# leave_period_dates.append(day)
# count = 0
# for date in public_holidays:
# if datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:
# count += 1
# # End of Public Holidays logic
#
# self.number_of_days_temp = num_days - count
# else:
# number_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)
# date_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')
# date_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')
# if val.dur_full and not val.dur_half:
# total_days = (date_to1 - date_from1).days
# else:
# total_seconds = (date_to1 - date_from1).seconds
# total_days = total_seconds / (24 * 3600)
#
# week_offs = total_days - number_of_days
# self.number_of_days_temp = number_of_days + week_offs
# else:
# # self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)
# number_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(
# date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)
# self.number_of_days_temp = number_of_day
#
# elif (date_to and date_from) and (date_from > date_to):
# raise ValidationError("From Date cannot be greater then To Date")
# else:
# self.number_of_days_temp = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def date_to(self, date_to):\n\n self._date_to = date_to",
"def _set_days_until_triage(self):\n if self.sla_triaged_at:\n btd = dates.businesstimedelta(self.created_at, self.sla_triaged_at)\n self.days_until_triage = btd.days\n else:\n self.days_until_triage = None",
"def limit_date_range_to(self, limit_date_range_to):\n\n self._limit_date_range_to = limit_date_range_to",
"def date_from(self, date_from):\n\n self._date_from = date_from",
"def adjust_start_and_end_dates(self):\n if self.start_date < self.install_date:\n self.start_date = self.install_date\n log.info(\"Adjusting start date to {}.\".format(self.start_date))\n\n today = datetime.today().date()\n if self.end_date > today:\n self.end_date = today\n log.info(\"Adjusting end date to {}\".format(self.end_date))\n\n if self.start_date > self.end_date:\n self.end_date = self.start_date + timedelta(days=1)\n log.info(\"Adjusting end date to {}\".format(self.end_date))",
"def limit_date_range_from(self, limit_date_range_from):\n\n self._limit_date_range_from = limit_date_range_from",
"def set_to_date(self):\n self.set_value_into_input_field(self.set_to_date_locator, self.get_current_date())",
"def from_date(self, value: date):\n self._from_date = value\n self._dao.from_date = value",
"def _iterate_days(from_date, to_date):\n if from_date > to_date:\n raise ValueError('from_date %s is > to_date %s', from_date, to_date)\n return rrule.rrule(rrule.DAILY, dtstart=from_date, until=to_date)",
"def rate_between(self, from_date, to_date):\n print(\"override the above\")",
"def _increment_date_data(klass, series, date_data):\n\n # delta is the timedelta in between events\n delta = timedelta(days=7 * series.every)\n date_data['start_date'] = date_data['start_date'] + delta\n date_data['end_date'] = date_data['end_date'] + delta",
"def to_date(self, value: date):\n self._to_date = value\n self._dao.to_date = value",
"def checkio(from_date: date, to_date: date) -> int:\n result = 0\n while True:\n if from_date.weekday() == 5 or from_date.weekday() == 6:\n result += 1\n if from_date == to_date:\n break\n else:\n from_date += timedelta(days=1)\n return result",
"def _get_number_of_days(self, date_from, date_to):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_days = timedelta.days + float(timedelta.seconds) / 86400\n return diff_days",
"def _get_number_of_days(self, date_from, date_to, employee_id):\n\t\tfrom_dt = fields.Datetime.from_string (date_from)\n\t\tto_dt = fields.Datetime.from_string (date_to)\n\t\tif employee_id:\n\t\t\temployee = self.env['hr.employee'].browse (employee_id)\n\n\t\t\t# Testing 16/11/19\n\t\t\tshift = employee.resource_calendar_ids\n\t\t\treturn employee.get_work_days_count (from_dt, to_dt, shift)\n\n\t\ttime_delta = to_dt - from_dt\n\t\treturn math.ceil (time_delta.days + float (time_delta.seconds) / 86400)",
"def limit_date_range_to(self):\n return self._limit_date_range_to",
"def set_from_date_as_current_date(self):\n self.set_value_into_input_field(self.set_from_date_locator, self.get_current_date())",
"def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id), ('alternative_setting_id','=',act.alternative_setting_id.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True",
"def _normalize(self):\n if self.event_type == 'event_allday':\n from_date = self.from_date\n if from_date is not None:\n time_since_daystart = from_date.hour() * 3600 \\\n + from_date.minute() * 60 + from_date.second()\n if time_since_daystart:\n timeTime = from_date.timeTime()\n self.from_date = DateTime(timeTime - time_since_daystart)\n to_date = self.to_date\n if to_date is not None:\n time_since_daystart = to_date.hour() * 3600 \\\n + to_date.minute() * 60 + to_date.second()\n # 86399 sec = 24h - 1sec\n if time_since_daystart != 86399:\n timeTime = to_date.timeTime()\n self.to_date = DateTime(\n timeTime - time_since_daystart + 86399)\n if self.to_date.lessThan(self.from_date):\n to_date = self.to_date\n self.to_date = self.from_date\n self.from_date = to_date\n\n if self.event_type == 'event_allday':\n self._normalize()",
"def days_between(self, other):\n new_self = self.copy()\n new_other = other.copy()\n count=0\n if self.is_before(other):\n while(True):\n if new_self == new_other:\n break\n count-=1\n new_self.advance_one()\n elif self.is_after(other):\n while(True):\n if new_self==new_other:\n break\n count+=1\n new_other.advance_one()\n\n return count",
"def set_date_range(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n self.start_date = pd.Timestamp(start_date)\n self.end_date = pd.Timestamp(end_date)",
"def calc_total_price(price_per_day, date_from, date_to):\n date_from = datetime.strptime(date_from, '%Y-%m-%d')\n date_to = datetime.strptime(date_to, '%Y-%m-%d')\n n_days = date_to - date_from\n n_days = n_days.days + 1\n return price_per_day * n_days",
"def limit_date_range_from(self):\n return self._limit_date_range_from",
"def update_validity(sender, instance, **kwargs):\n\tinstance.up_to_date = True\n\n\tfor sol in instance.post_set.all():\n\t\tsol.up_to_date = False\n\t\tsol.save()",
"def date_to_days(self, date):\n date = str_to_date(date)\n return (date-self.start_date).days",
"def set_end_date(self, date):\n pass",
"def _putMailInPast(self, mail, days):\n doc = mail.getEditableContent()\n fid = self.archiver.date_field_id\n doc.edit({fid: doc.getDataModel()[fid] - days}, mail)",
"def date_to(self):\n return self._date_to",
"def from_dt(self, from_dt):\n\n self._from_dt = from_dt",
"def __init__(self, dateStart, dateEnd): \n #TODO: Raise an exception if dateEnd<dateStart.\n super(dateGenerator,self).__init__()\n d = dateEnd - dateStart\n self._startDate = dateStart\n self._dateDiffSeconds = d.days * 86400 + d.seconds"
] | [
"0.6600826",
"0.62123364",
"0.60784036",
"0.6028921",
"0.6023509",
"0.58135504",
"0.5699685",
"0.56320995",
"0.5561258",
"0.55328417",
"0.55286765",
"0.551658",
"0.55129164",
"0.5464243",
"0.5361167",
"0.5355377",
"0.5281642",
"0.5279441",
"0.5274924",
"0.5264698",
"0.52578354",
"0.5244781",
"0.52378786",
"0.51990867",
"0.5193093",
"0.5183797",
"0.5161171",
"0.514466",
"0.5127083",
"0.51242536"
] | 0.6646776 | 0 |
This method will create entry in resource calendar leave object at the time of holidays validated | def _create_resource_leave(self):
for leave in self:
self.env['resource.calendar.leaves'].create ({
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_calendar_id.id
})
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_leave_request(self):\n\t\tfor holiday in self.filtered (lambda request: request.type == 'remove' and request.holiday_type == 'employee'):\n\t\t\tmeeting_values = holiday._prepare_holidays_meeting_values ()\n\t\t\tmeeting = self.env['calendar.event'].with_context (no_mail_to_attendees=True).create (meeting_values)\n\t\t\tholiday.write ({'meeting_id': meeting.id})\n\t\t\tholiday._create_resource_leave ()",
"def create(self, values):\n\t\temployee_id = values.get('employee_id', False)\n\t\tprint(\"the val in the dict\", values)\n\t\tif (values.get('date_from') and values.get('date_to')) == False:\n\t\t\tcurrent = datetime.strftime(datetime.today().date(),'%Y-%m-%d')\n\t\t\tvalues.update({'allocate_date': current})\n\t\t\tprint(values)\n\t\tif not values.get('department_id'):\n\t\t\tvalues.update({'department_id': self.env['hr.employee'].browse (employee_id).department_id.id})\n\n\t\tholiday = super (Holidays, self.with_context (mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)\n\t\tholiday.add_follower (employee_id)\n\n\t\t# Trilok code for policies\n\t\tpolicy_id = holiday.env['leaves.policy'].search(\n\t\t\t[('leave_type', '=', holiday.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# print (\"policy iddddddddddddddd\",policy_id)\n\t\temp_type = holiday.employee_id.employee_type.id\n\t\tfor val in policy_id:\n\t\t\tif val.employee_type.id == emp_type:\n\t\t\t\tfor employee in holiday.employee_id:\n\t\t\t\t\tif holiday.type == 'remove':\n\t\t\t\t\t\tquery = '''select count(*) from hr_holidays where upper(type) = upper('rEMove')and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') and now()::date and employee_id = %s''' % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query)\n\t\t\t\t\t\tquery_result = holiday.env.cr.dictfetchone()\n\t\t\t\t\t\t# print(\"query_result\", query_result)\n\t\t\t\t\t\tif val.min_app_per_year > 0 and query_result[\"count\"] > val.min_app_per_year:\n\t\t\t\t\t\t\traise ValidationError(\"maximum number of applications per year is {} days\".format(val.min_app_per_year))\n\n\t\t\t\t\t\tquery1 = '''select create_date::date,date_to::date from hr_holidays where upper(type) = \n\t\t\t\t\t\tupper('rEMove') and upper(state) = upper('Validate') and create_date::date between 
to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') \n\t\t and now()::date and employee_id = %s order by create_date desc limit 1'''\\\n\t\t\t\t\t\t\t\t % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query1)\n\t\t\t\t\t\tquery_result1 = holiday.env.cr.fetchall()\n\t\t\t\t\t\tif query_result1 is not None:\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1)\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1[0][0], query_result1[0][1])\n\t\t\t\t\t\t\tcre_date = datetime.strptime(query_result1[0][0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdate_to = datetime.strptime(query_result1[0][1], '%Y-%m-%d')\n\t\t\t\t\t\t\t# print(\"cre_date\", cre_date, type(cre_date))\n\t\t\t\t\t\t\tcurrent_dt = fields.Datetime.now()\n\t\t\t\t\t\t\t# cdate=datetime.strptime(current_dt,'%Y-%m-%d')\n\t\t\t\t\t\t\tcurrent_date = datetime.strptime(current_dt.split(\" \")[0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdays = (current_date - date_to).days\n\t\t\t\t\t\t\tif val.min_leave_app_gap > 0 and days > val.min_leave_app_gap:\n\t\t\t\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\t\t\t\"Minimum gap between two application should be atleast {} days\".format(\n\t\t\t\t\t\t\t\t\t\tval.min_leave_app_gap))\n\n\t\treturn holiday",
"def test_holidays_validate(self):\n self.request.sudo(self.user_1.id).holidays_validate()\n\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n self.assertEqual(accrual.total_hours, 22.5)",
"def _remove_resource_leave(self):\n\t\treturn self.env['resource.calendar.leaves'].search ([('holiday_id', 'in', self.ids)]).unlink ()",
"def check_leave_request_holiday(self, cr, uid, att, context=None):\n if att:\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n \n public_holiday_obj = self.pool.get('trobz.hr.public.holidays')\n public_holiday_ids = public_holiday_obj.search(cr, uid, [('date', '=', att.day_tz), ('state', '=', 'approved')], context=context)\n if public_holiday_ids:\n return True\n sql = '''\n SELECT line.first_date_type, line.first_date, line.last_date_type, line.last_date\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date <= '%s' AND line.last_date >= '%s'\n AND h.state = 'validate'\n '''% (att.employee_id.id, att.day_tz, att.day_tz)\n cr.execute(sql)\n for leave in cr.fetchall():\n if att.action == 'sign_out':\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 13\n else:\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 12\n if 
att.day_tz == leave[1]:\n if leave[0] == 'afternoon' and afternoon:\n return True\n if leave[0] == 'morning' and not afternoon:\n return True\n if leave[0] == 'full':\n return True\n if att.day_tz == leave[3]:\n if leave[2] == 'afternoon' and afternoon:\n return True\n if leave[2] == 'morning' and not afternoon:\n return True\n if leave[2] == 'full':\n return True\n if datetime.strptime(att.day_tz, '%Y-%m-%d') > datetime.strptime(leave[1], '%Y-%m-%d')\\\n and datetime.strptime(att.day_tz, '%Y-%m-%d') < datetime.strptime(leave[3], '%Y-%m-%d'):\n return True\n return False",
"def test_one_day_leave(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 1 day of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=0).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)\n self.assertEqual(\n 1,\n get_taken_leave_days(\n staffprofile, Leave.PENDING, Leave.REGULAR, 2017, 2017\n ),\n )",
"def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + 
timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0",
"def test_leaveform_process_with_overlap(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n # make sure leave obj already exists for said dates\n mommy.make(\n \"small_small_hr.Leave\",\n staff=staffprofile,\n start=start,\n end=end,\n leave_type=Leave.REGULAR,\n review_status=Leave.APPROVED,\n )\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.REJECTED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.REJECTED, leave.review_status)",
"def holidays_validate(self, cr, uid, ids, context=None):\n \n super(hr_holidays ,self).holidays_validate(cr, uid, ids, context=context)\n \n grant_order_obj = self.pool.get(\"granted.rights.order\")\n grant_order_lines_obj = self.pool.get(\"granted.rights.order.lines\") \n department_obj = self.pool.get('hr.department')\n \n \n manager = False\n for rec in self.browse(cr,uid,ids):\n\t if rec.holiday_status_id.alternative_emp:\n\t\t dep_ids = department_obj.search(cr,uid,[('manager_id','=',rec.employee_id.id)])\n\t\t #if rec.employee_id.id == department_obj.browse(cr,uid,rec.department_id.id).manager_id.id :\n\t\t if dep_ids:\n\t\t\t\t manager = True\n\n\t\t \n\t\t grant_date = datetime.strptime(rec.date_to, '%Y-%m-%d %H:%M:%S')\n\t\t \n\t\t grant_date = grant_date + timedelta(days=1)\n\n\n\t\t if rec.date_to >= time.strftime('%Y-%m-%d'):\n\t\t\t order_id = grant_order_obj.create( cr, uid,{\n\t\t\t\t \n\t\t\t\t 'delegation_type' : 'holiday',\n\t\t\t\t 'holiday_order_id' :rec.id,\n\t\t\t\t 'employee_donor' : rec.employee_id.id,\n\t\t\t\t 'employee_candidate' : rec.alternative_employee.id,\n\t\t\t\t 'start_grant_date' : rec.date_from, \n\t\t\t\t 'end_grant_date' : grant_date,\n\t\t\t\t 'department_id' : rec.department_id.id,\n\t\t\t\t 'is_a_amanger' : manager,\n\n\t\t\t\t })\n\t\t\t \n\t\t\t res = grant_order_obj.on_change_donor_employee(cr, uid, order_id , rec.employee_id.id , context=context)\n\t\t\t for rec in res['value']['donor_groups_ids']:\n\t\t\t\t rec.update({ 'order_id' : order_id})\n\t\t\t\t grant_order_lines_obj.create( cr, uid,rec )\n \n return True",
"def test_leave_oversubscribe_off(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 40 days of leave\n start = datetime(2017, 6, 1, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=0,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Mini retirement\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"end\"][0],\n )",
"def _check_leave_request(self, cr, uid, request, token, context=None):\n holidays_obj = request.registry['hr.holidays']\n holidays_ids = holidays_obj.search(cr, uid, [\n ('token', '=', token)\n ])\n\n if len(holidays_ids) == 0:\n return request.website.render(\n \"tk_hr_approve_request.leave_request_not_found\"\n )\n\n _id = holidays_ids[0] if len(holidays_ids) else None\n if _id:\n leave_request = holidays_obj.browse(\n cr, uid, _id, context=context\n )\n return leave_request",
"def test_leaveform_no_overlap(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n mommy.make(\n \"small_small_hr.Leave\",\n leave_type=Leave.REGULAR,\n start=start,\n end=end,\n review_status=Leave.APPROVED,\n staff=staffprofile,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"you cannot have overlapping leave days\", form.errors[\"start\"][0]\n )\n self.assertEqual(\n \"you cannot have overlapping leave days\", form.errors[\"end\"][0]\n )",
"def test_leaveform_max_days(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n allowed_days=21,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"end\"][0],\n )",
"def test_leaveform_apply(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)",
"def test_leave_oversubscribe(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 40 days of leave\n start = datetime(2017, 6, 1, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=0,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Mini retirement\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n\n # make it approved\n obj_type = ContentType.objects.get_for_model(leave)\n review = ModelReview.objects.get(content_type=obj_type, object_id=leave.id)\n review.review_status = ModelReview.APPROVED\n review.save()\n leave.refresh_from_db()\n\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=39).days, (leave.end - leave.start).days)\n self.assertEqual(\"Mini retirement\", leave.review_reason)\n self.assertEqual(Leave.APPROVED, leave.review_status)\n self.assertEqual(\n 40,\n get_taken_leave_days(\n staffprofile, Leave.APPROVED, Leave.REGULAR, 2017, 2017\n ),\n )\n self.assertEqual(-19, staffprofile.get_available_leave_days(year=2017))",
"def process_employee_exit(self):\n if self.is_employee_serving():\n self._end_date.append(datetime.now().isoformat())\n\n print(f\"Successfully processed exit for employee {self.name} on\" \\\n f\"{self._end_date[-1]}\\nWe wish {self.name} for future endeavours\")\n return\n raise RejoiningException(\"Employee not in service. Cannot process exit.\")",
"def onchange_leave_date(self):\n warning = {}\n if self.date_of_leave and self.date_of_leave < self.date_of_join:\n warning.update({\n 'title': _('Information'),\n 'message': _(\"Leaving Date Must Be Greater Than Joining Date.\")})\n self.date_of_leave = False\n return {'warning': warning}",
"def test_leaveform_start_end(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 1, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(1, len(form.errors.keys()))\n self.assertEqual(\"end must be greater than start\", form.errors[\"end\"][0])\n\n # end year and start year must be the same\n\n end = datetime(2018, 6, 1, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n data2 = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form2 = LeaveForm(data=data2)\n self.assertFalse(form2.is_valid())\n self.assertEqual(2, len(form2.errors.keys()))\n self.assertEqual(\n \"start and end must be from the same year\", form2.errors[\"start\"][0]\n )\n self.assertEqual(\n \"start and end must be from the same year\", form2.errors[\"end\"][0]\n )",
"def _check_approval_update(self, state):\n\t\tcurrent_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\n\t\t# is_officer = self.env.user.has_group('hr_holidays.group_hr_holidays_user')\n\t\tis_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')\n\t\tfor holiday in self:\n\t\t\tval_type = holiday.holiday_status_id.validation_type\n\t\t\tif state == 'confirm':\n\t\t\t\tcontinue\n\n\t\t\tif state == 'draft':\n\t\t\t\tif holiday.employee_id != current_employee and not is_manager:\n\t\t\t\t\traise UserError(_('Only a Leave Manager can reset other people leaves.'))\n\t\t\t\tcontinue\n\n\t\t\t# if not is_officer:\n\t\t\t# \traise UserError(_('Only a Leave Officer or Manager can approve or refuse leave requests.'))\n\n\t\t\t# if is_officer:\n\t\t\t# \t# use ir.rule based first access check: department, members, ... (see security.xml)\n\t\t\tholiday.check_access_rule('write')\n\n\t\t\tif holiday.employee_id == current_employee and not is_manager:\n\t\t\t\traise UserError(_('Only a Leave Manager can approve its own requests.'))\n\n\t\t\tif (state == 'validate1' and val_type == 'both') or (state == 'validate' and val_type == 'manager'):\n\t\t\t\tmanager = holiday.employee_id.parent_id or holiday.employee_id.department_id.manager_id\n\t\t\t\tif (manager and manager != current_employee) and not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('You must be either %s\\'s manager or Leave manager to approve this leave') % (holiday.employee_id.name))\n\n\t\t\tif state == 'validate' and val_type == 'both':\n\t\t\t\tif not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('Only an Leave Manager can apply the second approval on leave requests.'))",
"def test_sickleave_apply(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.SICK, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)",
"def test_sickleave_process(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.REJECTED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.SICK, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.REJECTED, leave.review_status)",
"def test_leaveform_admin(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.APPROVED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.APPROVED, leave.review_status)",
"def add_school_holidays(df):\n sd_noel = pd.Timestamp('2019-12-21')\n ed_noel = pd.Timestamp('2020-01-06')\n\n sd_hiver = pd.Timestamp('2020-02-08')\n ed_hiver = pd.Timestamp('2020-02-24')\n\n sd_ete = pd.Timestamp('2020-07-04')\n ed_ete = pd.Timestamp('2020-09-01')\n\n sd_printemps = pd.Timestamp('2020-04-04')\n ed_printemps = pd.Timestamp('2020-04-20')\n\n sd_ascension = pd.Timestamp('2020-05-20')\n ed_ascension = pd.Timestamp('2020-05-25')\n\n sd_toussaint = pd.Timestamp('2020-10-17')\n ed_toussaint = pd.Timestamp('2020-11-02')\n\n def get_name_vacation(x):\n if (x >= sd_noel and x < ed_noel):\n return 'noel'\n elif (x >= sd_hiver and x < ed_hiver):\n return 'hiver'\n elif (x >= sd_ete and x < ed_ete):\n return 'ete'\n elif (x >= sd_printemps and x < ed_printemps):\n return 'printemps'\n elif (x >= sd_ascension and x < ed_ascension):\n return 'ascension'\n elif (x >= sd_toussaint and x < ed_toussaint):\n return 'toussaint'\n else:\n return np.nan\n\n df['nom_vacance_scolaire'] = df['Date'].map(get_name_vacation)\n df['vacance_scolaire'] = df['nom_vacance_scolaire'].map(lambda x: 0 if pd.isnull(x) else 1)\n return df",
"def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response",
"def __init__(self, obj_holidays, workdays, excludes, days_offset, name):\n self._name = name\n self._obj_holidays = obj_holidays\n self._workdays = workdays\n self._excludes = excludes\n self._days_offset = days_offset\n self._state = None",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def createEvents(self):\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # create the service variable\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n # adds self.mainEvent to calendar\n event = service.events().insert(calendarId='primary', body=self.mainEvent).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n\n # adds all values in self.importantDates to calendar\n for i in self.importantDates:\n event = service.events().insert(calendarId='primary', body=i).execute()\n print('Event created: %s' % (event.get('htmlLink')))",
"def get_school_holidays(start_date_data, end_date_data,\n zones=['A', 'B', 'C'],\n calendar_file=\"../input/holidays.xml\"):\n\n # Not necessary to explain. It basicaly gets the information\n # from the xml file.\n parsedXML = et.parse(calendar_file)\n school_holidays = {}\n node_cal = parsedXML.getroot()[-1]\n for node_zone in node_cal.getchildren():\n zone = node_zone.attrib['libelle']\n if zone in zones:\n dates = []\n for node_date in node_zone.getchildren():\n start = pd.to_datetime(node_date.attrib['debut'])\n end = pd.to_datetime(node_date.attrib['fin'])\n if (end >= start_date_data) and (start <= end_date_data):\n dates.append([start, end])\n school_holidays[zone] = dates\n return school_holidays",
"def test_leaveform_max_sick_days(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 20, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=0,\n allowed_days=10,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough sick days. Available sick days are 10.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough sick days. Available sick days are 10.00\", form.errors[\"end\"][0]\n )",
"def test_event_serialize_deserialize_holiday_mode():\n start = datetime.date.today() - datetime.timedelta(days=1)\n end = datetime.date.today() + datetime.timedelta(days=1)\n holiday_mode = HolidayMode(True, start, end, 15)\n\n new_holiday_mode = holiday_mode_from_json(holiday_mode_to_json(holiday_mode))\n assert holiday_mode.target == new_holiday_mode.target\n assert holiday_mode.start_date == new_holiday_mode.start_date\n assert holiday_mode.end_date == new_holiday_mode.end_date"
] | [
"0.7653445",
"0.7196214",
"0.65434945",
"0.6539993",
"0.63767076",
"0.6295288",
"0.6260696",
"0.6255377",
"0.62113553",
"0.6104574",
"0.6042434",
"0.58993787",
"0.5892861",
"0.58624077",
"0.5775199",
"0.5663149",
"0.56233764",
"0.56051123",
"0.5601992",
"0.5562575",
"0.5541991",
"0.5471418",
"0.5427412",
"0.54159003",
"0.5370289",
"0.5314971",
"0.5311441",
"0.53035855",
"0.52985525",
"0.52917814"
] | 0.7792591 | 0 |
Validate leave requests (holiday_type='employee' and holiday.type='remove') by creating a calendar event and a resource leaves. | def _validate_leave_request(self):
for holiday in self.filtered (lambda request: request.type == 'remove' and request.holiday_type == 'employee'):
meeting_values = holiday._prepare_holidays_meeting_values ()
meeting = self.env['calendar.event'].with_context (no_mail_to_attendees=True).create (meeting_values)
holiday.write ({'meeting_id': meeting.id})
holiday._create_resource_leave () | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_resource_leave(self):\n\t\tfor leave in self:\n\t\t\tself.env['resource.calendar.leaves'].create ({\n\t\t\t\t'name': leave.name,\n\t\t\t\t'date_from': leave.date_from,\n\t\t\t\t'holiday_id': leave.id,\n\t\t\t\t'date_to': leave.date_to,\n\t\t\t\t'resource_id': leave.employee_id.resource_id.id,\n\t\t\t\t'calendar_id': leave.employee_id.resource_calendar_id.id\n\t\t\t})\n\t\treturn True",
"def _remove_resource_leave(self):\n\t\treturn self.env['resource.calendar.leaves'].search ([('holiday_id', 'in', self.ids)]).unlink ()",
"def _check_leave_request(self, cr, uid, request, token, context=None):\n holidays_obj = request.registry['hr.holidays']\n holidays_ids = holidays_obj.search(cr, uid, [\n ('token', '=', token)\n ])\n\n if len(holidays_ids) == 0:\n return request.website.render(\n \"tk_hr_approve_request.leave_request_not_found\"\n )\n\n _id = holidays_ids[0] if len(holidays_ids) else None\n if _id:\n leave_request = holidays_obj.browse(\n cr, uid, _id, context=context\n )\n return leave_request",
"def test_holidays_validate(self):\n self.request.sudo(self.user_1.id).holidays_validate()\n\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n self.assertEqual(accrual.total_hours, 22.5)",
"def test_leaveform_process_with_overlap(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n # make sure leave obj already exists for said dates\n mommy.make(\n \"small_small_hr.Leave\",\n staff=staffprofile,\n start=start,\n end=end,\n leave_type=Leave.REGULAR,\n review_status=Leave.APPROVED,\n )\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.REJECTED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.REJECTED, leave.review_status)",
"def test_leaveform_no_overlap(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n mommy.make(\n \"small_small_hr.Leave\",\n leave_type=Leave.REGULAR,\n start=start,\n end=end,\n review_status=Leave.APPROVED,\n staff=staffprofile,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"you cannot have overlapping leave days\", form.errors[\"start\"][0]\n )\n self.assertEqual(\n \"you cannot have overlapping leave days\", form.errors[\"end\"][0]\n )",
"def check_leave_request_holiday(self, cr, uid, att, context=None):\n if att:\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n \n public_holiday_obj = self.pool.get('trobz.hr.public.holidays')\n public_holiday_ids = public_holiday_obj.search(cr, uid, [('date', '=', att.day_tz), ('state', '=', 'approved')], context=context)\n if public_holiday_ids:\n return True\n sql = '''\n SELECT line.first_date_type, line.first_date, line.last_date_type, line.last_date\n FROM hr_holidays_line line JOIN hr_holidays h ON line.holiday_id = h.id\n WHERE h.employee_id = %d\n AND line.first_date <= '%s' AND line.last_date >= '%s'\n AND h.state = 'validate'\n '''% (att.employee_id.id, att.day_tz, att.day_tz)\n cr.execute(sql)\n for leave in cr.fetchall():\n if att.action == 'sign_out':\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 13\n else:\n afternoon = datetime.strptime(att.name_tz, DEFAULT_SERVER_DATETIME_FORMAT).hour >= 12\n if 
att.day_tz == leave[1]:\n if leave[0] == 'afternoon' and afternoon:\n return True\n if leave[0] == 'morning' and not afternoon:\n return True\n if leave[0] == 'full':\n return True\n if att.day_tz == leave[3]:\n if leave[2] == 'afternoon' and afternoon:\n return True\n if leave[2] == 'morning' and not afternoon:\n return True\n if leave[2] == 'full':\n return True\n if datetime.strptime(att.day_tz, '%Y-%m-%d') > datetime.strptime(leave[1], '%Y-%m-%d')\\\n and datetime.strptime(att.day_tz, '%Y-%m-%d') < datetime.strptime(leave[3], '%Y-%m-%d'):\n return True\n return False",
"def test_leave_oversubscribe_off(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 40 days of leave\n start = datetime(2017, 6, 1, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=0,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Mini retirement\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"end\"][0],\n )",
"def _check_approval_update(self, state):\n\t\tcurrent_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\n\t\t# is_officer = self.env.user.has_group('hr_holidays.group_hr_holidays_user')\n\t\tis_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')\n\t\tfor holiday in self:\n\t\t\tval_type = holiday.holiday_status_id.validation_type\n\t\t\tif state == 'confirm':\n\t\t\t\tcontinue\n\n\t\t\tif state == 'draft':\n\t\t\t\tif holiday.employee_id != current_employee and not is_manager:\n\t\t\t\t\traise UserError(_('Only a Leave Manager can reset other people leaves.'))\n\t\t\t\tcontinue\n\n\t\t\t# if not is_officer:\n\t\t\t# \traise UserError(_('Only a Leave Officer or Manager can approve or refuse leave requests.'))\n\n\t\t\t# if is_officer:\n\t\t\t# \t# use ir.rule based first access check: department, members, ... (see security.xml)\n\t\t\tholiday.check_access_rule('write')\n\n\t\t\tif holiday.employee_id == current_employee and not is_manager:\n\t\t\t\traise UserError(_('Only a Leave Manager can approve its own requests.'))\n\n\t\t\tif (state == 'validate1' and val_type == 'both') or (state == 'validate' and val_type == 'manager'):\n\t\t\t\tmanager = holiday.employee_id.parent_id or holiday.employee_id.department_id.manager_id\n\t\t\t\tif (manager and manager != current_employee) and not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('You must be either %s\\'s manager or Leave manager to approve this leave') % (holiday.employee_id.name))\n\n\t\t\tif state == 'validate' and val_type == 'both':\n\t\t\t\tif not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):\n\t\t\t\t\traise UserError(_('Only an Leave Manager can apply the second approval on leave requests.'))",
"def test_leaveform_max_days(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n allowed_days=21,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough leave days. Available leave days are 21.00\",\n form.errors[\"end\"][0],\n )",
"def test_one_day_leave(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 1 day of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=0).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)\n self.assertEqual(\n 1,\n get_taken_leave_days(\n staffprofile, Leave.PENDING, Leave.REGULAR, 2017, 2017\n ),\n )",
"def test_leaveform_apply(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)",
"def test_leave_oversubscribe(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 40 days of leave\n start = datetime(2017, 6, 1, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 7, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=0,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Mini retirement\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n\n # make it approved\n obj_type = ContentType.objects.get_for_model(leave)\n review = ModelReview.objects.get(content_type=obj_type, object_id=leave.id)\n review.review_status = ModelReview.APPROVED\n review.save()\n leave.refresh_from_db()\n\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=39).days, (leave.end - leave.start).days)\n self.assertEqual(\"Mini retirement\", leave.review_reason)\n self.assertEqual(Leave.APPROVED, leave.review_status)\n self.assertEqual(\n 40,\n get_taken_leave_days(\n staffprofile, Leave.APPROVED, Leave.REGULAR, 2017, 2017\n ),\n )\n self.assertEqual(-19, staffprofile.get_available_leave_days(year=2017))",
"def test_leaveform_start_end(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 1, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(1, len(form.errors.keys()))\n self.assertEqual(\"end must be greater than start\", form.errors[\"end\"][0])\n\n # end year and start year must be the same\n\n end = datetime(2018, 6, 1, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n data2 = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form2 = LeaveForm(data=data2)\n self.assertFalse(form2.is_valid())\n self.assertEqual(2, len(form2.errors.keys()))\n self.assertEqual(\n \"start and end must be from the same year\", form2.errors[\"start\"][0]\n )\n self.assertEqual(\n \"start and end must be from the same year\", form2.errors[\"end\"][0]\n )",
"def test_leaveform_admin(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.REGULAR,\n carried_over_days=12,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.REGULAR,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.APPROVED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.REGULAR, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.APPROVED, leave.review_status)",
"def calculate_leaves(self, type):\n leaves = 0\n for attendance in self.attendance:\n if attendance.date >= self.current_payrollyear.start_date \\\n and attendance.date <= self.current_payrollyear.end_date:\n if attendance.on_leave and \\\n attendance.leave_application.leave_type == type:\n leaves += 1\n\n return leaves",
"def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + 
timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0",
"def create(self, values):\n\t\temployee_id = values.get('employee_id', False)\n\t\tprint(\"the val in the dict\", values)\n\t\tif (values.get('date_from') and values.get('date_to')) == False:\n\t\t\tcurrent = datetime.strftime(datetime.today().date(),'%Y-%m-%d')\n\t\t\tvalues.update({'allocate_date': current})\n\t\t\tprint(values)\n\t\tif not values.get('department_id'):\n\t\t\tvalues.update({'department_id': self.env['hr.employee'].browse (employee_id).department_id.id})\n\n\t\tholiday = super (Holidays, self.with_context (mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)\n\t\tholiday.add_follower (employee_id)\n\n\t\t# Trilok code for policies\n\t\tpolicy_id = holiday.env['leaves.policy'].search(\n\t\t\t[('leave_type', '=', holiday.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# print (\"policy iddddddddddddddd\",policy_id)\n\t\temp_type = holiday.employee_id.employee_type.id\n\t\tfor val in policy_id:\n\t\t\tif val.employee_type.id == emp_type:\n\t\t\t\tfor employee in holiday.employee_id:\n\t\t\t\t\tif holiday.type == 'remove':\n\t\t\t\t\t\tquery = '''select count(*) from hr_holidays where upper(type) = upper('rEMove')and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') and now()::date and employee_id = %s''' % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query)\n\t\t\t\t\t\tquery_result = holiday.env.cr.dictfetchone()\n\t\t\t\t\t\t# print(\"query_result\", query_result)\n\t\t\t\t\t\tif val.min_app_per_year > 0 and query_result[\"count\"] > val.min_app_per_year:\n\t\t\t\t\t\t\traise ValidationError(\"maximum number of applications per year is {} days\".format(val.min_app_per_year))\n\n\t\t\t\t\t\tquery1 = '''select create_date::date,date_to::date from hr_holidays where upper(type) = \n\t\t\t\t\t\tupper('rEMove') and upper(state) = upper('Validate') and create_date::date between 
to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') \n\t\t and now()::date and employee_id = %s order by create_date desc limit 1'''\\\n\t\t\t\t\t\t\t\t % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query1)\n\t\t\t\t\t\tquery_result1 = holiday.env.cr.fetchall()\n\t\t\t\t\t\tif query_result1 is not None:\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1)\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1[0][0], query_result1[0][1])\n\t\t\t\t\t\t\tcre_date = datetime.strptime(query_result1[0][0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdate_to = datetime.strptime(query_result1[0][1], '%Y-%m-%d')\n\t\t\t\t\t\t\t# print(\"cre_date\", cre_date, type(cre_date))\n\t\t\t\t\t\t\tcurrent_dt = fields.Datetime.now()\n\t\t\t\t\t\t\t# cdate=datetime.strptime(current_dt,'%Y-%m-%d')\n\t\t\t\t\t\t\tcurrent_date = datetime.strptime(current_dt.split(\" \")[0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdays = (current_date - date_to).days\n\t\t\t\t\t\t\tif val.min_leave_app_gap > 0 and days > val.min_leave_app_gap:\n\t\t\t\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\t\t\t\"Minimum gap between two application should be atleast {} days\".format(\n\t\t\t\t\t\t\t\t\t\tval.min_leave_app_gap))\n\n\t\treturn holiday",
"def test_leaveform_max_sick_days(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 20, 0, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=0,\n allowed_days=10,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = LeaveForm(data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(2, len(form.errors.keys()))\n self.assertEqual(\n \"Not enough sick days. Available sick days are 10.00\",\n form.errors[\"start\"][0],\n )\n self.assertEqual(\n \"Not enough sick days. Available sick days are 10.00\", form.errors[\"end\"][0]\n )",
"def test_sickleave_process(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n \"review_status\": Leave.REJECTED,\n }\n\n form = LeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.SICK, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.REJECTED, leave.review_status)",
"def test_sickleave_apply(self):\n user = mommy.make(\"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\")\n staffprofile = mommy.make(\"small_small_hr.StaffProfile\", user=user)\n staffprofile.leave_days = 21\n staffprofile.sick_days = 10\n staffprofile.save()\n\n request = self.factory.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n # 6 days of leave\n start = datetime(2017, 6, 5, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n end = datetime(2017, 6, 10, 7, 0, 0, tzinfo=pytz.timezone(settings.TIME_ZONE))\n\n mommy.make(\n \"small_small_hr.AnnualLeave\",\n staff=staffprofile,\n year=2017,\n leave_type=Leave.SICK,\n carried_over_days=4,\n )\n\n data = {\n \"staff\": staffprofile.id,\n \"leave_type\": Leave.SICK,\n \"start\": start,\n \"end\": end,\n \"review_reason\": \"Need a break\",\n }\n\n form = ApplyLeaveForm(data=data)\n self.assertTrue(form.is_valid())\n leave = form.save()\n self.assertEqual(staffprofile, leave.staff)\n self.assertEqual(Leave.SICK, leave.leave_type)\n self.assertEqual(start, leave.start)\n self.assertEqual(end, leave.end)\n self.assertEqual(timedelta(days=5).days, (leave.end - leave.start).days)\n self.assertEqual(\"Need a break\", leave.review_reason)\n self.assertEqual(Leave.PENDING, leave.review_status)",
"def process_employee_exit(self):\n if self.is_employee_serving():\n self._end_date.append(datetime.now().isoformat())\n\n print(f\"Successfully processed exit for employee {self.name} on\" \\\n f\"{self._end_date[-1]}\\nWe wish {self.name} for future endeavours\")\n return\n raise RejoiningException(\"Employee not in service. Cannot process exit.\")",
"def onchange_leave_date(self):\n warning = {}\n if self.date_of_leave and self.date_of_leave < self.date_of_join:\n warning.update({\n 'title': _('Information'),\n 'message': _(\"Leaving Date Must Be Greater Than Joining Date.\")})\n self.date_of_leave = False\n return {'warning': warning}",
"def holidays_validate(self, cr, uid, ids, context=None):\n \n super(hr_holidays ,self).holidays_validate(cr, uid, ids, context=context)\n \n grant_order_obj = self.pool.get(\"granted.rights.order\")\n grant_order_lines_obj = self.pool.get(\"granted.rights.order.lines\") \n department_obj = self.pool.get('hr.department')\n \n \n manager = False\n for rec in self.browse(cr,uid,ids):\n\t if rec.holiday_status_id.alternative_emp:\n\t\t dep_ids = department_obj.search(cr,uid,[('manager_id','=',rec.employee_id.id)])\n\t\t #if rec.employee_id.id == department_obj.browse(cr,uid,rec.department_id.id).manager_id.id :\n\t\t if dep_ids:\n\t\t\t\t manager = True\n\n\t\t \n\t\t grant_date = datetime.strptime(rec.date_to, '%Y-%m-%d %H:%M:%S')\n\t\t \n\t\t grant_date = grant_date + timedelta(days=1)\n\n\n\t\t if rec.date_to >= time.strftime('%Y-%m-%d'):\n\t\t\t order_id = grant_order_obj.create( cr, uid,{\n\t\t\t\t \n\t\t\t\t 'delegation_type' : 'holiday',\n\t\t\t\t 'holiday_order_id' :rec.id,\n\t\t\t\t 'employee_donor' : rec.employee_id.id,\n\t\t\t\t 'employee_candidate' : rec.alternative_employee.id,\n\t\t\t\t 'start_grant_date' : rec.date_from, \n\t\t\t\t 'end_grant_date' : grant_date,\n\t\t\t\t 'department_id' : rec.department_id.id,\n\t\t\t\t 'is_a_amanger' : manager,\n\n\t\t\t\t })\n\t\t\t \n\t\t\t res = grant_order_obj.on_change_donor_employee(cr, uid, order_id , rec.employee_id.id , context=context)\n\t\t\t for rec in res['value']['donor_groups_ids']:\n\t\t\t\t rec.update({ 'order_id' : order_id})\n\t\t\t\t grant_order_lines_obj.create( cr, uid,rec )\n \n return True",
"def get_timeoff_whosout(self):\n response = requests.get(\n self._base_url + \"time_off/whos_out/?end=\" + str(date.today()),\n auth=(self._api_key, 'pass'),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n leaves_json = json.loads(response.text)\n return {x['employeeId']: Leave(self._get_date_from_string(x['start']),\n self._get_date_from_string(x['end']))\n for x in leaves_json if 'employeeId' in x}",
"def isLeaveLeft(self,leave_type,days):\n if leave_type == 1 :\n return days<=self.earned_balance\n elif leave_type == 2 :\n return days<=self.hp_balance\n elif leave_type == 3 :\n return days*2<=self.hp_balance \n else :\n return False",
"def test_leave_accrual_access_rights(self):\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n accrual.write({\n 'line_ids': [(0, 0, {\n 'name': 'Test',\n 'amount_cash': 100,\n 'date': datetime.now(),\n })],\n })\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_3.id).check_access_rule, 'read')\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_2.id).check_access_rights, 'write')\n\n accrual.sudo(self.user_1.id).check_access_rule('read')\n self.assertTrue(\n accrual.sudo(self.user_1.id).check_access_rights('read'))\n\n # The manager can not access the leave accruals of the employee 2\n # because he is not the employee's manager\n accrual_2 = self.employee_2.get_leave_accrual(self.leave_type.id)\n\n self.assertRaises(\n Exception,\n accrual_2.sudo(self.user_1.id).check_access_rule, 'read')\n\n self.user_1.write({\n 'groups_id': [(4, self.ref('base.group_hr_manager'))]})\n\n for operation in ['read', 'write', 'create', 'unlink']:\n accrual_2.sudo(self.user_1.id).check_access_rule(operation)\n self.assertTrue(\n accrual_2.sudo(self.user_1.id).check_access_rights(operation))",
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('refuse')\n if res.state == 'refuse':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_refused\"\n )",
"def leave_request_accept(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.signal_workflow('validate')\n if res.state == 'validate':\n return request.website.render(\n \"tk_hr_approve_request.leave_request_accepted\"\n )",
"def reservation_mark_exit(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.SEATED:\n #Might want to add user notification\n reservation.exit_time = datetime.datetime.now()\n reservation.status = ReservationState.DONE\n db.session.commit()\n return True\n\n return False"
] | [
"0.76563054",
"0.6980905",
"0.6831349",
"0.65803087",
"0.65579015",
"0.65431947",
"0.6317081",
"0.6314969",
"0.62980187",
"0.60757554",
"0.59903985",
"0.58735424",
"0.5798595",
"0.5751281",
"0.5667299",
"0.5629215",
"0.5600948",
"0.55589676",
"0.55558693",
"0.5510392",
"0.54887307",
"0.5409216",
"0.53880954",
"0.5365955",
"0.5337869",
"0.529723",
"0.5136809",
"0.51218325",
"0.51036125",
"0.49653834"
] | 0.87582946 | 0 |
Handle HR users and officers recipients that can validate or refuse holidays directly from email. | def _notification_recipients(self, message, groups):
groups = super (Holidays, self)._notification_recipients (message, groups)
self.ensure_one ()
hr_actions = []
if self.state == 'confirm':
app_action = self._notification_link_helper ('controller', controller='/hr_holidays/validate')
hr_actions += [{'url': app_action, 'title': _ ('Approve')}]
if self.state in ['confirm', 'validate', 'validate1']:
ref_action = self._notification_link_helper ('controller', controller='/hr_holidays/refuse')
hr_actions += [{'url': ref_action, 'title': _ ('Refuse')}]
new_group = (
'group_hr_holidays_user', lambda partner: bool (partner.user_ids) and any (
user.has_group ('hr_holidays.group_hr_holidays_user') for user in partner.user_ids), {
'actions': hr_actions,
})
return [new_group] + groups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mail_responder(event, _):\n logger.info('%s: Request received:%s', __name__,\n str(event['Records'][0]['eventSource']))\n\n try:\n (source_email, recipient) = parse_ses_notification(\n event['Records'][0]['ses'])\n except Exception:\n logger.error('Error parsing received Email')\n return False\n\n LANG = CONFIG['LANG']\n\n logger.debug('Source Email {} recipient {}'.format(\n source_email, recipient))\n\n if recipient == CONFIG['TEST_EMAIL']:\n feedback.send_email(\n CONFIG['REPLY_EMAIL'],\n source_email,\n TEMPLATES['EMAIL_SUBJECT'],\n 'a',\n 'a',\n '',\n None,\n CONFIG['FEEDBACK_EMAIL'])\n return True\n\n elif recipient == CONFIG['TEST_EMAIL_NEW']:\n email_key(source_email, 'https://example.com')\n return True\n\n elif recipient == CONFIG['REPLY_EMAIL']:\n logger.info('Response to no-reply ignored')\n return True\n\n elif recipient == CONFIG['DELETE_USER_EMAIL']:\n try:\n deleted = api.delete_user(user_id=source_email)\n except Exception:\n email(source_email, 'try_again.j2')\n return False\n if deleted:\n email(source_email, 'unsubscribed.j2')\n return False\n\n elif recipient == CONFIG['GET_EMAIL']:\n try:\n user_exist = api.get_user(source_email)\n except Exception:\n logger.error('API error when checking {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n if not user_exist:\n try:\n api.create_user(source_email, 'EM')\n except Exception:\n logger.error('API error when Creating {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n try:\n new_key = api.get_new_key(user_id=source_email)\n except Exception:\n logger.error(\n 'API error when getting key fo {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n if not new_key:\n email(source_email, 'no_key.j2')\n return False\n\n awsurl = ((CONFIG['OUTLINE_AWS_URL']).format(\n urllib.parse.quote(new_key)))\n\n email_key(source_email, awsurl)\n\n return True",
"def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()",
"def holidays_validate(self, cr, uid, ids, context=None):\n \n super(hr_holidays ,self).holidays_validate(cr, uid, ids, context=context)\n \n grant_order_obj = self.pool.get(\"granted.rights.order\")\n grant_order_lines_obj = self.pool.get(\"granted.rights.order.lines\") \n department_obj = self.pool.get('hr.department')\n \n \n manager = False\n for rec in self.browse(cr,uid,ids):\n\t if rec.holiday_status_id.alternative_emp:\n\t\t dep_ids = department_obj.search(cr,uid,[('manager_id','=',rec.employee_id.id)])\n\t\t #if rec.employee_id.id == department_obj.browse(cr,uid,rec.department_id.id).manager_id.id :\n\t\t if dep_ids:\n\t\t\t\t manager = True\n\n\t\t \n\t\t grant_date = datetime.strptime(rec.date_to, '%Y-%m-%d %H:%M:%S')\n\t\t \n\t\t grant_date = grant_date + timedelta(days=1)\n\n\n\t\t if rec.date_to >= time.strftime('%Y-%m-%d'):\n\t\t\t order_id = grant_order_obj.create( cr, uid,{\n\t\t\t\t \n\t\t\t\t 'delegation_type' : 'holiday',\n\t\t\t\t 'holiday_order_id' :rec.id,\n\t\t\t\t 'employee_donor' : rec.employee_id.id,\n\t\t\t\t 'employee_candidate' : rec.alternative_employee.id,\n\t\t\t\t 'start_grant_date' : rec.date_from, \n\t\t\t\t 'end_grant_date' : grant_date,\n\t\t\t\t 'department_id' : rec.department_id.id,\n\t\t\t\t 'is_a_amanger' : manager,\n\n\t\t\t\t })\n\t\t\t \n\t\t\t res = grant_order_obj.on_change_donor_employee(cr, uid, order_id , rec.employee_id.id , context=context)\n\t\t\t for rec in res['value']['donor_groups_ids']:\n\t\t\t\t rec.update({ 'order_id' : order_id})\n\t\t\t\t grant_order_lines_obj.create( cr, uid,rec )\n \n return True",
"def create(self, values):\n\t\temployee_id = values.get('employee_id', False)\n\t\tprint(\"the val in the dict\", values)\n\t\tif (values.get('date_from') and values.get('date_to')) == False:\n\t\t\tcurrent = datetime.strftime(datetime.today().date(),'%Y-%m-%d')\n\t\t\tvalues.update({'allocate_date': current})\n\t\t\tprint(values)\n\t\tif not values.get('department_id'):\n\t\t\tvalues.update({'department_id': self.env['hr.employee'].browse (employee_id).department_id.id})\n\n\t\tholiday = super (Holidays, self.with_context (mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)\n\t\tholiday.add_follower (employee_id)\n\n\t\t# Trilok code for policies\n\t\tpolicy_id = holiday.env['leaves.policy'].search(\n\t\t\t[('leave_type', '=', holiday.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# print (\"policy iddddddddddddddd\",policy_id)\n\t\temp_type = holiday.employee_id.employee_type.id\n\t\tfor val in policy_id:\n\t\t\tif val.employee_type.id == emp_type:\n\t\t\t\tfor employee in holiday.employee_id:\n\t\t\t\t\tif holiday.type == 'remove':\n\t\t\t\t\t\tquery = '''select count(*) from hr_holidays where upper(type) = upper('rEMove')and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') and now()::date and employee_id = %s''' % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query)\n\t\t\t\t\t\tquery_result = holiday.env.cr.dictfetchone()\n\t\t\t\t\t\t# print(\"query_result\", query_result)\n\t\t\t\t\t\tif val.min_app_per_year > 0 and query_result[\"count\"] > val.min_app_per_year:\n\t\t\t\t\t\t\traise ValidationError(\"maximum number of applications per year is {} days\".format(val.min_app_per_year))\n\n\t\t\t\t\t\tquery1 = '''select create_date::date,date_to::date from hr_holidays where upper(type) = \n\t\t\t\t\t\tupper('rEMove') and upper(state) = upper('Validate') and create_date::date between 
to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') \n\t\t and now()::date and employee_id = %s order by create_date desc limit 1'''\\\n\t\t\t\t\t\t\t\t % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query1)\n\t\t\t\t\t\tquery_result1 = holiday.env.cr.fetchall()\n\t\t\t\t\t\tif query_result1 is not None:\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1)\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1[0][0], query_result1[0][1])\n\t\t\t\t\t\t\tcre_date = datetime.strptime(query_result1[0][0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdate_to = datetime.strptime(query_result1[0][1], '%Y-%m-%d')\n\t\t\t\t\t\t\t# print(\"cre_date\", cre_date, type(cre_date))\n\t\t\t\t\t\t\tcurrent_dt = fields.Datetime.now()\n\t\t\t\t\t\t\t# cdate=datetime.strptime(current_dt,'%Y-%m-%d')\n\t\t\t\t\t\t\tcurrent_date = datetime.strptime(current_dt.split(\" \")[0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdays = (current_date - date_to).days\n\t\t\t\t\t\t\tif val.min_leave_app_gap > 0 and days > val.min_leave_app_gap:\n\t\t\t\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\t\t\t\"Minimum gap between two application should be atleast {} days\".format(\n\t\t\t\t\t\t\t\t\t\tval.min_leave_app_gap))\n\n\t\treturn holiday",
"def incoming_mail(request, recipients):\n try:\n _process_incoming_mail(request.raw_post_data, recipients)\n except InvalidIncomingEmailError as err:\n logging.debug(str(err))\n return HttpTextResponse('')",
"def email_process(recipient_list: List[Client]) -> None:\n\n if recipient_list:\n send_email(recipient_list)\n update_only_emailed_clients(recipient_list)\n remove_fully_contacted_clients()\n else:\n print(\"No emails were sent.\")",
"def throwerEmail_update(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['email', 'email_passwd'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Find the thrower\n\t\toThrower = Thrower.get(sesh['thrower']['_id'])\n\t\tif not oThrower:\n\t\t\treturn Services.Effect(error=1104)\n\n\t\t# Validate the password\n\t\tif not oThrower.passwordValidate(data['email_passwd']):\n\t\t\treturn Services.Effect(error=(1001, [('email_passwd', 'invalid')]))\n\n\t\t# Make sure the email is valid structurally\n\t\tif not _emailRegex.match(data['email']):\n\t\t\treturn Services.Effect(error=(1001, [('email', 'invalid')]))\n\n\t\t# Look for someone else with that email\n\t\tdThrower = Thrower.get(data['email'], index='email', raw=['_id'])\n\t\tif dThrower:\n\t\t\treturn Services.Effect(error=(1206, data['email']))\n\n\t\t# Update the email and verified fields\n\t\ttry:\n\t\t\toThrower['email'] = data['email']\n\t\t\toThrower['verified'] = StrHelper.random(32, '_0x')\n\t\texcept ValueError as e:\n\t\t\treturn Services.Effect(error=(1001, e.args[0]))\n\n\t\t# Update the thrower\n\t\toThrower.save(changes={\"creator\":sesh['thrower']['_id']})\n\n\t\t# Send en e-mail for verification\n\t\tdConf = Conf.get(\"domain\")\n\t\tsURL = \"%s://external.%s/verify/%s/%s\" % (\n\t\t\tdConf['protocol'],\n\t\t\tdConf['primary'],\n\t\t\toThrower['_id'],\n\t\t\toThrower['verified']\n\t\t)\n\t\toEffect = Services.create('communications', 'email', {\n\t\t\t\"_internal_\": Services.internalKey(),\n\t\t\t\"html_body\": Templates.generate('email/verify.html', {\"url\":sURL}, oThrower['locale']),\n\t\t\t\"subject\": Templates.generate('email/verify_subject.txt', {}, oThrower['locale']),\n\t\t\t\"to\": data['email'],\n\t\t})\n\t\tif oEffect.errorExists():\n\t\t\treturn oEffect\n\n\t\t# Return OK\n\t\treturn Services.Effect(True)",
"def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email",
"def process_event(self, event):\n message = 'From: ' + self.fromaddr + \\\n '\\nTo: ' + join(self.toaddrs, ', ') + \\\n '\\nSubject: ' + event.data.match.expand(self.subject) + \\\n '\\n\\n' + \\\n event.data.match.expand(self.body) + '\\n' \n\n try:\n server = SMTP(self.smtphost)\n server.sendmail(self.fromaddr, self.toaddrs, message)\n server.quit()\n\n except Exception, e:\n print >> stderr, \"Could not send mail:\", e\n\n return 1",
"def inbound_handler():\n req = request.json\n # Take the time to clear out any past reminders\n try:\n virtual_tn = req['to']\n assert len(virtual_tn) <= 18\n sms_from = req['from']\n assert len(sms_from) <= 18\n req['body']\n except (TypeError, KeyError, AssertionError) as e:\n msg = (\"Malformed inbound message: {}\".format(req))\n log.error({\"message\": msg, \"status\": \"failed\", \"exc\": str(e)})\n return Response('There was an issue parsing your request.', status=400)\n else:\n Reminder.clean_expired()\n try:\n appt = Reminder.query.filter_by(\n contact_num=sms_from).one()\n except NoResultFound:\n msg = \"No existing un-responded reminder for contact {}.\".format(\n sms_from)\n log.info({\"message\": msg})\n return Response(status=200)\n else:\n message = req['body'].upper()\n if 'YES' in message:\n appt.will_attend = True\n confirm = True\n elif 'NO' in message:\n appt.will_attend = False\n confirm = False\n else:\n confirm = None\n db_session.add(appt)\n try:\n send_reply.apply_async((appt.id,), {'confirm': confirm})\n except ConnectionError as e:\n log.critical({\"message\": \"unable to connect to redis\",\n \"exc\": type(e)})\n db_session.rollback()\n return Response(status=500)\n else:\n db_session.commit()\n log.info({\"message\":\n (\"successfully recorded response from {}, scheduled \"\n \"SMS confirmation for appointment {}\").format(\n sms_from, appt.id),\n \"reminder_id\": appt.id})\n return Response(status=200)",
"def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )",
"def send_email_to_assigned_user(recipients, lead_id, domain='demo.django-crm.io', protocol='http', source=''):\n lead = Lead.objects.get(id=lead_id)\n created_by = lead.created_by\n blocked_domains = BlockedDomain.objects.values_list('domain', flat=True)\n blocked_emails = BlockedEmail.objects.values_list('email', flat=True)\n for user in recipients:\n recipients_list = []\n user = User.objects.filter(id=user, is_active=True).first()\n if user:\n if (user.email not in blocked_emails) and (user.email.split('@')[-1] not in blocked_domains):\n recipients_list.append(user.email)\n context = {}\n context[\"url\"] = protocol + '://' + domain + \\\n reverse('leads:view_lead', args=(lead.id,))\n context[\"user\"] = user\n context[\"lead\"] = lead\n context[\"created_by\"] = created_by\n context[\"source\"] = source\n subject = 'Assigned a lead for you. '\n html_content = render_to_string(\n 'assigned_to/leads_assigned.html', context=context)\n msg = EmailMessage(\n subject,\n html_content,\n to=recipients_list\n )\n msg.content_subtype = \"html\"\n msg.send()",
"def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')",
"def delegate_remainder(template=None):\n\n regs = Registration.objects.all()\n\n for reg in regs:\n subject = DEF_REMAINDER_ACCO_CONTACT_SUBJECT\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.get_full_name()})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')",
"def mailto_supervisor(request, application):\n applicant_name = application.get_full_name()\n subject = '{} -- {} clinical database access request'.format(\n applicant_name, settings.SITE_NAME)\n body = loader.render_to_string(\n 'notification/email/mailto_contact_supervisor.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n # rm comma to handle mailto issue with comma and special char.\n # ref https://github.com/MIT-LCP/physionet-build/issues/1028\n to = formataddr((application.reference_name.replace(',', ''),\n application.reference_email))\n bcc = 'credential-reference+{0}@{1}'.format(\n application.id, get_current_site(request))\n return mailto_url(to, subject=subject, bcc=bcc, body=body)",
"def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. {}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201",
"def contact_supervisor(request, application):\n applicant_name = ' '.join([application.first_names, application.last_name])\n subject = 'Please verify {} for {} credentialing'.format(\n applicant_name, settings.SITE_NAME)\n body = loader.render_to_string(\n 'notification/email/contact_supervisor.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [application.reference_email], fail_silently=False)",
"def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')",
"def process_data(data):\n\n response = \"Seems like there's some problem with the input.\"\n user_input = data['message'].split()\n\n try:\n\n if len(user_input) == 1 and user_input[0] == 'help':\n response = slack_help()\n return\n\n elif len(user_input) == 1 and user_input[0] == 'check':\n response = check_leaves(data['user'])\n return\n\n elif user_input[0] == 'apply':\n user_data = get_user_data(data['user'])\n leave_type = user_input[1]\n\n if 'for' in user_input:\n reason = ' '.join(user_input[6:])\n day = fetch_day(user_input[4], user_data['timezone'])\n if not reason:\n response = \"Please insert reason as well. (Followed by keyword `reason`)\"\n return\n\n update = record_transaction(user_data['email'], leave_type, day, day, reason,\n data['message_id'], data['time'])\n response = update['text']\n if update['status']:\n send_message_to_chat(notification_channel, f\"Hi Team, {user_data['name']} will be on leave \"\n f\"{user_input[4]}. \\nReason: {reason}\")\n return\n\n elif 'from' and 'till' in user_input:\n reason = ' '.join(user_input[8:])\n # dealing with from day:\n start_day = fetch_day(user_input[4], user_data['timezone'])\n\n # dealing with till day:\n end_day = fetch_day(user_input[6], user_data['timezone'])\n delta = end_day - start_day\n if delta.days < 0 or delta.hours < 0:\n response = \"Please put in proper date. End date can't be prior to start date.\"\n return\n if not reason:\n response = \"Please insert reason as well. (Followed by keyword `reason`)\"\n return\n\n update = record_transaction(user_data['email'], leave_type, start_day, end_day, reason,\n data['message_id'], data['time'])\n response = update['text']\n if update['status']:\n send_message_to_chat(notification_channel, f\"Hi Team, {user_data['name']} will be on leave from \"\n f\"{start_day.to_date_string()} till {end_day.to_date_string()}. 
\"\n f\"\\nReason: {reason}\")\n return\n\n except Exception as e:\n logging.error(e)\n\n finally:\n send_message_to_chat(data['channel'], response)",
"def get_email(cls, unused_provider_details):\r\n return None",
"def resend_email(self, userdict):\n return self.post('resend', userdict)",
"def email_body_cancellation_from_buyer_outside_24_hours(buyer_name, sellr_name):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> Shucks. You cancelled your appointment. Thanks for letting <a href=\"#\" style=\"color:#1488CC\">' + sellr_name + '</a> know ahead of time; you will not be charged for the cancellation.<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to reschedule? Go right ahead. <br><br>'\n\tmsg = msg + '\\t\\t\\t You can also explore other options, too. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #e6e6e6;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg",
"def recipient(self, recipient, model):\n if recipient == \"hod\":\n workmails = model.address_id, model.work_email\n workmail = {workmail for workmail in workmails if workmail}\n workmail = workmail.pop() if workmail else model.work_email\n if not isinstance(workmail, str):\n try:\n return workmail.email\n except:\n pass\n return workmail\n elif recipient == \"department_manager\":\n manager = model.manager_id\n return manager.work_email or manager.address_id.email",
"def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n # thisName = (\"%s %s\" % (result['Name'], result['Surname']))\n thisName = (\"%s\" % (result['Name']))\n thisAddress = (\"%s</br>%s</br>%s %s\" % (result['Address1'], result['Address2'], result['Town'], result['Postcode']))\n thisAddress = thisAddress.replace(\"None </br>\", \"\")\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n thisAddress = thisAddress.replace(\"None</br>\", \"\")\n participantCount = (\"%s\" % mdb.getParticipantCount(str(householdID)))\n # prepare the custom email\n\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n # DOESN'T happen yet - de is excluded from query for now\n # emailPath = os.path.join(thisPath, \"emails/email_confirm_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_automated_date.html\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[address]\", thisAddress)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n templateText = templateText.replace(\"[participantCount]\", participantCount)\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. 
remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" [email protected] < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b [email protected] < ' + emailFilePath, shell=True)",
"def on_email_email_changed(\n request, user, from_email_address: EmailAddress, to_email_address, **kwargs\n):\n if from_email_address:\n dillo.tasks.profile.update_mailing_list_subscription(from_email_address.email, False)",
"def send_mail_when_failed(self, body):\r\n pass",
"def notify_participant_event_withdraw(request, user, event):\n\n subject = f\"{settings.SITE_NAME} Event Registration Withdrawn\"\n context = {\n 'name': user.get_full_name(),\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'event_title': event.title,\n 'event_url': reverse('event_detail', args=[event.slug]),\n 'SITE_NAME': settings.SITE_NAME,\n }\n body = loader.render_to_string('events/email/event_participation_withdraw.html', context)\n # Not resend the email if there was an integrity error\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user.email], fail_silently=False)",
"def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')",
"def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)",
"def to_do_fehrist_tasks_reminder():\n\n from todofehrist.models import Task, User\n from todofehrist.utility import send_email\n\n result = Task.objects.filter(\n completion_status=0, completion_datetime__date=date.today()).values(\"user\").annotate(\n count=Count(\"user\"))\n\n for user_tasks_entry in result:\n email_address = User.objects.get(pk=user_tasks_entry[\"user\"]).email\n send_email(\"ToDoFehrist - Pending Tasks Reminder\",\n f\"You have {user_tasks_entry['count']} pending tasks due today.\",\n email_address)\n\n logging.debug(f\"Reminder Email sent to user with email address {email_address}\")"
] | [
"0.5860881",
"0.5794972",
"0.5757674",
"0.5723443",
"0.5606513",
"0.5556952",
"0.5550611",
"0.5537931",
"0.5508338",
"0.5471315",
"0.53628576",
"0.53263485",
"0.53258735",
"0.53146553",
"0.52828825",
"0.52389985",
"0.5191454",
"0.51899254",
"0.5173753",
"0.5155635",
"0.5153288",
"0.51376283",
"0.51342404",
"0.513297",
"0.51235443",
"0.5122932",
"0.5121345",
"0.509686",
"0.50927866",
"0.50915945"
] | 0.59531474 | 0 |
Skips tokens until we hit the token with value 'find' | def skip_until(self, find, consume=False):
# TODO: handle scanner errors, EOF
if isinstance(find, str):
find = [find]
while self.token.value not in find:
self.token = next(self.tokens)
if consume:
self.token = next(self.tokens)
# always consume the newlines
if self.token.value == '\n' and '\n' in find:
self.skip_until('\n', consume=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skip(self, position):\n if not self._is_position_in_corpus(position):\n raise ValueError('{0} not in corpus'.format(position))\n\n self._staged_next = None\n self._has_staged = False\n self._next_search_pos = position",
"def test_find_token_no_matches(self, token_re):\n token_re.finditer.return_value = ()\n\n return_value = TokenRemover.find_token_in_message(self.msg)\n\n self.assertIsNone(return_value)\n token_re.finditer.assert_called_once_with(self.msg.content)",
"def process_skippable(self, pos):\n newlines = []\n done = False\n while not done:\n # Strip any leading whitespace\n ws = self.whitespace_match.match(self.src, pos=pos)\n if ws:\n # The span of a match from regex is (startidx, after_idx), so our new position is just the second\n # value in that tuple.\n pos = ws.span()[1]\n\n # Check for a line break\n lb = self.linebreak_match.match(self.src, pos=pos)\n if lb:\n # Record the line break in our newlines list.\n lb_span = lb.span()\n newlines.append(Span(start=lb_span[0], after=lb_span[1]))\n pos = lb_span[1]\n # And then jump back to the top of the token scanner.\n continue\n\n # Check for single line comments\n slc = self.single_line_comment_match.match(self.src, pos=pos)\n if slc:\n # We just skip over single line comments (because they cannot include line breaks)\n pos = slc.span()[1]\n continue\n\n # Check for multi-line comments\n mlc = self.multi_line_comment_match.match(self.src, pos=pos)\n if mlc:\n span = mlc.span()\n # We need to collect any newlines embedded in the comment.\n scan_pos = span[0]\n while 1:\n embedded = self.linebreak_match.search(self.src, pos=scan_pos, endpos=span[1])\n if embedded is None:\n break\n lb_span = embedded.span()\n newlines.append(Span(start=lb_span[0], after=lb_span[1]))\n scan_pos = lb_span[1]\n pos = span[1]\n continue\n\n # None of those means we're at something we shouldn't ignore.\n done = True\n return self.skippable(after=pos, newlines=newlines)",
"def _skipbykeyword(self, keywordexpr):\n if not keywordexpr:\n return\n chain = self.listchain()\n for key in filter(None, keywordexpr.split()):\n eor = key[:1] == '-'\n if eor:\n key = key[1:]\n if not (eor ^ self._matchonekeyword(key, chain)):\n return True",
"def skip(self) -> \"Scanner\":\n raise NotImplementedError",
"def skip(self, *what: str or tuple) -> bool:\n for x in what:\n if x != TokenName.space.value:\n self.ignore_ws()\n\n tok = self.list_tokens[0]\n if tok.token != x:\n return False\n else:\n self.list_tokens.pop(0)\n return True",
"def ignore_token(self):\n\n def get_next_ignore(remove=False):\n \"\"\"Get next ignore from ignore_next and remove from ignore_next\"\"\"\n next_ignore = self.ignore_next\n\n # Just want to return it, don't want to remove yet\n if not remove:\n if type(self.ignore_next) in (list, tuple):\n next_ignore = self.ignore_next[0]\n return next_ignore\n\n # Want to remove it from ignore_next\n if type(next_ignore) in (list, tuple) and next_ignore:\n next_ignore = self.ignore_next.pop(0)\n elif not next_ignore:\n self.next_ignore = None\n next_ignore = None\n else:\n self.next_ignore = None\n\n return next_ignore\n\n # If we have tokens to be ignored and we're not just inserting till some token\n if not self.insert_till and self.ignore_next:\n # Determine what the next ignore is\n next_ignore = get_next_ignore()\n if next_ignore == (self.current.tokenum, self.current.value):\n # Found the next ignore token, remove it from the stack\n # So that the next ignorable token can be considered\n get_next_ignore(remove=True)\n return True\n else:\n # If not a wildcard, then return now\n if type(next_ignore) is not WildCard:\n return False\n\n # Go through tokens until we find one that isn't a wildcard\n while type(next_ignore) == WildCard:\n next_ignore = get_next_ignore(remove=True)\n\n # If the next token is next ignore then we're done here!\n if next_ignore == (self.current.tokenum, self.current.value):\n get_next_ignore(remove=True)\n return True\n else:\n # If there is another token to ignore, then consider the wildcard\n # And keep inserting till we reach this next ignorable token\n if next_ignore:\n self.insert_till = next_ignore\n return False",
"def skip(self, word_list, word_type):\n\n while self.peek(word_list) == word_type:\n self.match(word_list, word_type)",
"def should_skip(self, text):\n return self.skipper and self.skipper.match(text)",
"def continue_search( self ):\n return True;",
"async def test_on_message_skips_missing_token(self, find_token_in_message, take_action):\n cog = TokenRemover(self.bot)\n find_token_in_message.return_value = False\n\n await cog.on_message(self.msg)\n\n find_token_in_message.assert_called_once_with(self.msg)\n take_action.assert_not_awaited()",
"def skip_lines(input_file, keyword):\n dummy = ''\n while True:\n dummy = input_file.readline().strip()\n if dummy == keyword:\n dummy = input_file.readline()\n break\n return input_file",
"def preorder_search(self, start, find_val):\n return False",
"def skip(self, skip):\n self._evaluated = False\n self._offset = skip\n return self",
"def not_equal(self, skip):\n for word in self.two_words():\n if word.value != skip:\n return word",
"def skip(func):\n return",
"def skip_until_re(self, r, flags=0, timeout=None):\n match = self.read_cond(lambda x: re.search(r, x.buf, flags=flags), timeout)\n self.buf = self.buf[match.start():]\n return match if len(match.groups()) > 1 else match.group(len(match.groups()))",
"def escSkipSomethingW(self) :\n while 1 :\n (value, end) = self.getInteger()\n if value is None :\n return\n if end == 'W' : \n self.pos += value\n #self.logdebug(\"SKIPTO %08x\" % self.pos)",
"def skip_until(self, s, timeout=None):\n self.read_cond(lambda x: s in x.buf, timeout)\n start = self.buf.find(s)\n self.buf = self.buf[start:]\n return",
"def _skip(self):\n if self.current_char == \"/\":\n last_position = self.current_position\n if self._peek() == \"/\": # Single-line comment\n while self.current_char != \"\\n\":\n self._next_char()\n elif self._peek() == \"*\": # Multiple-line comment\n while self.current_char != \"*\" or self._peek() != \"/\":\n self._next_char()\n if self.EOF: # Check unclosed comment\n raise LexerError(\n last_position, f\"Unclosed comment at position {last_position}\")\n self._next_char()\n self._next_char()\n\n while self.current_char in [\" \", \"\\t\", \"\\r\", \"\\n\"]:\n self._next_char()",
"def unget(self):\n self.lookahead += 1\n \n if self.lookahead == 4: \n raise ParseError(\"PANIC: too much lookahead!\", self.fileId, self.line)\n \n self.tokenIndex = (self.tokenIndex - 1) & 3",
"def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"",
"def Find(self, children, sink):\n\n tkns = [];\n for child in children:\n key = child.word;\n if not child.word: key = child.tag;\n tkns.append(key);\n self.FindFromTokens(tkns, sink);",
"def skip_ents(self, doc, skip=[\"CARDINAL\", \"DATE\", \"QUANTITY\", \"TIME\"]):\n # Match months\n with doc.retokenize() as retokenizer:\n for ent in doc.ents:\n merge = True\n if ent.label_ in skip:\n merge = False\n if ent.label_ == \"DATE\" and re.match(regex.MONTHS_RE, ent.text.lower()):\n merge = True\n if merge == True:\n attrs = {\n \"tag\": ent.root.tag,\n \"dep\": ent.root.dep,\n \"ent_type\": ent.label,\n }\n retokenizer.merge(ent, attrs=attrs)\n return doc",
"def iter_with_skips(self, skip_types=None, skip_type_names=None, skip_values=None):\n for t in self.token_list:\n if skip_types and t.type in skip_types:\n continue\n if skip_type_names and t.type_name in skip_type_names:\n continue\n if skip_values and t.value in skip_values:\n continue\n yield t",
"def search(self, find_val):\n return False",
"def skip_prologue(text, cursor):\n ### NOT AT ALL COMPLETE!!! definitely can be confused!!!\n prologue_elements = (\"!DOCTYPE\", \"?xml\", \"!--\")\n done = None\n while done is None:\n #print \"trying to skip:\", repr(text[cursor:cursor+20])\n openbracket = text.find(\"<\", cursor)\n if openbracket<0: break\n past = openbracket+1\n found = None\n for e in prologue_elements:\n le = len(e)\n if text[past:past+le]==e:\n found = 1\n cursor = text.find(\">\", past)\n if cursor<0:\n raise ValueError(\"can't close prologue %r\" % e)\n cursor = cursor+1\n if found is None:\n done=1\n #print \"done skipping\"\n return cursor",
"def run_skip(self):\n pass",
"def skip_to_word(self) -> None:\n char = ...\n while self._is_whitespace(char):\n char = self.next_character()",
"def _nonkey():\n def not_key(s):\n return not (lexer.singularize(s.lower()) in pattern_key)\n def p(tok):\n return tok.type == 'WORD' and not_key(tok.value)\n return next_word().if_test(p)"
] | [
"0.5658785",
"0.56327987",
"0.5598761",
"0.5581111",
"0.5537023",
"0.5429428",
"0.53949994",
"0.5328356",
"0.52932805",
"0.52559245",
"0.52536815",
"0.5205816",
"0.5193714",
"0.51935846",
"0.517538",
"0.5172944",
"0.50992453",
"0.50889593",
"0.50706685",
"0.50678885",
"0.49908254",
"0.49766168",
"0.49666476",
"0.49542418",
"0.49530435",
"0.49354056",
"0.4931714",
"0.49215454",
"0.49005497",
"0.48754057"
] | 0.7885091 | 0 |
Returns a list of symbol names in the current scope | def cur_symbols(self):
return self.symbols[-1].keys() + self.global_symbols.keys() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_symbol(self):\n return []",
"def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()",
"def get_symbols_list(self):\n return self.symbols_list",
"def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec",
"def ionic_symbols(self) -> list[str]:\n return self.to_list().symbols",
"def get_symbols(self):\n\n raise NotImplementedError('''\n Must implement get_symbols. Call help() for details.\n ''')",
"def getSymbols(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> List[ghidra.program.model.symbol.Symbol]:\n ...",
"def symbols(self) -> List[SingleMapping]:\n return self._symbols",
"def getSymbols(self):\n return self.alpha.getSymbols()",
"def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)",
"def find_symbols(self, **kw):\n return list(self.ifind_symbols(**kw))",
"def symbols(self):\n pass",
"def symbols(self) -> Dict[str, Variable]:\n symbols = {}\n symbols.update(self.args)\n symbols.update(self.locals)\n return symbols",
"def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]",
"def get_symmetries(self):\n temp = self._properties.get('symmetries', [])\n return temp",
"def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist",
"def ls():\n for f in get_user_functions(globals()):\n print (str(f).split()[1])",
"def names(self) -> list[str]:",
"def symbol_table(self) -> str:\n return self._symbol_table",
"def get_scopes(self, frame_ord):\n\n frame = self.get_frame(frame_ord)\n\n return [self.get_scope(frame, frame.f_locals, \"Locals\", False), self.get_scope(frame, frame.f_globals, \"Globals\", True)]",
"def names(self):\n return [x for x in self._dict.keys()]",
"def getGlobalFunctions(self, name: unicode) -> List[ghidra.program.model.listing.Function]:\n ...",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def _globals(self):\n return self.dicts[0]._globals()",
"def itersymbols(self):\n for syms in self._symbols.itervalues():\n for sym in syms:\n yield sym",
"def m_symb(self):\n return self._m_symb",
"def scopes(self):\n return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]",
"def getNames(self) -> List[unicode]:\n ...",
"def show_symbol_table(st):\n print(st)\n # Dump the name lists get_*()\n if isinstance(st, symtable.Function):\n for nlist in _NAME_LISTS:\n names = getattr(st, \"get_\"+nlist)()\n if names:\n print(' {} : {!r}'.format(nlist, names))\n # Dump the properties as short names is_global -> global, etc..\n for s in st.get_symbols():\n scope = to_scope_name(s._Symbol__scope)\n props = [scope]\n for p in _NAME_PROPS:\n if getattr(s, \"is_\"+p)():\n props.append(p)\n print(' \"{}\" : {}'.format(s.get_name(), ', '.join(props)))"
] | [
"0.74625385",
"0.72063017",
"0.7173719",
"0.69365084",
"0.6891114",
"0.68857193",
"0.6881177",
"0.6853945",
"0.6763958",
"0.666098",
"0.65792805",
"0.6552155",
"0.6484818",
"0.6465965",
"0.6334895",
"0.6305106",
"0.6294767",
"0.6293096",
"0.6277566",
"0.6261958",
"0.6206069",
"0.6189047",
"0.61850256",
"0.61850256",
"0.61657476",
"0.61591345",
"0.6145266",
"0.6143763",
"0.61352",
"0.6121792"
] | 0.72822034 | 1 |
Returns the size in byte of the local paramters | def local_param_size(self):
size = 0
for s in self.symbols[-1]:
if self.symbols[-1][s].type == 'procedure': continue
if not self.symbols[-1][s].isparam: continue
size += 1
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_symbols_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if self.symbols[-1][s].isparam: continue\n #if self.symbols[-1][s].isparam: continue\n size += self.symbols[-1][s].size\n return size",
"def param_size(model):\n n_params = sum(\n np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))\n return n_params / 1024. / 1024.",
"def get_size(self):",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize",
"def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)",
"def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params",
"def get_size(self):\n ...",
"def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])",
"def getSize(self):\n if self.sym != None:\n return self.sym.getSize()\n return self.define.getSize()",
"def num_param(self):\n return len(self._parameters)",
"def num_params(self):",
"def len_parameters(self):\n return len(self._Parameters._fields)",
"def num_parameters(self) -> int:\n return len(self) * self.convention.value",
"def get_num_parameters(self):\n return len(self.parameters)",
"def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> long:\n ...",
"def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)",
"def size():\n return int(os.environ['WORLD_SIZE'])",
"def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params",
"def size(*args):",
"def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params",
"def __get_size(self):\n\t\treturn 4*self.version + 17",
"def _get_final_size(param_grid):\n tmp = {} # same pattern than param_grid but store the size\n for idx, key in enumerate(param_grid.iterkeys()):\n if isinstance(param_grid[key], list):\n tmp[idx] = [sys.getsizeof(value) for value in param_grid[key]]\n else:\n tmp[idx] = [sys.getsizeof(param_grid[key])]\n return np.array([x for x in itertools.product(*tmp.values())]).sum()",
"def __len__(self) -> int:\n return len(self.parameters)",
"def get_size(self):\n return self.get_params().shape[0]",
"def get_size(self):\n return self.__size",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()",
"def size(self) -> int:"
] | [
"0.759114",
"0.702782",
"0.70264846",
"0.69344485",
"0.69227624",
"0.68348753",
"0.68206537",
"0.6802461",
"0.67675924",
"0.6758175",
"0.6757479",
"0.6746264",
"0.6734",
"0.6717889",
"0.6693805",
"0.66817945",
"0.66817945",
"0.66736794",
"0.6670453",
"0.6669982",
"0.66659766",
"0.6658869",
"0.66568047",
"0.66539055",
"0.6629959",
"0.6628996",
"0.6626838",
"0.6620318",
"0.6601923",
"0.6581004"
] | 0.8453446 | 0 |
Returns the size in bytes of the local symbols | def local_symbols_size(self):
size = 0
for s in self.symbols[-1]:
if self.symbols[-1][s].type == 'procedure': continue
if self.symbols[-1][s].isparam: continue
#if self.symbols[-1][s].isparam: continue
size += self.symbols[-1][s].size
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def global_symbols_size(self):\n size = 0\n for s in self.global_symbols:\n if self.global_symbols[s].type == 'procedure': continue\n size += self.global_symbols[s].size\n return size",
"def expected_size(self):\n return self.nsym * self.symbol_len_per_byte",
"def getSize(self):\n if self.sym != None:\n return self.sym.getSize()\n return self.define.getSize()",
"def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size",
"def symbol_width(self):\n return self._symbol.get_extents().width",
"def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result",
"def size_of_variables(glob):\n return sorted(\n [\n (k, sys.getsizeof(glob[k]) / 1e6)\n for k in list(glob.keys())\n ],\n key=lambda k_v: k_v[1],\n reverse=True\n )",
"def getSize(self):\n if self.subsym == None:\n if self.size == 0:\n return 1\n else:\n return self.size\n else:\n if self.size == 0:\n return self.subsym.getSize()\n else:\n return self.size * self.subsym.getSize()",
"def getSize(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)",
"def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)",
"def size(name):",
"def CountLocal():\r\n return _hiew.HiewGate_Names_CountLocal()",
"def total_length():\n return",
"def __len__(self):\n return self._fa.faidx.index[self.name].rlen",
"def total_size(self, code):\n total = 0\n for address in list(code.keys()):\n total += len(code[address])\n return total",
"def size():\n return int(os.environ['WORLD_SIZE'])",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize",
"def local_bitwidth():\r\n # Note that according to Python documentation, `platform.architecture()` is\r\n # not reliable on OS X with universal binaries.\r\n # Also, sys.maxsize does not exist in Python < 2.6.\r\n # 'P' denotes a void*, and the size is expressed in bytes.\r\n return struct.calcsize('P') * 8",
"def size(self):\n return len(self.chars)",
"def getGlobalSize(self):\n return self._get_global_size( )",
"def countSymbols(epr, symbols):\n freeSymbols = epr.free_symbols\n return len(set(freeSymbols).intersection(symbols))",
"def __get_size(self):\n\t\treturn 4*self.version + 17",
"def getSize(self):\n return self.bf.memory()",
"def get_size(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetSize', self.handle)",
"def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S",
"def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)",
"def lvar_mapping_size(*args):\n return _ida_hexrays.lvar_mapping_size(*args)",
"def get_var_nbytes(self, var_name):\n return self.get_value_ref(var_name).nbytes",
"def get_size(self):",
"def size(self):\n # type: () -> string_types\n return self._size"
] | [
"0.78402543",
"0.74415696",
"0.7194053",
"0.67470306",
"0.6624587",
"0.63827235",
"0.6340208",
"0.6319925",
"0.6165855",
"0.609423",
"0.60747105",
"0.6067792",
"0.6066112",
"0.6058626",
"0.6032693",
"0.602242",
"0.60105544",
"0.6000291",
"0.59911376",
"0.5981964",
"0.594508",
"0.5934434",
"0.59202427",
"0.591215",
"0.58786917",
"0.58683556",
"0.58425564",
"0.58384573",
"0.5835789",
"0.5817639"
] | 0.85582393 | 0 |
Returns the size in bytes of the global symbols | def global_symbols_size(self):
size = 0
for s in self.global_symbols:
if self.global_symbols[s].type == 'procedure': continue
size += self.global_symbols[s].size
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_symbols_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if self.symbols[-1][s].isparam: continue\n #if self.symbols[-1][s].isparam: continue\n size += self.symbols[-1][s].size\n return size",
"def expected_size(self):\n return self.nsym * self.symbol_len_per_byte",
"def getSize(self):\n if self.sym != None:\n return self.sym.getSize()\n return self.define.getSize()",
"def getGlobalSize(self):\n return self._get_global_size( )",
"def size_of_variables(glob):\n return sorted(\n [\n (k, sys.getsizeof(glob[k]) / 1e6)\n for k in list(glob.keys())\n ],\n key=lambda k_v: k_v[1],\n reverse=True\n )",
"def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result",
"def GlobalSize(self):\n return _hypre.HypreParVector_GlobalSize(self)",
"def symbol_width(self):\n return self._symbol.get_extents().width",
"def getSize(self):\n if self.subsym == None:\n if self.size == 0:\n return 1\n else:\n return self.size\n else:\n if self.size == 0:\n return self.subsym.getSize()\n else:\n return self.size * self.subsym.getSize()",
"def size():\n return int(os.environ['WORLD_SIZE'])",
"def getSize(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)",
"def __get_size(self):\n\t\treturn 4*self.version + 17",
"def getBitsPerSymbol(self):\n \n return self.bits_per_symbol",
"def hbins_size(self):\n return self.unpack_dword(0x10)",
"def getSize(self):\n return self.bf.memory()",
"def CountGlobal():\r\n return _hiew.HiewGate_Names_CountGlobal()",
"def get_size(self):",
"def hbins_size(self):\n return self.unpack_dword(0x28)",
"def get_insternal_size(self):\n return (\n sys.getsizeof(self.theta) +\n sys.getsizeof(self.num_buckets) +\n sys.getsizeof(self.k) +\n sys.getsizeof(self.fp_size) +\n sys.getsizeof(self.max_iter) +\n sys.getsizeof(self.bucket_size)\n )",
"def countSymbols(epr, symbols):\n freeSymbols = epr.free_symbols\n return len(set(freeSymbols).intersection(symbols))",
"def setSymbolSize(x=30):\n dislin.hsymbl(x)",
"def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}",
"def size(self):\n return len(self.chars)",
"def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size",
"def size_bytes(self):\n size_words = self.size_words()\n if size_words is None:\n return None\n return size_words * intnat.sizeof",
"def print_size(self):\n return self.container['print_size']",
"def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S",
"def get_gsize(self):\n gsize_file = Genome(self.genome).get_fasize()\n gsize = 0\n with open(gsize_file, 'rt') as fi:\n for a in fi:\n c, n = a.strip().split('\\t')\n gsize += int(n)\n return gsize",
"def get_size(self):\n ...",
"def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)"
] | [
"0.7904729",
"0.77030617",
"0.7503799",
"0.7279682",
"0.6880621",
"0.6663535",
"0.6554453",
"0.65205085",
"0.64663875",
"0.6297727",
"0.6290106",
"0.6283424",
"0.6266394",
"0.61969995",
"0.61902064",
"0.61764604",
"0.6048911",
"0.604705",
"0.6027367",
"0.6023816",
"0.60055757",
"0.59835064",
"0.5971852",
"0.5968874",
"0.5967969",
"0.59483594",
"0.5944005",
"0.59353834",
"0.59147185",
"0.59111327"
] | 0.87714946 | 0 |
Get account balance for address at a given block number | def get_balance_by_block(address, block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
method = 'hmy_getBalanceByBlockNumber'
params = [
address,
str(hex(block_num))
]
balance = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
try:
return int(balance, 16)
except TypeError as e:
raise InvalidRPCReplyError(method, endpoint) from e | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_balance(self, address):\n balance = 0\n for block in self.chain:\n for t in block['transactions']:\n if t['recipient'] == address:\n balance += t['amount']\n elif t['sender'] == address:\n balance -= t['amount']\n return balance",
"def get_balance(address: str) -> int:\n return base.Balances(balance_of).get(address)",
"def get_address_balance(litecoinaddress):\n total_balance = 0\n unspent = list_unspent(litecoinaddress)\n for block in unspent:\n total_balance += float(block[\"amount\"])\n\n return total_balance",
"def balance_of_address(self, address):\n balance = 0\n for block in self.chain:\n for transaction in block.transactions:\n if transaction.walletoffrom == address:\n balance -= transaction.amount\n\n if transaction.walletofto == address:\n balance += transaction.amount\n return balance",
"def getBtcInWallet(address):\n btc = 'https://blockchain.info/q/addressbalance/' + address\n check = requests.get(btc)\n value = int((check.content)) / 100000000.0\n return value",
"def balance(self, account_number: int): \n return self._accounts[account_number][1]",
"def get_balance(self, address: str, erc20_address: str) -> int:\n return get_erc20_contract(self.w3, erc20_address).functions.balanceOf(address).call()",
"def q_addressbalance(abe, page, chain):\n addr = wsgiref.util.shift_path_info(page['env'])\n if chain is None or addr is None:\n return 'returns amount of money at the given address\\n' \\\n '/chain/CHAIN/q/addressbalance/ADDRESS\\n'\n\n if not util.possible_address(addr):\n return 'ERROR: address invalid'\n\n version, hash = util.decode_address(addr)\n total = abe.store.get_balance(chain.id, hash)\n\n return (\"ERROR: please try again\" if total is None else\n format_satoshis(total, chain))",
"def execute_get_balance(arg):\n blockchain = Blockchain()\n blockchain.read_blockchain()\n\n address = arg['address']\n\n if address is None:\n print('You have to give you account address!!!')\n\n elif not blockchain._wallet_pool.has_address(address):\n print(f'The address {address} does not exist!!!')\n\n else:\n balance = blockchain.get_balance(address)\n print(f'Address: {address}')\n print(f'Balance = {balance}')\n\n return",
"def get_account_balance(account):\n balance = 0\n\n for address in get_addresses_by_account(account):\n balance += get_address_balance(address)\n\n return float(balance)",
"async def get_balance(sochain_url:str, network:str, address:str):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n if balance == None:\n raise Exception(\"Invalid Address\")\n return balance\n except Exception as err:\n raise Exception(str(err))",
"def get_balance_response(address):\n call = Address(address=address)\n response = call.get_address_info()\n if response:\n return response\n else:\n return None",
"def accountBalance(blockchain,id):\r\n balance = 0\r\n for block in blockchain:\r\n tupleblock = tuple(eval(block.data))\r\n if tupleblock[0] == id:\r\n balance -= tupleblock[1]\r\n if tupleblock[2] == id:\r\n balance += tupleblock[1]\r\n return balance",
"def get_balance(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting address balances. \"\n \"Or rather it has no defined 'get_balance' method.\"\n )",
"def get_balance(self, address):\n\n raise NotImplementedError('coming soon...')\n\n return sum(o.value for o in self.get_unspent_outputs(address)) / 100000000.0",
"def get_balances_blockchain(addresses):\r\n print(\"* blockchain.info not yet supported\")\r\n return None",
"def address_balance(self, address, confirmations=0):\n address = utils.request_type(address)\n\n confs = {'confirmations': confirmations}\n if confs['confirmations'] > 0:\n res = r.get(self.url + self.balance + str(address), params=confs)\n else:\n res = r.get(self.url + self.balance + str(address))\n return self.execute(res)",
"def get_balances_blockonomics(addresses):\r\n print(\"* Using blockonomics.co to query {} addresses\".format(len(addresses))) # NOQA\r\n BLOCKONOMICS_BALANCE_ENDPOINT = \"https://www.blockonomics.co/api/balance\"\r\n addrs = \" \".join(addresses)\r\n response = requests.post(BLOCKONOMICS_BALANCE_ENDPOINT,\r\n json={\"addr\": addrs})\r\n if response.status_code != 200:\r\n print(\"** blockonomics sent bad server reponse: \",\r\n response.status_code)\r\n return None\r\n\r\n balances = {}\r\n data = response.json()\r\n for each in data[\"response\"]:\r\n balances[each[\"addr\"]] = each[\"confirmed\"] + each[\"unconfirmed\"]\r\n\r\n return balances",
"def get_balances_blockr(addresses):\r\n print(\"* Using blockr.io to query {} addresses\".format(len(addresses)))\r\n BLOCKR_BALANCE_ENDPOINT = \"http://btc.blockr.io/api/v1/address/balance/{addresses}\" # NOQA\r\n\r\n addrs = ','.join(addresses)\r\n response = requests.get(BLOCKR_BALANCE_ENDPOINT.format(addresses=addrs))\r\n if response.status_code != 200:\r\n print(\"** Blockr sent bad server reponse: \", response.status_code)\r\n return None\r\n\r\n data = response.json()\r\n if data[\"status\"] != \"success\":\r\n print(\"** Blockr sent bad data reponse: \", data[\"status\"])\r\n return None\r\n\r\n balances = {}\r\n for each in data[\"data\"]:\r\n balances[each[\"address\"]] = each[\"balance\"]\r\n\r\n return balances",
"def get_wallet_balance():\n try:\n if CONF.exchange == 'bitmex':\n return EXCHANGE.fetch_balance()['info'][0]['walletBalance'] * CONF.satoshi_factor\n if CONF.exchange == 'kraken':\n asset = CONF.base if CONF.base != 'BTC' else 'XBt'\n return float(EXCHANGE.private_post_tradebalance({'asset': asset})['result']['tb'])\n if CONF.exchange == 'liquid':\n result = EXCHANGE.private_get_accounts_balance()\n if result is not None:\n for bal in result:\n if bal['currency'] == CONF.base:\n return float(bal['balance'])\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_wallet_balance()",
"def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def get_balances_blockexplorer(addresses):\r\n print(\"* blockexplorer.com not yet supported\")\r\n return None",
"def get_account_balance(self):\n return int(self.request('get', 'fort/accounts')['balance'])",
"def balance():\n address = request.args.get(\"address\")\n balance = p2p.query(\"/balance\", address=address)[\"balance\"]\n payload = jsonpickle.encode({\"balance\": balance})\n return payload, 200, {\"Content-Type\": \"application/json\"}",
"def get_wallet_balance(self, walletId, currency):\n return",
"def get_balance(self):\n if self.hosting_node == None:\n return None\n user = self.hosting_node\n tx_involving_user = [\n [tx.amount for tx in block.transfers if tx.user == user]\n for block in self.__chain\n ]\n # open_tx_sender = [tx.amount\n # for tx in __open_transfers if tx.user== user]\n # tx_sender.append(open_tx_sender)\n # print(tx_sender)\n total_amount = reduce(\n lambda tx_sum, tx_amt: tx_sum + sum(tx_amt)\n if len(tx_amt) > 0\n else tx_sum + 0,\n tx_involving_user,\n 0,\n )\n # Return the total balance\n print(\"balance = \", total_amount)\n return total_amount",
"def get_balance(self):\n return self._call_account_method(\n 'getBalance'\n )",
"def get_balance(address, endpoint=_default_endpoint, timeout=_default_timeout) -> int:\n method = 'hmy_getBalance'\n params = [\n address,\n 'latest'\n ]\n balance = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']\n try:\n return int(balance, 16)\n except TypeError as e:\n raise InvalidRPCReplyError(method, endpoint) from e",
"def get_balance(pub_key, blocks):\n \n balance = 0\n for block in blocks:\n if block.transactions.to_pk == pub_key:\n balance += block.transactions.amount\n if block.transactions.from_pk == pub_key:\n balance -= block.transactions.amount\n return balance",
"def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result"
] | [
"0.7753831",
"0.76965743",
"0.75815",
"0.7496749",
"0.7283361",
"0.7160326",
"0.713291",
"0.6988025",
"0.6978337",
"0.6957172",
"0.69118226",
"0.6904677",
"0.68739074",
"0.68685806",
"0.6836331",
"0.6831155",
"0.6781547",
"0.6766959",
"0.67386377",
"0.65896213",
"0.655743",
"0.65433526",
"0.6516909",
"0.6491531",
"0.64678764",
"0.64384115",
"0.6425852",
"0.6416618",
"0.6395574",
"0.6390658"
] | 0.7808565 | 0 |
Get current account balance in all shards & optionally report errors getting account balance for a shard | def get_balance_on_all_shards(address, skip_error=True, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
balances = []
sharding_structure = get_sharding_structure(endpoint=endpoint, timeout=timeout)
for shard in sharding_structure:
try:
balances.append({
'shard': shard['shardID'],
'balance': get_balance(address, endpoint=shard['http'], timeout=timeout)
})
except (KeyError, RPCError, RequestsError, RequestsTimeoutError):
if not skip_error:
balances.append({
'shard': shard['shardID'],
'balance': None
})
return balances | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def _balances(self) -> Dict[str, int]:\n\n return self.client.get(self._resources(\"balance\"))",
"def authorized_get_account_balance(self, huid):\n acc = self.request('get', safeformat('fort/accounts/{:hex}', huid))\n return int(acc['balance'])",
"def balance(self, account_number: int): \n return self._accounts[account_number][1]",
"def getbalance(self, account=None, minconf=None):\n args = []\n if account is not None:\n args.append(account)\n if minconf is not None:\n args.append(minconf)\n return self.proxy.getbalance(*args)",
"def get_account_balance(self):\n return int(self.request('get', 'fort/accounts')['balance'])",
"def get_wallet_balances(self):\r\n method = self.wallet_endpoints['balances']['method']\r\n url = self.base_url + self.wallet_endpoints['balances']['url']\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def get_account_balances(self):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('GetAccountBalances', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_balance(data.get('data', {})) if data.get('data') else {}",
"def _fund_accounts(accounts, shard_index, amount):\n config = get_config()\n assert 0 <= shard_index < len(config[\"ENDPOINTS\"])\n max_threads = multiprocessing.cpu_count() if not config['MAX_THREAD_COUNT'] else config['MAX_THREAD_COUNT']\n min_funding_balance = (config[\"ESTIMATED_GAS_PER_TXN\"] + config['INIT_SRC_ACC_BAL_PER_SHARD']) * len(accounts)\n funding_accounts = sorted(_get_accounts_with_funds(min_funding_balance, shard_index),\n key=lambda e: account_balances[e][shard_index][\"amount\"], reverse=True)\n Loggers.general.info(f\"Funding {len(accounts)} accounts on shard {shard_index} \"\n f\"using {len(funding_accounts)} funding accounts.\")\n if len(funding_accounts) > max_threads:\n funding_accounts = funding_accounts[:max_threads]\n Loggers.general.warning(f\"Have more funding accounts than configured threads, using top {max_threads} funded \"\n f\"accounts on shard {shard_index} {[cli.get_address(n) for n in funding_accounts]}\")\n assert funding_accounts, f\"No validator in CLI's keystore has {min_funding_balance} on shard {shard_index}\"\n transaction_hashes = _fund_accounts_from_account_pool(accounts, shard_index, amount, funding_accounts)\n return transaction_hashes",
"def account_balances(self):\n return self.get('balances', auth=True)",
"async def get_balance(self) -> int:\n # todo: support both strong and eventual consistency\n try:\n response = await self.storage.get(pk=self.unique_id, fields=\"balance\")\n except storage.exceptions.ObjectNotFoundError:\n raise crud.exceptions.WalletNotFoundError(\n f\"Wallet with {self.wallet_id=} does not exists\"\n )\n\n return int(response[\"balance\"])",
"def get_balance(self):\n return self._call_account_method(\n 'getBalance'\n )",
"def balances():\n loop.run_until_complete(app.exchanges.fetch_balances())\n print(app.exchanges.balances_str)",
"def __get_balance(self):\n return self.__balance",
"def get_balance(self, acc: Account) -> Decimal:\n return sum_queryset(self.get_entries(acc))",
"def get_balances(self):\r\n balances = self.api.find(\"tokens\", \"balances\", query={\"account\": self.account})\r\n return balances",
"def get_balance(self):\r\n return self.balance",
"def get_balance(self):\n balance = 0\n for transaction in self.ledger:\n balance += transaction[\"amount\"]\n return balance",
"def get_account_balance(account):\n balance = 0\n\n for address in get_addresses_by_account(account):\n balance += get_address_balance(address)\n\n return float(balance)",
"def query_balances(self) -> ExchangeQueryBalances:\n self.first_connection()\n\n response = self._api_query('wallets')\n if response.status_code != HTTPStatus.OK:\n result, msg = self._process_unsuccessful_response(\n response=response,\n case='balances',\n )\n return result, msg\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError as e:\n msg = f'{self.name} returned invalid JSON response: {response.text}.'\n log.error(msg)\n raise RemoteError(msg) from e\n\n # Wallet items indices\n currency_index = 1\n balance_index = 2\n assets_balance: defaultdict[AssetWithOracles, Balance] = defaultdict(Balance)\n for wallet in response_list:\n if len(wallet) < API_WALLET_MIN_RESULT_LENGTH:\n log.error(\n f'Error processing a {self.name} balance result. '\n f'Found less items than expected',\n wallet=wallet,\n )\n self.msg_aggregator.add_error(\n f'Failed to deserialize a {self.name} balance result. '\n f'Check logs for details. Ignoring it.',\n )\n continue\n\n if wallet[balance_index] <= 0:\n continue # bitfinex can show small negative balances for some coins. Ignore\n\n try:\n asset = asset_from_bitfinex(\n bitfinex_name=wallet[currency_index],\n currency_map=self.currency_map,\n )\n except (UnknownAsset, UnsupportedAsset) as e:\n asset_tag = 'unknown' if isinstance(e, UnknownAsset) else 'unsupported'\n self.msg_aggregator.add_warning(\n f'Found {asset_tag} {self.name} asset {e.identifier} due to: {e!s}. '\n f'Ignoring its balance query.',\n )\n continue\n\n try:\n usd_price = Inquirer().find_usd_price(asset=asset)\n except RemoteError as e:\n self.msg_aggregator.add_error(\n f'Error processing {self.name} {asset.name} balance result due to inability '\n f'to query USD price: {e!s}. 
Skipping balance result.',\n )\n continue\n\n try:\n amount = deserialize_asset_amount(wallet[balance_index])\n except DeserializationError as e:\n self.msg_aggregator.add_error(\n f'Error processing {self.name} {asset.name} balance result due to inability '\n f'to deserialize asset amount due to {e!s}. Skipping balance result.',\n )\n continue\n\n assets_balance[asset] += Balance(\n amount=amount,\n usd_value=amount * usd_price,\n )\n\n return dict(assets_balance), ''",
"async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}",
"def get_balance(self):\n return self.balance",
"def get_balance(self):\n return self.balance",
"def get_balance(self):\n return self._balance",
"def do_balance(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.update_balances()\n balance = self.wallet.addresses.get(args, -1)['balance']\n if balance == -1:\n print(\"Address not found.\")\n else:\n print(balance)",
"async def update_account_balance():\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"base_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.base_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # BTC\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"quote_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.quote_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # USD\n\n pass",
"def get_balance(self) -> int:\n with cd(self._get_cwd()):\n agent_config = AgentConfigManager.verify_private_keys(\n Path(\".\"),\n substitude_env_vars=False,\n private_key_helper=private_key_verify,\n password=self.PASSWORD,\n ).agent_config\n wallet = get_wallet_from_agent_config(agent_config, password=self.PASSWORD)\n return int(try_get_balance(agent_config, wallet, self.LEDGER_ID))",
"def get_wallet_balance():\n try:\n if CONF.exchange == 'bitmex':\n return EXCHANGE.fetch_balance()['info'][0]['walletBalance'] * CONF.satoshi_factor\n if CONF.exchange == 'kraken':\n asset = CONF.base if CONF.base != 'BTC' else 'XBt'\n return float(EXCHANGE.private_post_tradebalance({'asset': asset})['result']['tb'])\n if CONF.exchange == 'liquid':\n result = EXCHANGE.private_get_accounts_balance()\n if result is not None:\n for bal in result:\n if bal['currency'] == CONF.base:\n return float(bal['balance'])\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_wallet_balance()",
"def account_balance(self):\n return self._account_balance",
"def trail_balance(self, pk, request):\n try:\n # Checking Token Authentication available\n auth_info = AccountingOauth2.objects.filter(company_id=pk).values('accessToken', 'accessSecretKey',\n 'tokenAcitvatedOn', 'tokenExpiryON')\n secret_keys = Utils.get_access_keys(pk)\n if len(auth_info) == 0:\n return Utils.dispatch_failure(request, \"NO_TOKEN_AUTHENTICATION\")\n\n for key, value in auth_info[0].items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, \"NO_TOKEN_AUTHENTICATION\")\n\n\n # Checking Xero Connection Authentication available\n auth = Utils.get_xero_auth(pk)\n\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n credentials = PrivateCredentials(**auth)\n else:\n credentials = PublicCredentials(**auth)\n\n if credentials.expired() or credentials is None:\n return Utils.dispatch_failure(request, \"NO_TOKEN_AUTHENTICATION\")\n\n try:\n xero = Xero(credentials)\n xero.reports.get('TrialBalance')\n\n except XeroException as e:\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n error = [\"%s\" % e]\n return Utils.dispatch_failure(request, 'XERO_CONNECTION_ERROR', error)\n else:\n return Utils.dispatch_failure(request, \"NO_TOKEN_AUTHENTICATION\")\n try:\n meta = CompanyMeta.objects.filter(company_id=pk).first()\n if meta.monthly_reporting_current_period:\n st = time.time()\n from portalbackend.lendapi.v1.accounting.tasks import trial_balance_for_period\n job = group(trial_balance_for_period.s(pk, i) for i in range(0, 23))\n result = job.apply_async()\n else:\n return Utils.dispatch_failure(request, 'MISSING_MONTHLY_REPORTING_CURRENT_PERIOD')\n\n while not result.ready():\n continue\n return Utils.dispatch_success(request, 'TRIAL_BALANCE_RECEIVED_SUCCESS')\n except Exception as e:\n error = [\"%s\" % e]\n return Utils.dispatch_failure(request, 'DATA_PARSING_ISSUE', error)\n except Exception as e:\n return 
Utils.dispatch_failure(request, \"INTERNAL_SERVER_ERROR\")"
] | [
"0.666637",
"0.6562417",
"0.649321",
"0.63970065",
"0.6385605",
"0.6329423",
"0.6321331",
"0.6281901",
"0.6257391",
"0.6218982",
"0.6182547",
"0.6173757",
"0.61105436",
"0.6097671",
"0.6081987",
"0.60235274",
"0.6020045",
"0.59833217",
"0.59707737",
"0.5962612",
"0.5960762",
"0.5944982",
"0.5944982",
"0.592313",
"0.59077513",
"0.59009874",
"0.5880875",
"0.5880679",
"0.5878299",
"0.58423644"
] | 0.6737994 | 0 |
Write the converted routes to the configuration file. | def write_routes(self, pod, collection):
routes_file = os.path.join(collection.pod_path, ROUTES_FILENAME)
if self.data['routes']:
print(' └─ Writing: {}'.format(routes_file))
print('')
output = yaml.dump(
self.data, Dumper=yaml_utils.PlainTextYamlDumper,
default_flow_style=False, allow_unicode=True, width=800)
pod.write_file(routes_file, output)
else:
print(' └─ Skipping: {}'.format(routes_file))
print('') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_routes(output_dir: str, routes: List[Dict[str, Any]]):\n\n routes_file = ROUTES_FILE.format(output_dir=output_dir)\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n with open(routes_file, 'w') as f:\n logging.info(f'Wrote {len(routes)} routes to {routes_file}.')\n json.dump(routes, f, indent=4)",
"def write_config(self, config_file):\n \n # write root paths\n \n # write reference data\n \n # write tool paths\n \n pass",
"def write(self, filename: str):\n obj = self.to_dict(self)\n config.write(obj, filename)",
"def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )",
"def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()",
"def write_config(self):\r\n obj = [\r\n [self.ip,\r\n self.gate,\r\n self.mask,\r\n self.name,\r\n self.time]\r\n ]\r\n with open('config.json', 'wt') as jsonfile:\r\n json.dump(obj, jsonfile)",
"def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()",
"def write(self, fn):\n with open(fn, 'w') as f:\n self.config.write(f)",
"def convert(self):\n print('Converting: {}'.format(self.collection.pod_path))\n\n # Pull out the meta information from all the docs.\n sorted_docs = sorted(self.collection.list_docs_unread(), key=lambda doc: doc.pod_path)\n for doc in sorted_docs:\n self.routes_data.extract_doc(doc)\n\n self.routes_data.write_routes(self.pod, self.collection)",
"def save(self):\n for p, c in self.configs_:\n c.write(p)",
"def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)",
"def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())",
"def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())",
"def write_sitemap ( self ):\n try:\n self.output_fd = open ( file=dflt_cfg.DFLT_CFG[ OUTPUT_PATH ], mode='w' )\n self.print_url_links ( self.root )\n except (PermissionError, AttributeError) as err:\n self.logger.error ( \"Error {0} occurred. Output file {1} cannot be created\".format ( err, \\\n dflt_cfg.DFLT_CFG[\n OUTPUT_PATH ] ) )\n except Exception as err:\n self.logger.error ( \"Error {0} occurred while writing sitemap in output file: {1}\".format ( err, \\\n dflt_cfg.DFLT_CFG[ OUTPUT_PATH ] ) )\n self.output_fd.close ( )\n else:\n print(\"Sitemap for {} is written in {}.\".format(dflt_cfg.DFLT_CFG[DOMAIN], dflt_cfg.DFLT_CFG[ OUTPUT_PATH ]))\n print( \"Logs (Broken or dead URLs along with application logs) for domain {0} are available in {1} directory.\".format ( dflt_cfg.DFLT_CFG[DOMAIN], \"./logs\" ) )\n self.output_fd.close ( )",
"def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()",
"def writeRoutesCSV(filename, routes):\n if filename[-4:] != \".csv\": # Make sure the filename is a .csv\n filename += \".csv\"\n try:\n with open(os.path.join(\"input\", filename), \"w\", newline='') as f:\n writer = csv.writer(f, delimiter=\",\")\n writer.writerows(routes)\n except (OSError, FileNotFoundError):\n return False\n else:\n return True",
"def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)",
"def dump(self):\n with open(self._config_filename, 'w', encoding='utf-8') as file:\n self._parser.write(file)",
"def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)",
"def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))",
"def export_routes_xml(self, filepath=None, method_routechoice=None, encoding='UTF-8'):\n if method_routechoice is None:\n method_routechoice = self.get_route_first\n\n if filepath is None:\n filepath = self.get_routefilepath()\n print 'export_routes_xml', filepath\n try:\n fd = open(filepath, 'w')\n except:\n print 'WARNING in write_obj_to_xml: could not open', filepath\n return False\n\n xmltag_routes, xmltag_veh, attrname_id = (\"routes\", \"vehicle\", \"ids_sumo\")\n xmltag_trip = \"trip\"\n xmltag_rou = \"route\"\n\n fd.write('<?xml version=\"1.0\" encoding=\"%s\"?>\\n' % encoding)\n fd.write(xm.begin(xmltag_routes))\n indent = 2\n\n #ids_modes_used = set(self.parent.vtypes.ids_mode[self.ids_vtype.get_value()])\n self.parent.vtypes.write_xml(fd, indent=indent,\n ids=set(self.ids_vtype.get_value()),\n is_print_begin_end=False\n )\n\n ids_mode = self.parent.vtypes.ids_mode\n id_pedestrian = MODES['pedestrian']\n routes = self.routes.get_value()\n\n # here we could write the route info\n # but we do write it inside each trip so that it can be parsed\n # in the same way as duarouter output\n # routes.write_xml( fd, indent=indent,\n # attrconfigs_excluded = [routes.costs, routes.probabilities],\n # is_print_begin_end = False)\n\n # let's write trip info manually\n tripconfigs = [self.ids_vtype,\n self.times_depart,\n self.ids_edge_depart,\n self.ids_edge_arrival,\n self.inds_lane_depart,\n self.positions_depart,\n self.speeds_depart,\n self.inds_lane_arrival,\n self.positions_arrival,\n self.speeds_arrival,\n ]\n\n routeconfigs = [routes.ids_edges,\n routes.colors,\n ]\n\n attrconfig_id = getattr(self.get_attrsman(), attrname_id)\n xmltag_id = attrconfig_id.xmltag\n\n for id_trip in self.times_depart.get_ids_sorted():\n\n if ids_mode[self.ids_vtype[id_trip]] == id_pedestrian:\n self.write_persontrip_xml(fd, id_trip,\n method_routechoice=method_routechoice,\n indent=indent+2)\n\n else:\n id_route = method_routechoice(id_trip)\n if id_route >= 0: # a valid route has 
been found\n # init vehicle route only if valid route exists\n fd.write(xm.start(xmltag_veh, indent+2))\n else:\n # init trip instead of route\n fd.write(xm.start(xmltag_trip, indent+2))\n\n # print ' make tag and id',_id\n fd.write(xm.num(xmltag_id, attrconfig_id[id_trip]))\n\n # print ' write columns',len(scalarcolconfigs)>0,len(idcolconfig_include_tab)>0,len(objcolconfigs)>0\n for attrconfig in tripconfigs:\n # print ' attrconfig',attrconfig.attrname\n attrconfig.write_xml(fd, id_trip)\n\n if id_route >= 0: # a valid route has been found\n # write route id\n #fd.write(xm.num('route', id_route ))\n\n # instead of route id we write entire route here\n fd.write(xm.stop())\n fd.write(xm.start(xmltag_rou, indent+4))\n for attrconfig in routeconfigs:\n # print ' attrconfig',attrconfig.attrname\n attrconfig.write_xml(fd, id_route)\n\n # end route and vehicle\n fd.write(xm.stopit())\n fd.write(xm.end(xmltag_veh, indent+2))\n\n else:\n # end trip without route\n fd.write(xm.stopit())\n\n fd.write(xm.end(xmltag_routes))\n fd.close()\n return filepath",
"def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)",
"def write(self):\n print yaml.dump(self._config, default_flow_style=False),",
"def build_routes_file(routes, name):\n top = dict()\n top[\"file-type\"] = \"routes\"\n top[\"name\"] = name\n top[\"routes\"] = routes\n return top",
"def generateRouting(self, adjFile, outFile):\n\n if self.paths:\n feasible = getPathFeasibility(self.net, adjFile, self.paths)\n if feasible:\n routingConf = generateRoutingConf(self.net, self.paths, outFile)\n info(\"**** [G2]: path specs are FEASIBLE; generated routing conf file\", outFile, \"\\n\")\n else:\n if os.path.exists(outFile):\n os.remove(outFile)\n info(\"**** [G2]: INFEASIBLE path sepcs; deleted any old routing conf files present; controller will receive NO routing conf\\n\")\n else:\n info(\"**** [G2]: NO path sepcs found; controller will receive NO routing conf\\n\")",
"def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)",
"def write_config_file(self):\n for opt, opt_desc in self.opt_dict.iteritems():\n if 'permanent' in opt_desc and opt_desc['permanent'] == True:\n enabled = 'Always'\n else:\n enabled = opt_desc['enabled'].__str__()\n\n self.file_parser.set(opt, 'enabled', enabled)\n self.file_parser.set(opt, 'implementation',\n opt_desc['selected_imp'])\n self.file_parser.set(opt, 'optype', opt_desc['imptype'])\n\n for config, config_desc in self.config_dict.iteritems():\n enabled = config_desc['enabled'].__str__()\n self.file_parser.set(config, 'enabled', enabled)\n\n scratch_file = self.config_filename + '.scratch'\n with open(scratch_file, 'w') as cfile:\n for config in sorted(self.config_dict.keys()):\n self.write_section(cfile, config)\n\n for opt in sorted(self.opt_dict.keys()):\n self.write_section(cfile, opt)\n\n for imp in sorted(self.imp2opt_dict.keys()):\n self.write_section(cfile, imp)\n\n cfile.write(\"\\n\")\n\n os.rename(scratch_file, self.config_filename)",
"def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()",
"def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)",
"def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)"
] | [
"0.6734417",
"0.64529943",
"0.6035774",
"0.5861394",
"0.58209974",
"0.58195937",
"0.5759961",
"0.5752589",
"0.5728276",
"0.56543946",
"0.5629924",
"0.5625839",
"0.5625839",
"0.56163245",
"0.5613927",
"0.55870634",
"0.5579713",
"0.55711526",
"0.55490965",
"0.55257416",
"0.5522408",
"0.5510581",
"0.5490274",
"0.5475592",
"0.5475317",
"0.54592586",
"0.54410124",
"0.54331917",
"0.54245174",
"0.5422334"
] | 0.66468537 | 1 |
Perform the conversion to use collection based routing. | def convert(self):
print('Converting: {}'.format(self.collection.pod_path))
# Pull out the meta information from all the docs.
sorted_docs = sorted(self.collection.list_docs_unread(), key=lambda doc: doc.pod_path)
for doc in sorted_docs:
self.routes_data.extract_doc(doc)
self.routes_data.write_routes(self.pod, self.collection) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalise(self) -> \"Route\":\n pass",
"def _do_mapping(self):\n pass",
"def route(self):\n pass",
"def _translate_to_collection(\n self,\n collection,\n recursive=False,\n run_conditions=[],\n resource_conditions=[],\n variety_conditions=[],\n ):\n\n run_list = []\n if recursive:\n run_conditions.extend(\n [\n (\"collection_id =\", collection[\"id\"]),\n ]\n )\n _logger.debug(\"Loading run with conditions: {0}\".format(run_conditions))\n run_list = self.load_runs(\n recursive=recursive,\n run_conditions=run_conditions,\n resource_conditions=resource_conditions,\n variety_conditions=variety_conditions,\n )\n\n res = RunCollection(collection[\"name\"], data=run_list)\n res.set_id(collection[\"id\"])\n\n return res",
"def process_collection(pathname, ctx):\n collection_pth = Path(pathname)\n if ctx.found_collection_name is None:\n ctx.found_collection_name = \".\".join(collection_pth.parts[-2:])\n ctx.enter_collection(ctx.found_collection_name, pathname)\n ctx.add_dependencies()\n get_collection_plugins(pathname, ctx)\n process_collection_roles(str(collection_pth / \"roles\"), ctx)\n process_collection_tests(str(collection_pth / \"tests\"), ctx)\n ctx.exit_collection()",
"def routes(self, body):\n pass",
"def _get_collection(self, collection_uri, request_headers=None):\n\n # get the collection\n status, headers, thecollection = self._rest_get(collection_uri)\n\n if status != 200:\n msg = self._get_extended_error(thecollection)\n raise exception.IloError(msg)\n\n while status < 300:\n # verify expected type\n # Don't limit to version 0 here as we will rev to 1.0 at some\n # point hopefully with minimal changes\n ctype = self._get_type(thecollection)\n if (ctype not in ['Collection.0', 'Collection.1']):\n raise exception.IloError(\"collection not found\")\n\n # if this collection has inline items, return those\n # NOTE: Collections are very flexible in how the represent\n # members. They can be inline in the collection as members\n # of the 'Items' array, or they may be href links in the\n # links/Members array. The could actually be both. Typically,\n # iLO implements the inline (Items) for only when the collection\n # is read only. We have to render it with the href links when an\n # array contains PATCHable items because its complex to PATCH\n # inline collection members.\n\n if 'Items' in thecollection:\n # iterate items\n for item in thecollection['Items']:\n # if the item has a self uri pointer,\n # supply that for convenience.\n memberuri = None\n if 'links' in item and 'self' in item['links']:\n memberuri = item['links']['self']['href']\n yield 200, None, item, memberuri\n\n # else walk the member links\n elif ('links' in thecollection and\n 'Member' in thecollection['links']):\n # iterate members\n for memberuri in thecollection['links']['Member']:\n # for each member return the resource indicated by the\n # member link\n status, headers, member = self._rest_get(memberuri['href'])\n yield status, headers, member, memberuri['href']\n\n # page forward if there are more pages in the collection\n if ('links' in thecollection and\n 'NextPage' in thecollection['links']):\n next_link_uri = (collection_uri + '?page=' + str(\n 
thecollection['links']['NextPage']['page']))\n status, headers, thecollection = self._rest_get(next_link_uri)\n\n # else we are finished iterating the collection\n else:\n break",
"def create_router_for_backend(backend):\n\n router = SimpleRouter()\n\n for name, collection in backend.collections.viewitems():\n\n class NewViewSet(ModelViewSet):\n permission_classes = [IsAuthenticated, TokenHasReadWriteScope]\n\n queryset = collection.model.objects.all()\n serializer_class = collection.serializer_class\n\n def update(self, request, *args, **kwargs):\n # Here we call Daryl's Code to hax it!\n return daz_update(self, request, *args, **kwargs)\n\n NewViewSet.__name__ = collection.model.__name__ + 'ViewSet'\n\n router.register(name, NewViewSet)\n\n return router",
"def add_routes(self):\n pass",
"def create_collections(self):\n\n ''''''",
"def visit_collection(self, collection: CollectionEntity):\n logging.debug(\"Adding parts for collection %s\", collection.item_id)\n upload_matrix = AddPartsVisitor.get_upload_matrix(collection)\n routing_matrix = AddPartsVisitor.get_routing_matrix(collection)\n self._create_parts(collection, upload_matrix, routing_matrix)",
"def add_routes(self, mapper):\n pass",
"def convert(self, name, value):\n if isinstance(value, dict):\n cls = self.convert_resources.get(name, Resource)\n return cls(value, api=self.api)\n elif isinstance(value, list):\n new_list = []\n for obj in value:\n new_list.append(self.convert(name, obj))\n return new_list\n else:\n return value",
"def __init__(self, collection):\n self.collection = collection",
"def collection(cls, c: \"Collection_Type\") -> \"Link\":\n return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON)",
"def collect(self, paths):\r\n raise NotImplementedError()",
"def collection(self):\r\n raise NotImplementedError",
"def accept(self, visitor: Any) -> Any:\n visitor.visit_collection(self)",
"def route( request, c ):",
"def apply(self, callback, route):",
"def mvcObj(self, router):\n pass",
"def converter(item):\n pass",
"def __init__(self, collection_id):\n BaseResourceHandler.__init__(self, collection_id)",
"def collection(subject):\n if request.method == 'GET':\n args = get_args(request.args)\n verb = \"list\"\n if request.method == 'POST':\n args = get_args(request.json)\n verb = \"create\"\n subject = singular[subject];\n return apicall(verb, subject, args)",
"def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]",
"def route_layout(self):\n self.route_pins()\n self.route_internal()\n self.route_supplies()",
"def convert_alleles(self, alleles):\n raise NotImplementedError",
"def move_to_collection(self, destination_collection):\n for entity in self:\n entity.move_to_collection(destination_collection)",
"def get_routers(self):",
"def add_all_conversions(self):\n model = self.model\n # Mathematical expressions\n self.convert_assignments(model.get_assignments())\n # Connections\n for conn in getattr(model, u'connection', []):\n comp1 = model.get_component_by_name(conn.map_components.component_1)\n comp2 = model.get_component_by_name(conn.map_components.component_2)\n for mapping in conn.map_variables:\n var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)\n var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)\n self.convert_mapping(mapping, comp1, comp2, var1, var2)\n return"
] | [
"0.5987358",
"0.54556525",
"0.5441375",
"0.53992254",
"0.5345253",
"0.5340782",
"0.5335203",
"0.5289516",
"0.5278404",
"0.523436",
"0.5180618",
"0.51348835",
"0.5123802",
"0.510506",
"0.5033277",
"0.50302935",
"0.50169706",
"0.5013713",
"0.5008044",
"0.49894407",
"0.49573487",
"0.4954573",
"0.4936583",
"0.48943168",
"0.488907",
"0.48679796",
"0.48667118",
"0.48623818",
"0.48463014",
"0.48443985"
] | 0.6795894 | 0 |
Handle user signup. Create new user and add to DB. Redirect to home page. If form not valid, present form. | def signup():
form = UserAddForm()
if form.validate_on_submit():
try:
user = User.signup(
username=form.username.data,
password=form.password.data,
)
db.session.commit()
except IntegrityError:
flash("Username already taken", 'danger')
return render_template('users/signup.html', form=form)
do_login(user)
return redirect("/")
else:
return render_template('signup.html', form=form) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_up():\n form = RegisterForm()\n if request.method == \"GET\":\n return render_template('adduser.html', title='Add New User', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n password = form.password1.data\n email = form.email.data\n account = db.check_item(\"username\", username)\n if account is not None:\n flash('This User name or Email is existing')\n return redirect(url_for('sign_up'))\n else:\n db.add_user(username, password, email)\n flash(\"You have add a new user successfully\")\n return redirect(url_for('sign_up'))\n return render_template('adduser.html', title='Add New User', form=form)",
"def signup():\n\n form = UserAddForm()\n\n if form.validate_on_submit():\n try:\n user = User.signup(\n username=form.username.data,\n password=form.password.data,\n email=form.email.data,\n image_url=form.image_url.data or User.image_url.default.arg,\n )\n db.session.add(user)\n db.session.commit()\n\n except IntegrityError:\n flash(\"Username / Email already taken\", 'danger')\n return render_template('users/signup.html', form=form)\n\n do_login(user)\n\n return redirect(\"/\")\n\n else:\n return render_template('users/signup.html', form=form)",
"def signup():\n\n form = UserAddForm()\n if form.validate_on_submit():\n try:\n user = User.signup(\n username=form.username.data,\n password=form.password.data,\n email=form.email.data,\n image_url=form.image_url.data or User.image_url.default.arg,\n )\n db.session.commit()\n\n except IntegrityError:\n flash(\"Username already taken\", 'danger')\n return render_template('users/signup.html', form=form)\n\n do_login(user)\n\n return redirect(\"/\")\n\n else:\n return render_template('users/signup.html', form=form)",
"def sign_up():\n #POST - the info coming from the sign-up-form\n\n #get username and password that was filled in sign-up form\n #if username exits - flash \"username taken\" and redirct to /sign-up-form\n\n #else save the new user to the database - user table, flash success message\n #and redirect back to /more-details/cat_id",
"def signup():\n\n form = UserAddForm()\n \n\n if form.validate_on_submit():\n try:\n user = User.signup(\n username=form.username.data,\n password=form.password.data,\n email=form.email.data,\n image_url=form.image_url.data or User.image_url.default.arg,\n )\n db.session.commit()\n\n except IntegrityError:\n flash(\"Username already taken\", 'danger')\n return render_template('users/signup.html', form=form)\n\n do_login(user)\n\n return redirect(\"/\")\n\n else:\n return render_template('users/signup.html', form=form)",
"def signupview(request):\n if request.method != 'POST':\n # Display blank registration form\n form = UserCreationForm()\n else:\n # PRocess completed form\n form = UserCreationForm(data=request.POST)\n if form.is_valid():\n form.save()\n # authenticated_user = authenticate(username=new_user.username,\n # password=request.POST['password'])\n return HttpResponseRedirect(reverse('dashboard:user_profile'))\n\n context = {'form': form}\n template_name = 'registration/signup.html'\n return render(request, template_name, context)",
"def process_signup():\n\n\temail = request.form.get('email');\n\tpassword = request.form.get('password');\n\n\tif email:\n\t\tnew_user = model.User(email=email, password=password)\n\t\tmodel.session.add(new_user)\n\t\tmodel.session.commit()\n\t\tsession['email'] = email\t\n\n\treturn render_template(\"signup.html\")",
"def sign_up():\n\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n\n form = SignUpForm()\n if form.validate_on_submit():\n existing_user = User.query.filter(\n (User.email == form.email.data) | (User.username == form.username.data)\n ).first()\n if existing_user is None:\n user = User(\n username=form.username.data,\n email=form.email.data,\n role=Role.user.name\n )\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n login_user(user)\n return redirect(url_for('main.index'))\n flask.flash('A user already exists with that username or email address.')\n return render_template(\n 'auth/signup.html',\n title='Sign up',\n form=form\n )",
"def user_signup():\n\n if request.method == \"GET\":\n return render_template(\"signup_form.html\")\n\n # post request logic starts here\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n if email_is_valid(email):\n\n flash(\"It looks like you are already signed up for Readerboard! Try signing in instead.\")\n return redirect(\"/signin\")\n\n else:\n\n new_user = User()\n db.session.add(new_user)\n db.session.commit()\n new_acct = Account(user_id=new_user.user_id, email=email, password=password)\n db.session.add(new_acct)\n\n db.session.commit()\n session['acct'] = new_acct.acct_id\n\n return redirect(\"/auth/goodreads\")",
"def register_new_user():\n register_form = UserAddForm()\n login_form = LoginForm()\n\n if register_form.validate_on_submit():\n try:\n user = User.signup(\n email=register_form.new_email.data,\n password=register_form.new_password.data,\n username=register_form.new_username.data,\n first_name=register_form.first_name.data.capitalize(),\n last_name=register_form.last_name.data.capitalize(),\n image_url=register_form.image_url.data or User.image_url.default.arg,\n cover_url=register_form.cover_url.data or User.cover_url.default.arg\n )\n db.session.commit()\n\n do_login(user)\n return redirect('/')\n except IntegrityError:\n flash(\n \"Email or username already registered! Please log in or try again\", 'danger')\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)\n\n else:\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)",
"def sign_up():\n\n form = SignupForm()\n # Checking form has been filled in correctly\n if form.validate_on_submit():\n users = mongo.db.users\n existing_user = users.find_one({'username': request.form['username']})\n\n # If username isn't already in database\n if existing_user is None:\n hash_password = generate_password_hash(request.form['password'])\n # Create an account\n users.insert_one({'username': request.form['username'],\n 'password': hash_password})\n # Notify them\n flash(f'Account created for \\'{form.username.data}\\'!', 'success')\n session['username'] = request.form['username']\n session['logged'] = True\n return redirect(url_for('index'))\n else:\n # If username already exists then tell user to try another username\n flash(f'Username \\'{form.username.data}\\' already exists!' +\n 'Please choose a different username', 'danger')\n return redirect(url_for('sign_up'))\n\n return render_template('sign_up.html', title=\"Sign Up\", form=form)",
"def signup():\n if request.method == 'GET':\n form = SignUpForm(obj=current_user)\n\n else:\n form = SignUpForm(request.form)\n if request.method == 'POST' and form.validate():\n email = form.email.data\n password = form.password.data\n\n # Check if they they exist already\n user = Users.get_one(email = email)\n if not user:\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n user = User(**{'email':email, 'first_name':first_name, 'last_name':last_name})\n user.set_password(password)\n user.provider_type = form.provider_type.data\n user.practice_name = form.practice_name.data\n user.practice_type = form.practice_type.data\n try:\n user.save()\n except Exception as e:\n log.exception(f\"Exception trying to save user {email}\")\n else:\n return redirect('/')\n else:\n form.errors = \"User already exists\"\n \n context = {'form':form}\n content = render_template( 'signup.html', **context )\n return content",
"def signup():\n\n # Get values from signup form\n signup_email = request.form.get(\"signup_email\")\n signup_password = request.form.get(\"signup_password\")\n\n # If user exists, ask them to log in\n # Otherwise, add user into database and log them in, redirecting to homepage\n if db.session.query(User).filter(User.email == signup_email).first():\n flash(\"You already have an account please use login!\", \"danger\")\n return redirect(\"/signup-login\")\n\n else:\n new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)\n db.session.add(new_user)\n db.session.commit()\n \n session[\"logged_in_user_email\"] = signup_email\n session[\"logged_in_user\"] = new_user.user_id\n \n flash(\"Your account has been created! You now are logged in!\", \"success\")\n \n return redirect(\"/\")",
"def signup(request):\n if request.method == \"POST\":\n # Double check the form was actually submitted, instead of the page being\n # loaded\n form = BeeUserCreationForm(request.POST)\n\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get(\"username\")\n raw_password = form.cleaned_data.get(\"password\")\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect(\"home\")\n\n else:\n # If the page was just loaded, set the form to the proper one\n form = BeeUserCreationForm()\n\n return render(request, \"signup.html\", {\"form\": form})",
"def register_page():\n form = addUser()\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n email=form.email.data\n first_name=form.first_name.data\n last_name=form.last_name.data\n \n new_user = User.register(username=username, password=password, email=email, first_name=first_name, last_name=last_name)\n\n db.session.add(new_user)\n db.session.commit()\n\n session[\"user\"] = new_user.username\n return redirect(f'/users/{username}')\n else:\n return render_template(\"reg_form.html\", form=form)",
"def signup():\n signup_form = SignupForm(request.form) # Rempli les champs créer dans le SignupForm avec les valeurs du forumlaire corerspondantes au nom donné au champs\n # Les champs créer dans le SignupForm peuvent être parcouru grâce à la methode __setitem__ et __getitem__.\n if request.method == 'POST':\n if signup_form.validate(): # Utilise les validators renseignés dans SignupForm pour vérifier les valeurs des champs\n email = signup_form.email.data\n last_name = signup_form.last_name.data\n first_name = signup_form.first_name.data\n phone = signup_form.phone.data\n password = signup_form.password.data\n\n if not UserController().exists(email):\n hashed_password = hashlib.sha256(password.encode('utf8')).hexdigest()\n user = UserController().insert(email, hashed_password, last_name, first_name, phone)\n login_user(user)\n return redirect(url_for('main_bp.home'))\n flash('Un utlisateur utilise déjà cette adresse mail')\n return redirect(url_for('auth_bp.signup'))\n\n return render_template('signup.html',\n current_user=current_user,\n form=signup_form)",
"def signup():\n print(\"In signup.....\")\n auth_service = AuthService()\n form = SignUpForm()\n if request.method == 'GET':\n return render_template('auth/signup.html', title='Sign Up', form=form)\n\n elif request.method == 'POST':\n if form.validate_on_submit():\n user_dto = UserDto(form.email.data, form.password.data, form.name.data, form.contact.data)\n try:\n auth_service.create_user(user_dto)\n flash('SignUp successfull name = \"%s\" , email = \"%s\"' % (form.name.data, form.email.data))\n return redirect(url_for('auth.signin'))\n except UserExistsException:\n flash(\"User already exists\")\n return redirect(url_for('auth.signup'))\n flash('SignUp Failed')\n return render_template('auth/signup.html', title='Sign Up', form=form)",
"def create_user():\n if request.method == 'POST':\n PLAN.create_user(request.form['fname'],\n request.form['lname'],\n request.form['username'],\n request.form['password'],\n request.form['email'])\n return redirect(url_for('index'))\n return render_template('newuser.html')",
"def sign_up(request):\n form = UserCreationForm()\n if request.method == 'POST':\n form = UserCreationForm(data=request.POST)\n if form.is_valid():\n form.save()\n user = authenticate(\n username=form.cleaned_data['username'],\n password=form.cleaned_data['password1']\n )\n login(request, user)\n messages.success(\n request,\n \"You're now a user! You've been signed in, too.\"\n )\n return HttpResponseRedirect(reverse('home'))\n return render(request, 'accounts/sign_up.html', {'form': form})",
"def register_user():\n\n form = AddUserForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n \n new_user = User.register(username, password, email, first_name, last_name)\n db.session.add(new_user)\n db.session.commit()\n session[\"username\"] = username\n flash(f\"{username} is now registered!\", \"success\")\n return redirect(f\"/users/{username}\")\n\n else:\n\n return render_template(\n \"register.html\", form=form)",
"def view_signup(self):\n logged_in = authenticated_userid(self.request)\n message = u''\n username = u''\n password = u''\n\n # Create form by using schemas with validations\n form = Form(self.request, schema=SignUpSchema,\n state=State(request=self.request))\n\n if form.validate():\n username = self.request.params['username']\n password = self.request.params['password']\n email = self.request.params['email']\n self.context['users'].add(username, password, email)\n self.context['groups'].add(username, group_names['member'])\n self.context['groups'].add(username, u'u:%s' % username)\n\n message = msg['succeed_add_user'] + username\n\n return {\n 'message': message,\n 'url': self.request.application_url + '/signup',\n 'username': username,\n 'logged_in': logged_in,\n 'password': password,\n 'form': FormRenderer(form)\n }",
"def signup(request):\n try:\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n user_form.username = request.POST['email']\n profile_form = UserProfileInfoForm(data=request.POST)\n if user_form.is_valid() and profile_form.is_valid():\n user = User()\n user.first_name = request.POST.get('first_name')\n user.last_name = request.POST.get('last_name')\n user.email = request.POST.get('email')\n user.username = request.POST.get('email')\n user.set_password(request.POST.get('password'))\n user.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n profile.save()\n registered = True\n else:\n print(user_form.errors,profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileInfoForm()\n return render(request,'accounts/registration.html',\n {'user_form':user_form,\n 'profile_form':profile_form,\n 'registered':registered})\n except Exception as e:\n return HttpResponse(e, status=500)",
"def create_user():\n form = UserForm(prefix='register')\n\n if not form.validate_on_submit():\n flash('Invalid input.', 'warning')\n return view_index(form)\n else:\n user, exists = db_insert_or_get(User, name=form.name.data, defaults={'password': form.password.data})\n if exists:\n flash('Username taken.', 'warning')\n else:\n db.session.commit()\n\n session['user_name'] = user.name\n app.logger.info('User %s created successfully.', user.name)\n flash('User created successfully.', 'success')\n\n return redirect(url_for('view_index'))",
"def post(self, request):\n self.context[\"form\"] = AddUserForm(request.POST)\n form = self.context[\"form\"]\n if form.is_valid():\n # Reject input if user already exists\n username = form.cleaned_data[\"username\"]\n if User.objects.filter(username=username).count() > 0:\n # reject\n ev = PiEvent.createEvent(type=PiEvent.ADDUSER_TYPE, status=PiEvent.FAIL_STATUS,\n message=\"User '{}' already exists\".format(username))\n ev.save()\n\n return tryAgain(msg=\"The username '<b>{}</b>' already exists\".format(username),\n url=\"javascript:history.back()\")\n password = form.cleaned_data[\"password\"]\n firstName = form.cleaned_data[\"firstName\"]\n lastName = form.cleaned_data[\"lastName\"]\n email = form.cleaned_data[\"email\"]\n organization = form.cleaned_data[\"organization\"]\n mobilePhone = form.cleaned_data[\"mobilePhone\"]\n workPhone = form.cleaned_data[\"workPhone\"]\n otherPhone = form.cleaned_data[\"otherPhone\"]\n note = form.cleaned_data[\"note\"]\n\n # Create a Django User object\n user = User.objects.create_user(username, email=email, password=password)\n user.first_name = firstName\n user.last_name = lastName\n user.save()\n \n msUser = MSUser(organization=organization,\n work_phone=workPhone,\n mobile_phone=mobilePhone,\n other_phone=otherPhone,\n note=note,\n user=user)\n msUser.save()\n\n ev = PiEvent.createEvent(type=PiEvent.ADDUSER_TYPE, status=PiEvent.SUCCESS_STATUS,\n message=\"User '{}' added\".format(unicode(msUser)))\n ev.save()\n return HttpResponseRedirect(\"/dbkeeper/\")\n\n return render(request, \"dbkeeper/add.html\", self.context)",
"def add_user_process():\n\n # extract form data, add, commit, then redirect to /users\n first_name = request.form[\"first-name\"]\n last_name = request.form[\"last-name\"]\n image_url = request.form[\"image-url\"]\n\n msg = db_add_user(first_name, last_name, image_url)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(\"/users\")",
"def signup():\n\tif request.method=='POST':\n\t\tform = request.form\n\t\tusername=request.form['username']\n\t\temail=request.form['email']\n\t\tpassword=request.form['password']\n\t\tquery=\"SELECT * from users WHERE username=%s\"\n\t\tparam=username\n\t\tc.execute(query,param)\n\t\tif c.fetchone() is not None:\n\t\t\tsession['message'] = \"That username is already taken...\"\n\t\t\treturn render_template('register.html',form=form)\n\t\telse:\n\t\t\tquery=\"SELECT * from users WHERE email=%s\"\n\t\t\tparam=email\n\t\t\tc.execute(query,param)\n\t\t\tif c.fetchone() is not None:\n\t\t\t\tsession['message'] = \"That Email is already taken...\"\n\t\t\t\treturn render_template('register.html',form=form)\n\t\t\telse:\n\t\t\t\tc.execute(\"\"\"INSERT into users(username,email,password) VALUES (%s,%s,%s)\"\"\",(username,email,password))\n\t\t\t\tconn.commit()\n\t\t\t\treturn redirect('/login')",
"def registration():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n models.User.create_user(username=form.username.data,\n email=form.email.data,\n password=form.password.data)\n return redirect(url_for('login'))\n return render_template(\"register.html\", form=form)",
"def register_user():\n\n form = UserForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n new_user = User.register(username, password, email, first_name, last_name)\n\n db.session.add(new_user)\n db.session.commit()\n session['username'] = new_user.username\n\n flash(f'Created {username} user.')\n return redirect('/users/<username>')\n\n else:\n return render_template('users/register.html', form=form)",
"def sign_up(request):\n\n sign_form = forms.SignUp()\n\n if request.method == 'POST':\n sign_form = forms.SignUp(request.POST)\n\n # Checking validation.\n if sign_form.is_valid():\n sign_form.save(commit=True)\n\n # return homepage on submit.\n return home(request)\n else:\n print(\"invalid Form\")\n\n return render(request, r\"app_two/signup.html\", context={'form': sign_form})",
"def sign_up():\n if request.method == 'POST':\n result = register(request.form['name'], request.form['username'],\n request.form['password'], request.form['rpt_password'])\n if result == \"Registration successful\":\n flash(result, 'info')\n return redirect(url_for('sign_in'))\n flash(result, 'warning')\n return render_template('register.html')"
] | [
"0.84403425",
"0.8225739",
"0.8176397",
"0.8168092",
"0.8141075",
"0.81184983",
"0.8070323",
"0.8016985",
"0.80074",
"0.7969208",
"0.79564214",
"0.79262125",
"0.7865015",
"0.7858723",
"0.7858716",
"0.77863723",
"0.7768571",
"0.7757153",
"0.77525586",
"0.76977766",
"0.76679504",
"0.7650627",
"0.7638262",
"0.76251984",
"0.7625112",
"0.76193255",
"0.7609879",
"0.76020956",
"0.7587377",
"0.75526637"
] | 0.8322984 | 1 |
Returns the probability of transitioning into state s1 after taking action a in state s. | def _transition_probability(self, s, a, s1):
unreachable_states = [4, # F with prod_score == 4
5] # M with prod_score == 0
if s1 in unreachable_states:
return 0
else:
return 1 / (self.n_states - len(unreachable_states)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_transition_prob(self, state, action, next_state):\n return self.get_next_states(state, action).get(next_state, 0.0)",
"def _transition_prob(self, s1, a, s2):\n if self._stochsatic:\n return self._dynamics_noise_distribution.pdf(s2 - (s1 + a))\n else:\n return s2 == s1 + a",
"def _starting_prob(self, s):\n return self._starting_state_distribution.pdf(s)",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs",
"def get_action(self, s):\n probs = self.predict(s)\n action = torch.multinomial(probs, 1).item()\n log_prob = torch.log(probs[action])\n return action, log_prob",
"def probability_s(self, s, c):\n return sum([self.get_likelihood(c, w) for w in s]) + self.prior_probability[c]",
"def prob_class_1_arrival(state, lambda_1, mu, num_of_servers):\n return lambda_1 / (lambda_1 + (mu * min(state[1], num_of_servers)))",
"def get_state(self, s):\n return (torch.tensor(s, device=self.device).permute(2, 0, 1)).unsqueeze(0).float()",
"def get_action_probability_dict(self, state):\n pass",
"def get_reward(self, p, s, p_next, s_next):\r\n\r\n if p_next == p and s_next == s and (abs(p_next) > 1 or abs(s_next) > 3):\r\n return 0\r\n elif p_next < -1 or abs(s_next) > 3:\r\n return -1\r\n elif p_next > 1 and abs(s_next) <= 3:\r\n return 1\r\n else:\r\n return 0",
"def get_action_probs(self, state):\n state = state.astype(np.float32)\n return self.session.run(self.action_probs,\n feed_dict={self.s_placeholder: state})",
"def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]",
"def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob",
"def policy_action(self, s):\n return np.random.choice(np.arange(self.act_dim), 1, p=self.actor.predict(s).ravel())[0]",
"def perplexity(self, sents):\n return 2 ** self.cross_entropy(sents)",
"def target_state(self, s):\n\n if s < self.total_length and s - self.transition >= self.subpaths[self.current_path].total_length:\n self.transition += self.subpaths[self.current_path].total_length\n offset = self.subpaths[self.current_path].target_state(self.subpaths[self.current_path].total_length)\n self.g = np.dot(self.g, rigid(offset))\n self.start_theta += offset[2]\n self.start_theta = (self.start_theta+np.pi)%(np.pi*2) - np.pi\n self.current_path += 1\n \n if s > self.total_length:\n s = self.total_length\n\n state = self.subpaths[self.current_path].target_state(s-self.transition)\n state[:2] = np.dot(self.g, np.append(state[:2], 1))[:2]\n state[2] += self.start_theta\n\n return state",
"def one_step(s,a):\n \n R = -1\n if s == 0:\n s_prime = a # left (0) goes to state 0, right (1) goes to state 1\n elif s == 1:\n s_prime = 2 if a ==0 else 0 # reversed motion\n elif s == 2:\n s_prime = 3 if a == 1 else 1\n \n return R, s_prime",
"def act(self, state):\n\t\trand_val = np.random.rand()\n\t\tif not self.is_eval and rand_val <= self.epsilon: # Do a random action only in train phase\n\t\t\treturn random.randrange(self.action_size)\n\n\t\tif self.firstIter: # If this is the first iteration, just do a \"hold\" action\n\t\t\tself.firstIter = False\n\t\t\treturn 2 # 2 = \"Hold action\"\n\n\t\toptions = self.model.predict(state) # Do a prediction based on a specific observation\n\t\t#print(options)\n\n\t\ttot = np.sum(options[0])\n\t\toptions[0] = options[0] / tot\n\t\t#print(options)\n\n\t\trand = random.random()\n\n\t\t#print(\"randm:\" + str(rand))\n\t\tif rand <= options[0][0]:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 0\")\n\t\t\treturn 0\n\n\t\telif options[0][0] < rand <= (options[0][0] + options[0][1]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 1\")\n\t\t\treturn 1\n\t\telif (options[0][0] + options[0][1]) < rand <= (options[0][0] + options[0][1] + options[0][2]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 2\")\n\t\t\treturn 2\n\t\telse:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 3\")\n\t\t\treturn 3\n\n\t\t#return np.argmax(options[0])'''",
"def _step(self, action):\n transitions = self.query_model(self.s, action)\n prob, next_s, r, is_terminal = transitions[categorical_sample(\n (t[0] for t in transitions), self.rng)]\n\n next_s = np.asarray(next_s)\n for i in range(len(self.s) - 1):\n if next_s[i+1] < self.observation_space.high[i+1]:\n p = self.p_add[i]\n if(categorical_sample([p, 1-p], self.rng) == 0):\n next_s[i+1] += 1\n\n self.s = tuple(next_s)\n self.lastaction = action\n return (next_s, r, is_terminal, {\"prob\": prob})",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)",
"def probability_of_generating_containing_events(M, s):\n\n # initialize the probabilities of generating containing events.\n f = 1\n\n s_with_inclusion_probabilities = inclusion_probability(M, s)\n for v, p in s_with_inclusion_probabilities.items():\n f *= p\n\n return f",
"def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions",
"def s_derivation(self, p, s, action):\r\n m = 1\r\n g = 9.81\r\n\r\n first = action/(m*(1+hill_derivatives(p)[0]**2))\r\n second = g*hill_derivatives(p)[0]/(1+hill_derivatives(p)[0]**2)\r\n third = ((s**2)*hill_derivatives(p)[0]*hill_derivatives(p)[1])/(1+hill_derivatives(p)[0]**2)\r\n return first - second - third",
"def prob_service(state, lambda_1, mu, num_of_servers):\n return (min(state[1], num_of_servers) * mu) / (\n lambda_1 + (mu * min(state[1], num_of_servers))\n )",
"def get_state_action_probability_dict_dict(self):\n pass",
"def sigmoid_prime(self, s):\n return s * (1 - s)",
"def trans_prob(next_s, q, d):\n\n next_q, next_r, next_w = next_s\n\n A_actions = [0, 1, 2, 3, 4]\n\n prob = 0\n\n for a in A_actions:\n\n prob_r = attraction_h(next_r[0], a)\n\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1-next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n\n prob += a_given_s(a, q) * prob_r * prob_q\n\n return prob",
"def target_state(self, s):\n if s > self.total_length:\n s = self.total_length\n\n return np.array([self.sign*s, 0, 0])\n # return np.append(np.dot(self.g, np.array([s, 0, 1]))[:2], self.start_theta)",
"def rewards(self, s_p, action):\n r0 = GenerateSuccessor13(s_p, action, self.id).agents[self.id].completed_seqs\n r1 = GetReward(s_p)\n if self.r0 < r0:\n self.r0 = r0\n # print(\"0\",self.r0)\n return self.r0\n elif self.r1 > (r1*(-1)):\n self.r1 = r1*(-1)\n # print(\"1\",self.r1)\n return self.r1\n else:\n return 0",
"def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s"
] | [
"0.74151826",
"0.74038136",
"0.7240678",
"0.66807497",
"0.6644237",
"0.65863276",
"0.63309807",
"0.63219106",
"0.6308571",
"0.62739587",
"0.62624675",
"0.6239656",
"0.62317383",
"0.62011445",
"0.615554",
"0.6150811",
"0.61338043",
"0.61216384",
"0.61120576",
"0.6089495",
"0.60892737",
"0.60675514",
"0.605791",
"0.6037455",
"0.59952277",
"0.5981429",
"0.59766674",
"0.59587926",
"0.59461707",
"0.5939064"
] | 0.8406897 | 0 |
Obtain the observation for the current state of the environment. This is a fully observable environment, so we can return the state directly. Returns list | def _get_obs(self):
return self.observation_function[self.cur_state] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_state(self):\r\n return self.currentObservation",
"def getObservation(self):\n return self._cur_state",
"def get_observation_list(self):\n return self.observations",
"def current_state(self):\n return self.obs_hook(self._current_obs)",
"def _get_observation(self):\n return []",
"def get_observation(self):\n # Check if there is an observation pending\n if self.observation_pending:\n raise RuntimeError(\"There is already a pending observation. \"\n \"The pending observation has to be answered first\")\n # Set pending observation to true\n self.observation_pending = True\n # Get the current environment\n obs = self.__gen_observation(self.current_player, roll_dice=True)\n\n # Add the bord and dice before the move to the history\n self.__add_to_hist()\n return obs, self.current_player",
"def get_state(self) -> numpy.ndarray:\n if self.clone_seeds:\n return self.gym_env.unwrapped.clone_full_state()\n else:\n return self.gym_env.unwrapped.clone_state()",
"def _get_state(self, obs_env):\n state = []\n obs_env = obs_env.reshape(self.n_agent, 2)\n for i in range(self.n_agent):\n local_obs = obs_env[i]\n if self.agent.startswith('ia2c'):\n imgs = [local_obs]\n\n if not self.agent == 'ia2c_fp': # ia2c\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.array([], dtype=np.float32)\n\n else: # ia2c_fp\n fps = []\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n fps.append(self.fp[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.concatenate(fps).astype(np.float32)\n\n agent_obs = [imgs, fps]\n\n else: # ma2c\n agent_obs = local_obs.astype(np.float32)\n\n state.append(agent_obs)\n\n return state\n # return [[obs_env, np.array([], dtype=np.float32)] for _ in range(self.n_agent)]",
"def _get_obs(self):\n # return np.concatenate((self.world.state[:6], self.world.state[7:13]))\n return np.concatenate((self.world.state, np.zeros(7)))\n # return self.world.state",
"def observation_space(self):\n return self._user_state.observation_space()",
"def get_state_observed_values(self):\n obsState = numpy.zeros(self.get_num_variables())\n i = 0\n for v in self.variables:\n obsState[i] = v.read_value_in_fmu(self.fmu)\n i += 1\n return obsState",
"def get_recent_state(self, current_observation):\n # This code is slightly complicated by the fact that subsequent observations might be\n # from different episodes. We ensure that an experience never spans multiple episodes.\n # This is probably not that important in practice but it seems cleaner.\n state = [current_observation]\n idx = len(self.recent_observations) - 1\n for offset in range(0, self.window_length - 1):\n current_idx = idx - offset\n current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False\n if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):\n # The previously handled observation was terminal, don't add the current one.\n # Otherwise we would leak into a different episode.\n break\n state.insert(0, self.recent_observations[current_idx])\n while len(state) < self.window_length:\n state.insert(0, zeroed_observation(state[0]))\n return state",
"def get_obs(self, state: EnvState) -> chex.Array:\n return jnp.array([state.weekday, *state.stock])",
"def get_state(self):\n return self.history",
"def get_initial_observation(self):\n return self.__environment.reset()",
"def getstate(self):\n return [elem.getstate() for elem in self]",
"def get_observation_from_state(state: State) -> List[int]:\n return state.robots_data + [state.time] + state.positions",
"def get_state(self):\n return self.fmu._get_continuous_states()",
"def observation(self):\n return {\n name: np.asarray(\n self._env.observation(name), self._observation_spec[name].dtype)\n for name in self._obs_names\n }",
"def get_state(self):\n return self._env.get_state()",
"def observation(self):\n return {k: observer(self._state)\n for k, observer in self.observers.items()}",
"def get_state(self):\n return self.env.sim.get_state()",
"def get_terminal_observing_states(self):\n pass",
"def getObs(self):\n return self.listener.obs",
"def get_observation(self):\n return np.array(self.env.last_action_is_valid, dtype=np.int32)",
"def states(self):\n return self._x_list",
"def get_current_observation(self):\n return self.observation_history[-1]",
"def get_observation_verbose(self):\n state = {}\n for grid_id, grid in self.grids.items():\n o = grid.get_active_orders(self.city_time)\n d = list(grid.get_idle_drivers().values())\n state[grid_id] = [o,d]\n return state",
"def _get_obs(self):\n full_state = np.copy(self.state)\n if full_state[self.bot_y, self.bot_x] != self.TileState.DIRTY.value:\n full_state[self.bot_y, self.bot_x] = self.TileState.BOT.value\n return full_state",
"def get_observation(self):\n return np.array(self.env.mode, dtype=np.int32)"
] | [
"0.73571426",
"0.7285132",
"0.71541566",
"0.7076889",
"0.7041503",
"0.6926391",
"0.6779948",
"0.67035097",
"0.668648",
"0.66071546",
"0.6600513",
"0.6579601",
"0.65328586",
"0.644826",
"0.6435689",
"0.6430787",
"0.64077073",
"0.6407079",
"0.6355819",
"0.63506186",
"0.6345424",
"0.63213414",
"0.63066334",
"0.6274432",
"0.627441",
"0.6234616",
"0.6232547",
"0.6232483",
"0.62076217",
"0.6203277"
] | 0.78329116 | 0 |
Takes as input data the reads from a single (precomputed) block and the genotypes for all variants inside the block. Runs a threephase algorithm to compute a phasing for this isolated | def phase_single_block(block_id, allele_matrix, genotypes, prephasing, param, timers, quiet=False):
block_num_vars = allele_matrix.getNumPositions()
# Check for singleton blocks and handle them differently (for efficiency reasons)
if block_num_vars == 1:
# construct trivial solution for singleton blocks, by using the genotype as phasing
g = genotypes[0]
clusts = [[i for i, r in enumerate(allele_matrix) if r and r[0][1] == a] for a in g]
threads = [sorted(list(chain(*[[i] * g[a] for i, a in enumerate(g)])))]
haps = sorted(list(chain(*[[[a]] * g[a] for a in g])))
return PolyphaseBlockResult(block_id, clusts, threads, haps, [])
# Block is non-singleton here, so run the normal routine
# Phase I: Cluster Editing
# Compute similarity values for all read pairs
timers.start("read_scoring")
logger.debug("Computing similarities for read pairs ..")
sim = scoreReadset(allele_matrix, param.min_overlap, param.ploidy, 0.07)
timers.stop("read_scoring")
# Run cluster editing
timers.start("clustering")
logger.debug(
f"Solving cluster editing instance with {len(allele_matrix)} nodes and {len(sim)} edges .."
)
solver = ClusterEditingSolver(sim, param.ce_bundle_edges)
clustering = solver.run()
del solver
del sim
# Add trailing isolated nodes to single-ton clusters, if missing
nodes_in_c = sum(len(c) for c in clustering)
for i in range(nodes_in_c, len(allele_matrix)):
clustering.append([i])
timers.stop("clustering")
# Phase II: Threading
# Assemble clusters to haplotypes
logger.debug(f"Threading haplotypes through {len(clustering)} clusters ..\r")
timers.start("threading")
# Add dynamic programming for finding the most likely subset of clusters
threads, haplotypes = run_threading(
allele_matrix,
clustering,
param.ploidy,
genotypes,
distrust_genotypes=param.distrust_genotypes,
)
timers.stop("threading")
# Phase III: Reordering
logger.debug("Reordering ambiguous sites ..\r")
timers.start("reordering")
# Recursively resolve collapsed regions in clusters
sub_instances = find_subinstances(allele_matrix, clustering, threads, haplotypes)
sub_results = []
sub_param = copy(param)
sub_param.ignore_phasings = True
sub_param.threads = 1
for cid, thread_set, subm in sub_instances:
snps = [allele_matrix.globalToLocal(gpos) for gpos in subm.getPositions()]
assert all([0 <= pos < allele_matrix.getNumPositions() for pos in snps])
subhaps = [[haplotypes[i][pos] for i in thread_set] for pos in snps]
subgeno = [{a: h.count(a) for a in h} for h in subhaps]
sub_param.ploidy = len(thread_set)
timers.stop("reordering")
res = solve_polyphase_instance(subm, subgeno, sub_param, timers, quiet=True)
timers.start("reordering")
sub_results.append(res)
# collect breakpoints of sub-instances and overall instance. Update threads/haplotypes
breakpoints = integrate_sub_results(
allele_matrix, sub_instances, sub_results, threads, haplotypes
)
del sub_instances
del sub_results
# reorder pieces
run_reordering(allele_matrix, clustering, threads, haplotypes, breakpoints, prephasing)
timers.stop("reordering")
# collect results from threading
return PolyphaseBlockResult(
block_id=block_id,
clustering=[[allele_matrix.getGlobalId(r) for r in c] for c in clustering],
threads=threads,
haplotypes=haplotypes,
breakpoints=breakpoints,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_phaseg(locus_file, gam_file, vg_file, canu_alignments, true_haps):\n\trecombrate=1.26\n\tmax_coverage = 15\n\tall_heterozygous = False\n\tdistrust_genotypes = True\n\twith ExitStack() as stack:\n\t\tnode_seq_list, edge_connections = vg_graph_reader(vg_file)\n\t\tall_reads, alleles_per_pos, locus_branch_mapping = vg_reader(locus_file, gam_file, canu_alignments)\n\t\tall_positions = sorted(all_reads.get_positions())\n\t\tall_components = find_components(all_positions, all_reads)\n\t\tblocks = defaultdict(list)\n\t\tprint(\"all_components\")\n\t\tfor position, block_id in all_components.items():\n\t\t\tblocks[block_id].append(locus_branch_mapping[position][0][0][0])\n\t\tfor k,v in blocks.items():\n\t\t\tprint(k,v)\n\t\tprint(\"all_components\")\n\t\t\n\n\t\t#print(all_reads)\n\t\tselected_indices = readselection(all_reads, max_coverage)\n\t\tselected_reads = all_reads.subset(selected_indices)\n\n\t\t#selected_reads = slice_reads(all_reads, max_coverage)\n\t\t#print('positions from all reads')\n\t\t#print(len(all_reads.get_positions()))\n\t\tprint(\"reads after read-selection\")\n\t\tprint(len(selected_reads))\n\t\tprint(\"positions covered by atleast one read after read selection\")\n\t\tprint(len(selected_reads.get_positions()))\n\n\t\taccessible_positions = sorted(selected_reads.get_positions())\n\t\t\n\t\tprint(\"readset after read_selection\")\n\t\t#for read in selected_reads:\n\t\t\t#print(read.name)\n\t\tpedigree = Pedigree(NumericSampleIds())\n\t\t# compute the number of alleles at each position.\n\t\talleles_per_accessible_pos =[]\n\t\tgenotype_likelihoods = []\n\t\tfor pos in accessible_positions:\n\t\t\tif pos in alleles_per_pos:\n\t\t\t\tn_alleles = alleles_per_pos[pos] \n\t\t\t\tpossible_genotypes = n_alleles + ncr(n_alleles, 2)\n\t\t\t\tgenotype_likelihoods.append(None if all_heterozygous else PhredGenotypeLikelihoods([0]* possible_genotypes))\n\t\t# random input of genotypes, since distrust_genotypes is always 
ON.\n\t\tpedigree.add_individual('individual0', [0]* len(accessible_positions), genotype_likelihoods)\n\t\trecombination_costs = uniform_recombination_map(recombrate, accessible_positions)\n\t\t# Finally, run phasing algorithm\n\t\t#print(selected_reads)\n\t\tdp_table = PedigreeDPTable(selected_reads, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\tsuperreads_list, transmission_vector = dp_table.get_super_reads()\n\n\t\tcost = dp_table.get_optimal_cost()\n\t\tprint(superreads_list[0])\n\t\t#print(cost)\n\t\tread_partitions = dp_table.get_optimal_partitioning()\n\t\t#print(read_partitions)\n\t\t\n\t\t## To generate the connected components and corresponding haplotypes.\n\t\tprint(\"in components\")\n\t\tf = open('whole_genome' + '.predicted_read_partionting.pred', 'w')\n\t\toverall_components = find_components(accessible_positions, selected_reads)\n\t\t\n\t\tread_partitions_dict ={}\n\t\tfor read, haplotype in zip(selected_reads, read_partitions):\n\t\t\tphaseset = overall_components[read[0].position] + 1\n\t\t\tprint(read.name, phaseset, haplotype, file=f)\n\t\t\tread_partitions_dict[read.name] = haplotype\n\t\t#phaset is blockid\n\n\t\tn_phased_blocks = len(set(overall_components.values()))\n\t\tall_phased_blocks = len(set(all_components.values()))\n\t\tprint('No. of phased blocks: %d', n_phased_blocks)\n\t\tlargest_component = find_largest_component(overall_components)\n\t\tprint('No. 
of blocks from all the reads: %d', all_phased_blocks)\n\t\tlargest_component_all_reads = find_largest_component(all_components)\n\t\tif len(largest_component) > 0:\n\t\t\tprint('Largest component contains %d variants',len(largest_component))\n\t\tif len(largest_component_all_reads) > 0:\n\t\t\tprint('Largest component contains %d variants',len(largest_component_all_reads))\n\t\t\n\t\t\n\t\t### To generate contig sequences\n\t\tsample = 0\n\t\tsuperreads, components = dict(), dict()\n\t\tsuperreads[sample] = superreads_list[0]\n\t\tcomponents[sample] = overall_components\n\t\t#generate_hap_contigs_based_on_canu(superreads_list[0], components[sample], node_seq_list, locus_branch_mapping, edge_connections, canu_alignments, vg_file)\n\t\t#generate_hap_contigs(superreads_list[0], overall_components, node_seq_list, locus_branch_mapping, edge_connections)\n\t\t\n\t\tnodes_in_bubbles =[]\n\t\twith stream.open(str(locus_file), \"rb\") as istream:\n\t\t\tfor data in istream:\n\t\t\t\tl = vg_pb2.SnarlTraversal()\n\t\t\t\tl.ParseFromString(data)\n\t\t\t\tfor i in range(0,len(l.visits)):\n\t\t\t\t\tnodes_in_bubbles.append(l.visits[i].node_id)\n\t\t\t\t#nodes_in_bubbles.append(l.snarl.end.node_id)\n\t\t\t\t#nodes_in_bubbles.append(l.snarl.start.node_id)\n\t\tedge_connections_tmp = defaultdict(list)\n\t\twith stream.open(str(vg_file), \"rb\") as istream:\n\t\t\tfor data in istream:\n\t\t\t\tl = vg_pb2.Graph()\n\t\t\t\tl.ParseFromString(data)\n\t\t\t\tfor j in range(len(l.edge)):\n\t\t\t\t\tfrom_edge = getattr(l.edge[j], \"from\")\n\t\t\t\t\t#if from_edge not in nodes_in_bubbles and l.edge[j].to not in nodes_in_bubbles:\n\t\t\t\t\tedge_connections_tmp[str(from_edge)].append(str(l.edge[j].to))\n\t\t\t\t\tedge_connections_tmp[str(l.edge[j].to)].append(str(from_edge))\n\n\n\t\t#generate_hap_contigs_based_on_canu(superreads, components, node_seq_list, locus_branch_mapping, edge_connections, canu_alignments, vg_file)\n\t\t#generate_hap_contigs_avgRL(superreads, components, 
node_seq_list, locus_branch_mapping, edge_connections, edge_connections_tmp, gam_file, read_partitions_dict, nodes_in_bubbles)\n\t\t\n\t\t# evaluation partition all the reads based on one iteration\n\t\t#print('partition all the reads based on haplotypes from one iteration')\n\t\t# Check here if you wanna do all reads or selected reads only\n\t\t#haplotag(superreads_list[0], selected_reads, overall_components, 1)\n\t\t\n\t\t#compute_read_partitioning_accuracy(\"true_partioning\")\n\n\n\n\t\t##generate_hap_contigs(superreads, components, node_seq_list, locus_branch_mapping, edge_connections)\n\t\t\n\t\t##For phasing accuracy, read true haps and generate corresponding superreads\n\t\t#all_reads_true, alleles_per_pos_true, locus_branch_mapping_true = vg_reader(locus_file, true_haps)\n\t\t# Finally, run phasing algorithm for true haplotypes\n\t\t#dp_table_true = PedigreeDPTable(all_reads_true, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\t#superreads_list_true, transmission_vector_true = dp_table_true.get_super_reads()\n\t\t# to compute the phasing accuracy\n\t\t#true_haps = ReadSet()\n\t\t#for read in all_reads_true:\n\t\t\t#tmp_read = Read(read.name, 0, 0, 0)\n\t\t\t#for variant in read:\n\t\t\t\t#if variant.position in accessible_positions:\n\t\t\t\t\t#tmp_read.add_variant(variant.position, variant.allele, [10])\n\t\t\t#true_haps.add(tmp_read)\n\t\t#compare(superreads_list[0], true_haps, overall_components)\n\t\t## To perform iterative whatshap phasing\n\t\t#remaining_reads =[]\n\t\t#for read in all_reads:\n\t\t\t#remaining_reads.append(read.name)\n\t\t#prev_superreads = superreads_list[0]\n\t\t#for read in selected_reads:\n\t\t\t#remaining_reads.remove(read.name)\n\t\t#while len(remaining_reads)>0:\n\t\t\t#print('iteration')\n\t\t\t#iterative_reaset = ReadSet()\n\t\t\t#for read in all_reads:\n\t\t\t\t#if read.name in remaining_reads:\n\t\t\t\t\t#iterative_reaset.add(read)\n\n\t\t\t\t\n\t\t\t#selected_indices = 
readselection(iterative_reaset, max_coverage)\n\t\t\t#selected_reads = iterative_reaset.subset(selected_indices)\n\t\t\t#for read in prev_superreads:\n\t\t\t\t#selected_reads.add(read)\n\t\t\t\t#remaining_reads.append(read.name)\n\t\t\t#accessible_positions = sorted(selected_reads.get_positions())\n\t\t\t#selected_reads.sort()\n\t\t\t#pedigree = Pedigree(NumericSampleIds())\n\t\t\t## compute the number of alleles at each position.\n\t\t\t#alleles_per_accessible_pos =[]\n\t\t\t#genotype_likelihoods = []\n\t\t\t#for pos in accessible_positions:\n\t\t\t\t#if pos in alleles_per_pos:\n\t\t\t\t\t#n_alleles = alleles_per_pos[pos] \n\t\t\t\t\t#possible_genotypes = n_alleles + ncr(n_alleles, 2)\n\t\t\t\t\t#genotype_likelihoods.append(None if all_heterozygous else PhredGenotypeLikelihoods([0]* possible_genotypes))\n\t\t\t## random input of genotypes, since distrust_genotypes is always ON.\n\t\t\t#pedigree.add_individual('individual0', [0]* len(accessible_positions), genotype_likelihoods)\n\t\t\t#recombination_costs = uniform_recombination_map(recombrate, accessible_positions)\n\t\t\t## Finally, run phasing algorithm\n\t\t\t##print(selected_reads)\n\t\t\t#dp_table = PedigreeDPTable(selected_reads, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\t\t#superreads_list, transmission_vector = dp_table.get_super_reads()\n\t\t\t#for read in selected_reads:\n\t\t\t\t#remaining_reads.remove(read.name)\n\t\t\t#prev_superreads = superreads_list[0]\n\t\t\t\n\t\t#print('I am final')\n\t\t#accessible_positions = sorted(all_reads.get_positions())\n\t\t#overall_components = find_components(accessible_positions, all_reads)\n\t\t#haplotag(superreads_list[0], all_reads, overall_components, \"all_iter\")\n\t\t#compare(superreads_list[0], superreads_list_true[0], overall_components)\n\t\t#print(superreads_list[0])\n\t\t\n\t\t#iterative whatshap for sparse matrices where we fix the phasing for variants at each iteration that reach max coverage.",
"def gen_data(npt, typ, ndim, rstate=None):\n mid = .5 # i'm placing in unit cube\n if typ == 'ball':\n r0 = 0.5\n pts = genball(npt, ndim, rstate=rstate) * r0 + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n r0**ndim)\n elif typ == 'pin':\n w = 0.01\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, 1:] = genball(npt, ndim - 1, rstate=rstate) * w + mid\n pts[:, 0] = (rstate.uniform(size=npt) - 0.5) * a + mid\n volume = (np.pi**((ndim - 1) / 2) /\n scipy.special.gamma((ndim - 1) / 2 + 1) * w**(ndim - 1) * a)\n elif typ == 'torus':\n w = 0.01\n r0 = 0.45\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:,\n 2:] = (rstate.uniform(size=(npt, ndim - 2)) * 2 - 1) * w / 2 + mid\n volume = w**(ndim - 2) * np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'cylinder':\n w = 0.01\n r0 = 0.45\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:, 2:] = rstate.uniform(size=(npt, ndim - 2)) * a\n volume = np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'shell':\n r1 = 0.45\n r2 = 0.46\n pts = genshell(r1, r2, npt, ndim, rstate=rstate) + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n (r2**ndim - r1**ndim))\n else:\n raise RuntimeError('unknown', typ)\n return pts, volume",
"def phase_blocks(posBlock, GTblock, RefBlock, FlagB):\n blockSameCount = 0\n blockReverseCount = 0\n GTblockPhase = []\n GTblockReturn = []\n\n for i in range(len(GTblock)):\n GT = GTblock[i]\n RefGT = RefBlock[i]\n if FlagB[i] == \"FV\": # uncertain variants are set to N\n GTblock[i] = ['N', 'N']\n else: # find and count cases when phased genotype is consistent/inconsistent with parental genotypes\n GTphase = phase_state(GT, RefGT)\n if GTphase == 'same':\n blockSameCount += 1\n GTblockPhase.append('same')\n elif GTphase == 'reverse':\n blockReverseCount += 1\n GTblockPhase.append('reverse')\n\n # find prevalent phase \n if all_same(GTblockPhase) and (len(GTblockPhase) >= 2): # absolutely consistent with parental genotypes\n if GTblockPhase[0] == ['same']:\n RSratio = 1.0\n else:\n RSratio = 0.0\n RSratio = 0.0\n elif GTblockPhase == []: # phase unknown\n RSratio = 'NA'\n else:\n RSratio = float(blockSameCount)/float(blockSameCount+blockReverseCount) # proportion of 'same' phasing state in block strings\n\n # define the block phase and produce output\n if (RSratio == 'NA') or (RSratio < 0.90 and RSratio > 0.10): # discard block that have > 90% of inconsistency with parental reference genotypes, or \n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # phase according to the prevalent state\n # find prevalent state\n phaseStateNumber = max(map(GTblockPhase.count, GTblockPhase))\n GTblockDefinedPahse = list(set( i for i in GTblockPhase if GTblockPhase.count(i) == phaseStateNumber ))\n if len(GTblockDefinedPahse) == 1: # check if one state is prevalent\n if GTblockDefinedPahse == ['same']:\n phaseState = [0,1]\n else:\n phaseState = [1,0]\n for j in range(len(GTblock)):\n GT = GTblock[j]\n posBlockPrint = posBlock[j]\n GTblockPrint1 = GT[phaseState[0]]\n GTblockPrint2 = GT[phaseState[1]]\n 
GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # if there is conflict in phasing state, set to Ns. It usually applies for blocks with less then 10 position overlaps with parental reference.\n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n phaseState = [0,1]\n\n return(GTblockReturn, RSratio)",
"def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return 
sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data",
"def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n 
#Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers",
"def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return 
sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data",
"def run_phaseg(gam_file):\n\ttimers = StageTimer()\n\trecombrate=1.26\n\tall_heterozygous = False\n\tdistrust_genotypes = True\n\ttimers.start('overall')\n\t#logger.info(\"This is WhatsHap %s running under Python %s\", __version__, platform.python_version())\n\twith ExitStack() as stack:\n\t\tlogger.info('Using uniform recombination rate of %g cM/Mb.', recombrate)\n\t\tall_reads, alleles_per_pos = vg_reader(locus_file, gam_file)\n\t\tprint(all_reads)\n\t\tselected_indices = readselection(all_reads, 15)\n\t\tselected_reads = all_reads.subset(selected_indices)\n\t\taccessible_positions = sorted(selected_reads.get_positions())\n\t\tpedigree = Pedigree(NumericSampleIds())\n\t\t# compute the number of alleles at each position.\n\t\talleles_per_accessible_pos =[]\n\t\tgenotype_likelihoods = []\n\t\tfor pos in accessible_positions:\n\t\t\tif pos in alleles_per_pos:\n\t\t\t\tn_alleles = alleles_per_pos[pos] \n\t\t\t\tpossible_genotypes = n_alleles + ncr(n_alleles, 2)\n\t\t\t\tgenotype_likelihoods.append(None if all_heterozygous else PhredGenotypeLikelihoods([0]* possible_genotypes))\n\t\t# random input of genotypes, since distrust_genotypes is always ON.\n\t\tpedigree.add_individual('individual0', [0]* len(accessible_positions), genotype_likelihoods)\n\t\trecombination_costs = uniform_recombination_map(recombrate, accessible_positions)\n\t\t# Finally, run phasing algorithm\n\t\tprint(selected_reads)\n\t\tdp_table = PedigreeDPTable(selected_reads, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\tsuperreads_list, transmission_vector = dp_table.get_super_reads()\n\t\tcost = dp_table.get_optimal_cost()\n\t\tprint(superreads_list[0])\n\t\tprint(cost)\n\t\tread_partitions = dp_table.get_optimal_partitioning()\n\t\tprint(read_partitions)",
"def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig = i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th 
dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat",
"def read_from_ses3d_block(directory):\n\n # Initialise arrays of Cartesian coordinates.\n\n x=[]\n y=[]\n z=[]\n\n # Read yaml file containing information on the ses3d submodel.\n with io.open(os.path.join(directory,'modelinfo.yml'), 'rt') as fh:\n model_info = yaml.load(fh)\n\n rot_vec = np.array([model_info['geometry']['rot_x'], model_info['geometry']['rot_y'], model_info['geometry']['rot_z']])\n rot_angle = model_info['geometry']['rot_angle']\n\n # Read block files.\n\n fid_x = open(os.path.join(directory,'block_x'), 'r')\n fid_y = open(os.path.join(directory,'block_y'), 'r')\n fid_z = open(os.path.join(directory,'block_z'), 'r')\n\n dx = np.array(fid_x.read().strip().split('\\n'), dtype=float)\n dy = np.array(fid_y.read().strip().split('\\n'), dtype=float)\n dz = np.array(fid_z.read().strip().split('\\n'), dtype=float)\n\n fid_x.close()\n fid_y.close()\n fid_z.close()\n\n # Setup of coordinate lines.\n\n nsubvol = int(dx[0])\n\n idx = np.ones(nsubvol, dtype=int)\n idy = np.ones(nsubvol, dtype=int)\n idz = np.ones(nsubvol, dtype=int)\n\n for k in np.arange(1, nsubvol, dtype=int):\n idx[k] = int(dx[idx[k - 1]]) + idx[k - 1] + 1\n idy[k] = int(dy[idy[k - 1]]) + idy[k - 1] + 1\n idz[k] = int(dz[idz[k - 1]]) + idz[k - 1] + 1\n\n for k in np.arange(nsubvol, dtype=int):\n\n # Coordinates of the box corners.\n colat = dx[(idx[k] + 1):(idx[k] + 1 + int(dx[idx[k]]))]\n lon = dy[(idy[k] + 1):(idy[k] + 1 + int(dy[idy[k]]))]\n rad = dz[(idz[k] + 1):(idz[k] + 1 + int(dz[idz[k]]))]\n \n # Coordinates of the box centroids.\n colat_c = (np.array(colat[0:-1])+np.array(colat[1:]))/2.0\n lon_c = (np.array(lon[0:-1]) + np.array(lon[1:]))/2.0\n rad_c = (np.array(rad[0:-1]) + np.array(rad[1:]))/2.0\n \n # Compute Cartesian coordinates for all grid points.\n for c in colat_c:\n for l in lon_c:\n xx=np.cos(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n yy=np.sin(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n zz=np.cos(c*np.pi/180.0)\n for r in rad_c:\n x.append(r*xx)\n y.append(r*yy)\n 
z.append(r*zz)\n \n\n # Rotate, if needed.\n\n if (rot_angle!=0.0):\n rot_mat = get_rot_matrix(rot_angle*np.pi/180.0, *rot_vec)\n x, y, z = rotate(x, y, z, rot_mat)\n\n # Return.\n\n return x, y, z",
"def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 
'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd",
"def part3c_0():\n xs = exampleInput\n N = 10000\n\n difference = 0.0\n for ys, estimatedProb in submission.computeGibbsProbabilities( simpleCRF, \n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, N ).iteritems():\n trueProb = nerUtils.computeProbability( simpleCRF, xs, ys )\n difference = abs( trueProb - estimatedProb )\n grader.requireIsLessThan( 5e-2, difference )",
"def debug_flow_input_to_pcds_pcl_c3d(self, pcl_c3d_1, pcl_c3d_2, pcl_c3d_gt_1, pcl_c3d_gt_2, pcl_c3d_1_from_2, pcl_c3d_2_from_1):\n \n flats = {}\n flats[\"1\"] = pcl_c3d_1.flat.to_pcd()\n flats[\"2\"] = pcl_c3d_2.flat.to_pcd()\n flats[\"gt_1\"] = pcl_c3d_gt_1.flat.to_pcd()\n flats[\"gt_2\"] = pcl_c3d_gt_2.flat.to_pcd()\n flats[\"1_from_2\"] = pcl_c3d_1_from_2.flat.to_pcd()\n flats[\"2_from_1\"] = pcl_c3d_2_from_1.flat.to_pcd()\n\n grids = {}\n grids[\"1\"] = pcl_c3d_1.grid.to_pcd()\n grids[\"2\"] = pcl_c3d_2.grid.to_pcd()\n grids[\"gt_1\"] = pcl_c3d_gt_1.grid.to_pcd()\n grids[\"gt_2\"] = pcl_c3d_gt_2.grid.to_pcd()\n grids[\"1_from_2\"] = pcl_c3d_1_from_2.grid.to_pcd()\n grids[\"2_from_1\"] = pcl_c3d_2_from_1.grid.to_pcd()\n\n batch_size = len(flats[\"1\"])\n for key in flats:\n assert len(flats[key]) == batch_size\n for key in grids:\n assert len(grids[key]) == batch_size\n\n path = self.opts.debug_path\n name = \"n{:04d}_b{}_s{}_{}\"\n for ib in range(batch_size):\n pcl_write(flats[\"1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"pred_flat\") ) )\n pcl_write(flats[\"2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"pred_flat\") ) )\n pcl_write(flats[\"gt_1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"gt_flat\") ) )\n pcl_write(flats[\"gt_2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"gt_flat\") ) )\n pcl_write(flats[\"1_from_2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"flowed_flat\") ) )\n pcl_write(flats[\"2_from_1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"flowed_flat\") ) )\n\n pcl_write(grids[\"1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"pred_grid\") ) )\n pcl_write(grids[\"2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"pred_grid\") ) )\n pcl_write(grids[\"gt_1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"gt_grid\") ) )\n 
pcl_write(grids[\"gt_2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"gt_grid\") ) )\n pcl_write(grids[\"1_from_2\"][ib], os.path.join(path, name.format(self.internal_count, ib, 0, \"flowed_grid\") ) )\n pcl_write(grids[\"2_from_1\"][ib], os.path.join(path, name.format(self.internal_count, ib, 1, \"flowed_grid\") ) )\n\n return",
"def visualise_data_pca_3d(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)",
"def flow_condition(p_prime, p3, triple):\n\n all_edges = set(self.arc_info.keys())\n not_p_prime = all_edges.difference(set(p_prime))\n #print(\"Not p_prime: {}\".format(not_p_prime))\n not_p3 = all_edges.difference(set(p3))\n #print(\"Not p_3: {}\".format(not_p3))\n p_prime_alone = list(set(p_prime).intersection(not_p3))\n #print(\"p_prime_alone: {}\".format(p_prime_alone))\n p3_alone = list(set(p3).intersection(not_p_prime))\n #print(\"p3 alone: {}\".format(p3_alone))\n overlap = list(set(p3).intersection(p_prime))\n #print(\"overlap alone: {}\".format(overlap))\n\n #print(\"computing L_wprime and U_wprime\")\n L_wprime, U_wprime = compute_bounds(p_prime_alone, triple)\n #print(\"computing L_w3 and U_w3\")\n L_w3, U_w3 = compute_bounds(p3_alone, triple)\n #print(\"computing L_overlap and U_overlap\")\n L_overlap, U_overlap = compute_bounds(overlap, triple)\n #print(\"L_wprime, U_wprime: {} {}\".format(L_wprime, U_wprime))\n #print(\"L_w3, U_w3: {} {}\".format(L_w3, U_w3))\n #print(\"{} <= {}\".format(L_overlap, U_wprime + U_w3))\n #print(\"{} >= {}\".format(U_overlap, L_wprime + L_w3))\n meets_conditions = (L_wprime <= U_wprime) & \\\n (L_w3 <= U_w3) & \\\n (L_overlap <= U_wprime + U_w3) & \\\n (L_wprime + L_w3 <= U_overlap)\n if meets_conditions:\n w_prime, w3 = center_flows(L_wprime, U_wprime,\n L_w3, U_w3,\n L_overlap, U_overlap)\n # change paths\n # first, delete:\n for index in sorted(triple, reverse=True):\n del self.paths[index]\n del self.weights[index]\n # now, add:\n self.paths.append(p3)\n self.paths.append(p_prime)\n self.weights.append(w3)\n self.weights.append(w_prime)\n # update weights on edges\n self.update_edge_weights()\n self.check_flow()\n self.check_paths()\n return(True)\n else:\n return(False)",
"def set_consist(ss, ia, ib, input1, input2):\n comf3 = open(input2).readlines()\n comf1 = open(input1).readlines()\n\n \"\"\"\n get module identifiers\n \"\"\"\n comm1_array = []\n comm3_array = []\n\n for line in comf1:\n a, b = map(int, line.split())\n comm1_array.append(b)\n\n comm1_array.append(comm1_array[len(comm1_array)-1])\n\n for line in comf3:\n a, b = map(int, line.split())\n comm3_array.append(b)\n\n\n \"\"\"\n Make dictionaries. module numbers are keys and voxels in modules are values\n \"\"\" \n mod3_dict = {}\n mod1_dict = {}\n for i in set(comm3_array):\n mod3_dict[i] = [v for v, c in enumerate(comm3_array) if c == i]\n for i in set(comm1_array):\n mod1_dict[i] = [v for v, c in enumerate(comm1_array) if c == i]\n\n\n \"\"\"\n For each voxel, find its module in condition3, then in condition1, and interset voxels in its module in condition3 with condition1\n \"\"\"\n preservation = []\n for i in xrange(len(comm3_array)):\n if len(mod3_dict[comm3_array[i]]) < 20 or len(mod1_dict[comm1_array[i]]) < 20:\n preservation.append(777)\n else:\n inter = len(set(mod3_dict[comm3_array[i]]).intersection(set(mod1_dict[comm1_array[i]])))\n preservation.append(round(inter / float(len(mod3_dict[comm3_array[i]])), 4))\n\n pres_out = \"\"\n for line in preservation:\n pres_out += str(round(line,4))+\"\\n\"\n\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/preserved_iters_\"+ia+\"_\"+ib+\"_\"+ss+\".txt\"\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency2/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outf = open(outname, \"w\")\n outf.write(pres_out)\n outf.close()",
"def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label = np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in 
tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label",
"def Initialize():\n # --- Set four-character run id, comment lines, user's name.\n top.pline2 = \"Example 3D beam in a FODO lattice\"\n top.pline1 = \"S-G cigar beam. 64x64x256\"\n top.runmaker = \"David P. Grote\"\n\n # --- Invoke setup routine - it is needed to created a cgm file for plots\n setup()\n\n # --- Create the beam species\n beam = Species(type=Potassium,charge_state=+1,name=\"Beam species\")\n\n # --- Set input parameters describing the beam, 72 to 17.\n beam.b0 = 15.358933450767e-3\n beam.a0 = 8.6379155933081e-3\n beam.x0 = 3.*mm\n beam.emit = 51.700897052724e-6\n beam.ap0 = 0.e0\n beam.bp0 = 0.e0\n beam.ibeam = 2.e-03\n beam.vbeam = 0.e0\n beam.ekin = 80.e3\n beam.aion = beam.type.A\n beam.zion = beam.charge_state\n top.lrelativ = false\n top.derivqty()\n beam.vthz = .5e0*beam.vbeam*beam.emit/sqrt(beam.a0*beam.b0) # Vthz ~ Vthperp\n\n # +++ Set up arrays describing lattice.\n # --- Set temp variables.\n hlp = 36.0e-2 # half lattice period length\n piperad = 3.445e-2 # pipe radius\n quadlen = 11.e-2 # quadrupole length\n gaplen = 4.*cm\n rodlen = quadlen + gaplen\n dbdx = .949/quadlen\n\n # --- Set general lattice variables.\n top.tunelen = 2.e0*hlp\n env.zl = -hlp*2\n env.zu = -env.zl\n env.dzenv = top.tunelen/100.e0\n\n # --- Set up quadrupoles\n addnewquad(zs= -quadlen/2.,\n ze= +quadlen/2.,\n db=-dbdx,ap=piperad)\n addnewquad(zs=hlp - quadlen/2.,\n ze=hlp + quadlen/2.,\n db=+dbdx,ap=piperad)\n addnewquad(zs=2.*hlp - quadlen/2.,\n ze=2.*hlp + quadlen/2.,\n db=-dbdx,ap=piperad)\n top.zlatstrt = 0.\n top.zlatperi = 2.e0*hlp\n\n # +++ Set input parameters describing the 3d simulation.\n w3d.nx = 64/2\n w3d.ny = 64/2\n w3d.nz = 256/2\n steps_p_perd = 50\n top.dt = (top.tunelen/steps_p_perd)/beam.vbeam\n\n # --- Set to finite beam.\n top.pbound0 = top.pboundnz = periodic\n top.pboundxy = absorb\n w3d.xmmin = -piperad\n w3d.xmmax = piperad\n w3d.ymmin = -piperad\n w3d.ymmax = piperad\n w3d.zmmin = -hlp*2\n w3d.zmmax = +hlp*2\n top.prwall = piperad\n\n # 
--- Set pulse length.\n beam.zimin = w3d.zmmin*.95/2.\n beam.zimax = w3d.zmmax*.95/2.\n\n # --- Load Semi-Gaussian cigar beam.\n top.npmax = 20000\n w3d.distrbtn = \"semigaus\"\n w3d.cigarld = true\n w3d.xrandom = \"digitrev\"\n w3d.vtrandom = \"digitrev\"\n w3d.vzrandom = \"digitrev\"\n w3d.ldprfile = \"polar\"\n w3d.cylinder = false\n top.straight = .8\n\n # --- set up field solver\n w3d.l4symtry = true\n w3d.bound0 = periodic\n w3d.boundnz = periodic\n w3d.boundxy = dirichlet\n\n solver = MultiGrid3D()\n registersolver(solver)\n\n pipe = ZCylinderOut(piperad,4.,voltage=0.)\n installconductors(pipe,dfill=largepos)\n\n # --- Run the envelope solver to provide data used to initialize particles.\n package(\"env\")\n generate()\n step()\n\n # --- Generate the PIC code (allocate storage, load ptcls, t=0 plots, etc.).\n package(\"w3d\")\n generate()\n return",
"def main(): \n \n # parse command line arguments\n parser = argparse.ArgumentParser(description='Runs variant calling on pileup file and stores in vfc file')\n parser.add_argument('--use-read-quality', default=False, action='store_true',\n help='tells the algorithm to estimate p from read qualities')\n parser.add_argument('--call-less-positions', default=False, action='store_true',\n help='tells the program to call less positions (not whole pileup file)')\n parser.add_argument('--input-file', default='merged-normal.pileup', type=str,\n help='path to input file in pileup format')\n parser.add_argument('--output-file', default='Make name from input name', type=str,\n help='name for the output vcf file. If not given, will be created from input file name')\n parser.add_argument('--p', default='0.99', type=float,\n help='probability estimate of one nucleotide read being correct, used by vc algorithm')\n parser.add_argument('--positions-to-call', default='10000', type=int,\n help='how many positions to call if call-less-positions set to true')\n args = parser.parse_args()\n if args.output_file == 'Make name from input name':\n args.output_file = args.input_file + '.vcf'\n \n variant_caller = VariantCaller()\n sample = 'SAMPLE1'\n \n # creates vcf file\n create_vcf_start = time.time()\n vcf = create_vcf_file(args.output_file, sample)\n create_vcf_end = time.time()\n print('Vcf header created. 
Elapsed time: {}'.format(create_vcf_end - create_vcf_start))\n\n main_loop_start = time.time()\n position_count = 0\n variant_caller_time = 0\n positions_with_variants = 0\n write_vcf_time = 0\n\n for pileup_line in pileup_reader(args.input_file):\n # calls variant for each pileup line\n variant_caller_start = time.time()\n variant_caller.call_variant(pileup_line, args.p, args.use_read_quality)\n if pileup_line['alts'] != '.':\n positions_with_variants += 1\n variant_caller_time += time.time() - variant_caller_start\n\n # writes line in VCF file\n write_vcf_start = time.time()\n write_vcf_line(pileup_line, vcf, sample)\n write_vcf_time = time.time() - write_vcf_start\n\n position_count += 1\n if args.call_less_positions and (position_count >= args.positions_to_call):\n break\n \n main_loop_end = time.time()\n total_running_time = main_loop_end - main_loop_start\n\n print('Processed {} positions. Found variants at {} positions.'.format(position_count, positions_with_variants))\n\n print('Total running time is {}'.format(total_running_time))\n print('Pileup reader: {}'.format(total_running_time - variant_caller_time - write_vcf_time))\n print('Variant calling: {}'.format(variant_caller_time))\n print('Vcf writing: {}'.format(write_vcf_time))",
"def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += 
Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = 
numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral",
"def part3c_1():\n xs = exampleInput\n ys = exampleTags\n N = 10000\n\n ys_ = submission.computeGibbsBestSequence(\n simpleCRF,\n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, \n N)\n grader.requireIsEqual( ys, ys_ )",
"def ThreeDTest(SMethod,IMethod,Fraction,Plot = False):\r\n \r\n # Cylinder Parameters--------------------------------------------------------- \r\n CL = 100 # cylinder length\r\n Pt = 120 # number of points in each cylinder\r\n Cn = 50 # number of horizontal slices in cylinder\r\n\r\n x = np.zeros(Cn*Pt)\r\n y = np.zeros(Cn*Pt)\r\n z = np.zeros(Cn*Pt)\r\n # Generate cylinder-----------------------------------------------------------\r\n n = 0\r\n for i in range(Cn):\r\n for j in range(Pt):\r\n x[n] = np.cos((2*pi*j)/Pt)\r\n y[n] = np.sin((2*pi*j)/Pt)\r\n z[n] = i*(CL/Cn)\r\n n += 1\r\n \r\n YFull = (np.sin(2*pi*0.03*z))+(np.cos(2*pi*x+2*pi*x))\r\n XFull = np.column_stack((x,y,z))\r\n MFull = np.column_stack((x,y,z,YFull))\r\n\r\n # Randomise matrix and Generate sparse version of geometry--------------------\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n np.random.shuffle(MFull)\r\n # Sparse Set\r\n XTrain = MFull[:split,:3]\r\n YTrain = MFull[:split,3]\r\n # Training set\r\n XStar = MFull[split:,:3]\r\n CStar = MFull[split:,3]\r\n\r\n # Reconstruct XFull's geometry using XTrain and YTrain------------------------\r\n YHat = ThreeDPointInter(XTrain,YTrain,XFull,SMethod,IMethod,10)\r\n mse = mseCalc(YFull,YHat)\r\n print('Mean Squared Error =',mse)\r\n # Plot whole data-----------------------------------------------------------\r\n if Plot:\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(131, projection='3d')\r\n ax1.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YFull],cmap='plasma')\r\n ax1.set_xlabel('x')\r\n ax1.set_ylabel('y')\r\n ax1.set_zlabel('z')\r\n # Plot training Data\r\n ax2 = fig.add_subplot(132, projection='3d')\r\n ax2.scatter(XTrain[:,0],XTrain[:,1],XTrain[:,2],c=[float(i) for i in YTrain],cmap='plasma')\r\n ax2.set_xlabel('XTrain1')\r\n ax2.set_ylabel('XTrain2')\r\n ax2.set_zlabel('XTrain3')\r\n # Plot Reconstruction of XFull\r\n ax3 = fig.add_subplot(133, projection='3d')\r\n 
ax3.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YHat],cmap='plasma')\r\n ax3.set_xlabel('x')\r\n ax3.set_ylabel('y')\r\n ax3.set_zlabel('z')\r\n \r\n plt.show()\r\n\r\n return mse",
"def get_three_spin_blocks_rdm(self) -> 'Nparray':\n norb = self.norb()\n # p^q r^s t^ u spin-blocks\n ckckck_aaa = numpy.zeros((norb, norb, norb, norb, norb, norb),\n dtype=self._dtype)\n ckckck_aab = numpy.zeros((norb, norb, norb, norb, norb, norb),\n dtype=self._dtype)\n ckckck_abb = numpy.zeros((norb, norb, norb, norb, norb, norb),\n dtype=self._dtype)\n ckckck_bbb = numpy.zeros((norb, norb, norb, norb, norb, norb),\n dtype=self._dtype)\n\n dveca, dvecb = self.calculate_dvec_spin()\n dveca_conj, dvecb_conj = dveca.conj().copy(), dvecb.conj().copy()\n opdm, tpdm = self.get_openfermion_rdms()\n # alpha-alpha-alpha\n for t, u in itertools.product(range(self.norb()), repeat=2):\n tdveca_a, _ = self._calculate_dvec_spin_with_coeff(\n dveca[t, u, :, :])\n tdveca_b, tdvecb_b = self._calculate_dvec_spin_with_coeff(\n dvecb[t, u, :, :])\n for r, s in itertools.product(range(self.norb()), repeat=2):\n # p(:)^ q(:) r^ s t^ u\n # a-a-a\n pq_rdm = numpy.tensordot(dveca_conj, tdveca_a[r, s, :, :]).T\n ckckck_aaa[:, :, r, s, t, u] = pq_rdm\n # a-a-b\n pq_rdm = numpy.tensordot(dveca_conj, tdveca_b[r, s, :, :]).T\n ckckck_aab[:, :, r, s, t, u] = pq_rdm\n # a-b-b\n pq_rdm = numpy.tensordot(dveca_conj, tdvecb_b[r, s, :, :]).T\n ckckck_abb[:, :, r, s, t, u] = pq_rdm\n # b-b-b\n pq_rdm = numpy.tensordot(dvecb_conj, tdvecb_b[r, s, :, :]).T\n ckckck_bbb[:, :, r, s, t, u] = pq_rdm\n\n # p^ r^ t^ u s q = p^ q r^ s t^ u + d(q, r) p^ t^ s u - d(q, t)p^ r^ s u\n # + d(s, t)p^ r^ q u - d(q,r)d(s,t)p^ u\n tpdm_swapped = tpdm.transpose(0, 2, 1, 3).copy()\n\n for ii in range(ckckck_aaa.shape[0]):\n ckckck_aaa[:, ii, ii, :, :, :] += tpdm_swapped[::2, ::2, ::2, ::2]\n ckckck_aaa[:, ii, :, :, ii, :] -= tpdm[::2, ::2, ::2, ::2]\n ckckck_aaa[:, :, :, ii, ii, :] += tpdm_swapped[::2, ::2, ::2, ::2]\n for jj in range(ckckck_aaa.shape[0]):\n ckckck_aaa[:, ii, ii, jj, jj, :] -= opdm[::2, ::2]\n ccckkk_aaa = ckckck_aaa.transpose(0, 2, 4, 5, 3, 1).copy()\n\n for ii in range(ckckck_aab.shape[0]):\n 
ckckck_aab[:, ii, ii, :, :, :] += tpdm_swapped[::2, ::2, 1::2, 1::2]\n ccckkk_aab = ckckck_aab.transpose(0, 2, 4, 5, 3, 1).copy()\n\n for ii in range(ckckck_abb.shape[0]):\n ckckck_abb[:, :, :, ii, ii, :] += tpdm_swapped[::2, ::2, 1::2, 1::2]\n ccckkk_abb = ckckck_abb.transpose(0, 2, 4, 5, 3, 1).copy()\n\n for ii in range(ckckck_bbb.shape[0]):\n ckckck_bbb[:, ii, ii, :, :, :] += tpdm_swapped[1::2, 1::2, 1::2, 1::\n 2]\n ckckck_bbb[:, ii, :, :, ii, :] -= tpdm[1::2, 1::2, 1::2, 1::2]\n ckckck_bbb[:, :, :, ii, ii, :] += tpdm_swapped[1::2, 1::2, 1::2, 1::\n 2]\n for jj in range(ckckck_bbb.shape[0]):\n ckckck_bbb[:, ii, ii, jj, jj, :] -= opdm[1::2, 1::2]\n ccckkk_bbb = ckckck_bbb.transpose(0, 2, 4, 5, 3, 1).copy()\n\n return ccckkk_aaa, ccckkk_aab, ccckkk_abb, ccckkk_bbb",
"def read(self, file, nints, nskip, nocal, nopass, selectpol):\n\n self.file = file\n self.nints = nints\n vis = miriad.VisData(self.file,)\n\n # read data into python arrays\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=True, nopass=True):\n # Loop to skip some data and read shifted data into original data arrays\n if i == 0:\n # get few general variables\n self.nants0 = inp.getScalar ('nants', 0)\n self.inttime0 = inp.getScalar ('inttime', 10.0)\n self.nspect0 = inp.getScalar ('nspect', 0)\n self.nwide0 = inp.getScalar ('nwide', 0)\n self.sdf0 = inp.getScalar ('sdf', self.nspect0)\n self.nschan0 = inp.getScalar ('nschan', self.nspect0)\n self.ischan0 = inp.getScalar ('ischan', self.nspect0)\n self.sfreq0 = inp.getScalar ('sfreq', self.nspect0)\n self.restfreq0 = inp.getScalar ('restfreq', self.nspect0)\n self.pol0 = inp.getScalar ('pol')\n # DLK 2013-04-04\n # get the initial phase center\n self.ra0=inp.getScalar('ra')\n self.dec0=inp.getScalar('dec')\n\n self.sfreq = self.sfreq0\n self.sdf = self.sdf0\n self.nchan = len(data)\n print 'Initializing nchan:', self.nchan\n bls = []\n\n # build complete list of baselines\n bls.append(preamble[4])\n # end here. 
assume at least one instance of each bl occurs before ~six integrations (accommodates MWA)\n if len(bls) == 6*len(n.unique(bls)):\n blarr = []\n for bl in n.unique(bls):\n blarr.append(mirtask.util.decodeBaseline (bl))\n self.blarr = n.array(blarr)\n bldict = dict( zip(n.unique(bls), n.arange(len(blarr))) )\n break\n\n i = i+1\n\n # find number of pols in data\n uvd = mirtask.UVDataSet(self.file, 'rw')\n self.npol_orig = uvd.getNPol()\n pols = []\n for i in xrange(20): # loop over the first few spectra to find all polarizations in the data\n pols.append(uvd.getPol())\n uvd.next()\n uvd.close()\n upols = n.unique(pols) # get unique pols in first few spectra\n polstr = mirtask.util.polarizationName(upols[0])\n if len(upols) > 1:\n for pol in upols[1:]:\n polstr = polstr + ', ' + mirtask.util.polarizationName(pol)\n self.npol = len(selectpol)\n if self.npol > self.npol_orig:\n raise ValueError('Trying to select %d pols from %d available.' % (self.npol, self.npol_orig))\n for pol in selectpol:\n if not pol in polstr:\n raise ValueError('Trying to select %s, but %s available.' 
% (pol, polstr))\n print 'Initializing npol: %d (of %d, %s)' % (self.npol, self.npol_orig, polstr)\n\n # Initialize more stuff...\n self.freq_orig = self.sfreq + self.sdf * n.arange(self.nchan)\n self.freq = self.freq_orig[self.chans]\n\n # good baselines\n self.nbl = len(self.blarr)\n print 'Initializing nbl:', self.nbl\n self.ants = n.unique(self.blarr)\n self.nants = len(self.ants)\n print 'Initializing nants:', self.nants\n self.nskip = int(nskip*self.nbl) # number of iterations to skip (for reading in different parts of buffer)\n nskip = int(self.nskip)\n\n # define data arrays\n self.rawdata = n.zeros((nints, self.nbl, self.nchan, self.npol),dtype='complex64')\n self.flags = n.zeros((nints, self.nbl, self.nchan, self.npol),dtype='bool')\n self.u = n.zeros((nints,self.nbl),dtype='float64')\n self.v = n.zeros((nints,self.nbl),dtype='float64')\n self.w = n.zeros((nints,self.nbl),dtype='float64')\n self.preamble = n.zeros((nints*self.nbl,5),dtype='float64')\n\n # go back and read data into arrays\n for polnum in range(self.npol):\n stokes = selectpol[polnum]\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=nocal, nopass=nopass, stokes=stokes):\n # Loop to skip some data and read shifted data into original data arrays\n\n if i < nskip:\n i = i+1\n continue \n\n # assumes ints in order, but may skip. after nbl iterations, it fills next row, regardless of number filled.\n if (i-nskip) < nints*self.nbl:\n self.preamble[i-nskip] = preamble\n self.rawdata[(i-nskip)//self.nbl, bldict[preamble[4]], :, polnum] = data\n self.flags[(i-nskip)//self.nbl, bldict[preamble[4]], :, polnum] = flags\n # uvw stored in preamble index 0,1,2 in units of ns\n # Assumes miriad files store uvw in ns. 
Set to lambda by multiplying by freq of first channel.\n self.u[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[0] * self.freq_orig[0]\n self.v[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[1] * self.freq_orig[0]\n self.w[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[2] * self.freq_orig[0]\n else:\n break # stop at nints\n\n if not (i % (self.nbl*100)):\n print 'Read spectrum ', str(i)\n\n i = i+1\n\n time = self.preamble[::self.nbl,3]\n\n if ((not n.any(self.rawdata)) & (not n.any(time))):\n raise ValueError('rawdata and time arrays at default values. No data read?')\n\n # limit the data to actually real data (DLK)\n maxgoodtime=max(n.where(time>0)[0])\n if maxgoodtime+1 < nints:\n print 'Requested to read %d integrations, but only found %d good integrations' % (nints,\n maxgoodtime)\n # need to trim off some of the data\n time=time[:maxgoodtime]\n self.nints=len(time)\n self.u=self.u[:maxgoodtime]\n self.v=self.v[:maxgoodtime]\n self.w=self.w[:maxgoodtime]\n self.rawdata=self.rawdata[:maxgoodtime]\n self.flags=self.flags[:maxgoodtime]\n \n self.reltime = 24*3600*(time - time[0]) # relative time array in seconds. evla times change...?\n # preserve absolute time (DLK)\n self.time=time\n self.inttime = n.array([self.reltime[i+1] - self.reltime[i] for i in xrange(len(self.reltime)/5,len(self.reltime)-1)]).mean()\n\n # define relative phase center for each integration\n self.l0 = n.zeros(self.nints)\n self.m0 = n.zeros(self.nints)\n\n # print summary info\n print\n print 'Shape of raw data, time:'\n print self.rawdata.shape, self.reltime.shape",
"def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data",
"def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)",
"def processParticleData(imgdata, boxsize, partdatas, shiftdata, boxfile, rotate=False):\n imgdims={}\n imgdims['x'] = imgdata['image'].shape[1]\n imgdims['y'] = imgdata['image'].shape[0]\n #imgdims = imgdata['camera']['dimension']\n if rotate is True:\n ### with rotate we use a bigger boxsize\n halfbox = int(1.5*boxsize/2)\n else:\n halfbox = boxsize/2\n \n parttree = []\n boxedpartdatas = []\n eliminated = 0\n user = 0\n noangle = 0\n\n ### normal single particle\n f = open(boxfile, 'w')\n for i in range(len(partdatas)):\n partdata = partdatas[i]\n ### require particle with rotation\n if rotate is True and partdata['angle'] is None:\n noangle += 1\n continue\n\n ### xcoord is the upper left area corner of the particle box\n start_x,start_y = getBoxStartPosition(imgdata,halfbox,partdata, shiftdata)\n if checkBoxInImage(imgdims,start_x,start_y,boxsize):\n partdict = {\n 'x_coord': start_x,\n 'y_coord': start_y,\n 'angle': partdata['angle'],\n }\n parttree.append(partdict)\n boxedpartdatas.append(partdata)\n f.write(\"%d\\t%d\\t%d\\t%d\\t-3\\n\"%(start_x,start_y,boxsize,boxsize))\n else:\n eliminated += 1\n f.close()\n \n if eliminated > 0:\n apDisplay.printMsg(str(eliminated)+\" particle(s) eliminated because they were out of bounds\")\n if user > 0:\n apDisplay.printMsg(str(user)+\" particle(s) eliminated because they were 'user' labeled targets\")\n if noangle > 0:\n apDisplay.printMsg(str(noangle)+\" particle(s) eliminated because they had no rotation angle\")\n\n return parttree, boxedpartdatas",
"def processData(args):\n inputfile=args.inputfile\n threshold=args.threshold\n mask_file=args.maskfile\n crop=args.crop\n voxelsize=args.voxelsize\n binaryClosing=args.binaryclosing\n binaryOpening=args.binaryopening\n vtfile=args.vtfile\n vt2esofspy=args.vt2esofspy\n output_report_csv_file=args.output_report_csv_file\n \n # Reading/Generating data\n if inputfile is None: # # Using generated sample data\n logger.info('Generating sample data...')\n metadata = {'voxelsize_mm': [1, 1, 1]}\n data3d = generate_sample_data(1, 0, 0)\n else: # Normal runtime\n dr = datareader.DataReader()\n data3d, metadata = dr.Get3DData(inputfile)\n\n # Custom voxel size\n if voxelsize is not None:\n metadata['voxelsize_mm'] = voxelsize\n\n # Crop data\n if crop is not None:\n logger.debug('Croping data: %s', str(crop))\n data3d = data3d[crop[0]:crop[1], crop[2]:crop[3], crop[4]:crop[5]].copy()\n\n # Init HistologyAnalyser object\n logger.debug('Init HistologyAnalyser object')\n ha = HistologyAnalyser(data3d, metadata, threshold, \n binaryClosing=binaryClosing, binaryOpening=binaryOpening, \n nogui=True, aggregate_near_nodes_distance=args.aggregatenearnodes,\n hist_length_range=args.hist_length_range,\n hist_radius_range=args.hist_radius_range\n )\n\n # Remove Area = Load mask from file\n if mask_file is not None:\n logger.debug('Loading mask from file...')\n mask = misc.obj_from_file(filename=mask_file, filetype='pickle')\n if ha.data3d.shape == mask.shape:\n ha.data3d_masked = mask\n ha.data3d[mask == 0] = np.min(ha.data3d)\n else:\n raise ValueError('Mask file has wrong dimensions '+str(mask.shape))\n \n # Segmentation\n logger.debug('Segmentation')\n ha.data_to_skeleton()\n\n # Computing statistics\n logger.info(\"# ## ## ## ## statistics\")\n ha.data_to_statistics()\n\n # Saving files\n logger.info(\"# ## ## write stats to file\")\n ha.writeStatsToCSV()\n if vtfile is not None:\n ha.writeStatsToYAML(vtfile)\n if vt2esofspy is not None:\n ha.exportVT2esofspy(vt2esofspy)\n\n # ## 
Histology report\n logger.info(\"# ## ## Histology report\")\n hr = HistologyReport(ha.hr_hist_length_range, ha.hr_hist_radius_range)\n hr.data = ha.stats\n\n # Add results Record\n if crop is not None:\n label = str(crop[0])+\"-\"+str(crop[1])\n else:\n label = \"0-end\"\n # pass label into addResultRecord with stats\n hr.data['general']['label'] = label\n\n hr.generateStats()\n # TODO Rename functions\n hr.writeReportToCSV()\n hr.writeReportToYAML()\n\n \n if inputfile is None:\n hr.addResultsRecord(label=label, recordfilename=output_report_csv_file)\n else:\n hr.addResultsRecord(label=label, datapath=inputfile, recordfilename=output_report_csv_file)\n\n # ## End\n logger.info('Finished')",
"def processData(self,data_file,directory,comment = '',way='mpcc',opt=[1024]):\n self.dirs = [name for name in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, name))]\n\n # directory names are names of instruments\n #self.dirs =\n # ['cel', 'cla', 'flu', 'gac', 'gel', 'org', 'pia', 'sax', 'tru', 'vio', 'voi']\n self.dirs = [name for name in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, name))]\n \n # example: self.files['sax'] =\n # IRMAS-TrainingData\\sax\\006__[sax][nod][cla]1686__1.wav\n self.files = {}\n for d in self.dirs:\n self.files[d] = [] \n sub_dir = os.path.join(directory, d)\n for filename in glob.glob(os.path.join(sub_dir, '*.wav')):\n self.files[d].append(filename)\n\n # Ex) self.output['cel']: np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i = 0\n for name in self.dirs:\n temp = []\n for j in range(len(self.dirs)):\n if i == j:\n temp.append(1)\n else:\n temp.append(0)\n self.output[name] = np.array(temp)\n i +=1\n\n #self.X = [] # list of input vectors\n #self.Y = [] # list of output vectors\n t0 = time.time()\n for name in self.dirs:\n t1 = time.time()\n for file in self.files[name]:\n #input_vector = processFile(file,length=length1,q=q1,fs_in=fs_in1,divide=divide1,plot = False)\n if way == 'mpcc':\n input_vector = processMPCC(file,*opt)\n elif way == 'fft':\n input_vector = processFFT(file,*opt)\n else:\n raise ValueError('Invalid Way, valid types include: \\'mpcc\\' or \\'fft\\'')\n if input_vector != 'failed':\n self.X.append(input_vector)\n self.Y.append(self.output[name])\n print('Time take to process '+str(name)+ ': ' + str((time.time()-t1)/60)[0:4] + ' min.')\n print('Total Processing Time: ' + str((time.time()-t0)/60)[0:4] + ' min.')\n\n # Now we can store all of the data in a json\n # Need to store self.X, self.Y, self.dirs,self.output,self.files,self.data\n # self.dirs is a list of strings -> fine\n # self.files is a dict() with string:string -> fine\n # self.output is a dict() with string:np.array\n 
output = {}\n for d in self.output:\n out_list = []\n for value in self.output[d]:\n out_list.append(int(value))\n output[d] = out_list # -> fine\n #self.X is a list of np.arrays\n X = []\n for i in range(len(self.X)):\n x = []\n for ele in self.X[i]:\n x.append(float(ele))\n X.append(x) # -> fine\n #self.Y is a list of np.arrays\n Y = []\n for i in range(len(self.Y)):\n y = []\n for ele in self.Y[i]:\n y.append(float(ele))\n Y.append(y) # -> fine\n \n store = {}\n store['dirs'] = self.dirs # good\n store['output'] = output # good\n store['files'] = self.files # good\n store['X'] = X # good\n store['Y'] = Y # good\n store['comment'] = comment\n with open(data_file, 'w') as outfile:\n json.dump(store, outfile)\n print('Preprocessed data stored in ' + str(data_file))\n return",
"def firstpass(data, pbc, symbols, units):\n # Get units information\n units_dict = style.unit(units)\n \n # Initialize parameter values\n atomsstart = None\n velocitiesstart = None\n natoms = None\n natypes = None\n firstatoms = False\n atomscolumns = 0\n masses = None\n num_masses_to_read = 0\n xlo = xhi = ylo = yhi = zlo = zhi = None\n xy = 0.0\n xz = 0.0\n yz = 0.0\n i = 0\n \n # Read str and files in the same way\n with uber_open_rmode(data) as fp:\n \n # Loop over all lines in fp\n for i, fullline in enumerate(fp):\n try:\n fullline = fullline.decode('UTF-8')\n except:\n pass\n \n # Remove comments after '#'\n try:\n comment_index = fullline.index('#')\n except:\n line = fullline\n else:\n line = fullline[:comment_index]\n \n terms = line.split()\n\n # Skip blank lines\n if len(terms)>0:\n \n # Read number of atoms \n if len(terms) == 2 and terms[1] == 'atoms':\n natoms = int(terms[0])\n\n # Read number of atom types\n elif len(terms) == 3 and terms[1] == 'atom' and terms[2] == 'types': \n natypes = int(terms[0])\n \n # Read boundary info\n elif len(terms) == 4 and terms[2] == 'xlo' and terms[3] == 'xhi':\n xlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n xhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'ylo' and terms[3] == 'yhi':\n ylo = uc.set_in_units(float(terms[0]), units_dict['length'])\n yhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'zlo' and terms[3] == 'zhi':\n zlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n zhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 6 and terms[3] == 'xy' and terms[4] == 'xz' and terms[5] == 'yz':\n xy = uc.set_in_units(float(terms[0]), units_dict['length'])\n xz = uc.set_in_units(float(terms[1]), units_dict['length'])\n yz = uc.set_in_units(float(terms[2]), units_dict['length'])\n \n # Identify starting line number for Atoms data\n elif len(terms) == 1 
and terms[0] == 'Atoms':\n atomsstart = i + 1\n firstatoms = True\n\n # Check for atom_style comment\n try: \n comment_index = fullline.index('#')\n except:\n atom_style = None\n else:\n atom_style = fullline[comment_index + 1:].strip()\n \n # Count number of columns in Atoms table\n elif firstatoms:\n atomscolumns = len(terms)\n firstatoms = False\n \n # Identify starting line for Masses data\n elif len(terms) == 1 and terms[0] == 'Masses':\n if natypes is None:\n raise FileFormatError('# atom types must appear before Masses list')\n masses = [None for i in range(natypes)]\n num_masses_to_read = natypes\n \n # Read masses\n elif num_masses_to_read > 0:\n read_mass(terms, masses)\n num_masses_to_read -= 1\n\n # Identify starting line number for Velocity data\n elif len(terms) == 1 and terms[0] == 'Velocities':\n velocitiesstart = i + 1\n \n if i == 0:\n raise FileNotFoundError(f'File {data} not found')\n\n if natoms is None:\n raise FileFormatError('# atoms not found')\n\n if xlo is None or xhi is None:\n raise FileFormatError('xlo, xhi box dimensions missing')\n\n if ylo is None or yhi is None:\n raise FileFormatError('ylo, yhi box dimensions missing')\n\n if zlo is None or zhi is None:\n raise FileFormatError('zlo, zhi box dimensions missing')\n\n if atomsstart is None:\n raise FileFormatError('Atoms section missing')\n\n # Create system with natoms\n box = Box(xlo=xlo, xhi=xhi,\n ylo=ylo, yhi=yhi,\n zlo=zlo, zhi=zhi,\n xy=xy, xz=xz, yz=yz)\n atoms = Atoms(natoms=natoms)\n system = System(box=box, atoms=atoms, pbc=pbc, symbols=symbols,\n masses=masses)\n\n # Compile dict of params\n params = {}\n params['atomsstart'] = atomsstart\n params['velocitiesstart'] = velocitiesstart\n params['atomscolumns'] = atomscolumns\n params['atom_style'] = atom_style\n\n return system, params",
"def standardBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,solver.operating:-solver.operating,solver.operating:-solver.operating] = solver.initialConditions[solver.globalBlock]\n solver.sharedArray[i,:,solver.operating:-solver.operating,:solver.operating] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],-solver.operating-1:-1]\n solver.sharedArray[i,:,solver.operating:-solver.operating,-solver.operating:] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],1:solver.operating+1]\n #Create phase objects\n solver.standard = geometry.Geometry() \n solver.standard.setAdjustment(solver.operating)\n #Setting up GPU\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUStandard(solver)\n #Setup CPU\n setupCPUStandard(solver)\n solver.comm.Barrier() #Ensure all processes are"
] | [
"0.592895",
"0.5765463",
"0.55587965",
"0.5380478",
"0.53757083",
"0.5369531",
"0.5328402",
"0.5295265",
"0.5274195",
"0.52280647",
"0.51929116",
"0.5161996",
"0.51604706",
"0.51590586",
"0.5144043",
"0.51424474",
"0.51381207",
"0.51356965",
"0.51342386",
"0.51281947",
"0.5111206",
"0.51107085",
"0.50932443",
"0.5089226",
"0.5061803",
"0.50536746",
"0.50492555",
"0.5045962",
"0.50406027",
"0.503823"
] | 0.58804005 | 1 |
Wrapper for the phase_single_block() function. Carries a block_id through to the results. Creates a local submatrix without modifying the given allele matrix | def phase_single_block_mt(
allele_matrix,
partial_phasing,
block_id,
start,
end,
genotype_slice,
param,
timers,
job_id,
num_blocks,
quiet=False,
):
submatrix = allele_matrix.extractInterval(start, end)
subphasing = partial_phasing.extractInterval(start, end) if partial_phasing else None
block_vars = submatrix.getNumPositions()
if block_vars > 1 and not quiet:
logger.info(
f"Phasing block {job_id + 1} of {num_blocks} with {len(submatrix)} reads and {block_vars} variants."
)
result = phase_single_block(
block_id, submatrix, genotype_slice, subphasing, param, timers, quiet
)
if block_vars > 1 and not quiet:
logger.info(f"Finished block {job_id + 1}.")
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def phase_single_block(block_id, allele_matrix, genotypes, prephasing, param, timers, quiet=False):\n\n block_num_vars = allele_matrix.getNumPositions()\n\n # Check for singleton blocks and handle them differently (for efficiency reasons)\n if block_num_vars == 1:\n\n # construct trivial solution for singleton blocks, by using the genotype as phasing\n g = genotypes[0]\n clusts = [[i for i, r in enumerate(allele_matrix) if r and r[0][1] == a] for a in g]\n threads = [sorted(list(chain(*[[i] * g[a] for i, a in enumerate(g)])))]\n haps = sorted(list(chain(*[[[a]] * g[a] for a in g])))\n return PolyphaseBlockResult(block_id, clusts, threads, haps, [])\n\n # Block is non-singleton here, so run the normal routine\n # Phase I: Cluster Editing\n\n # Compute similarity values for all read pairs\n timers.start(\"read_scoring\")\n logger.debug(\"Computing similarities for read pairs ..\")\n sim = scoreReadset(allele_matrix, param.min_overlap, param.ploidy, 0.07)\n timers.stop(\"read_scoring\")\n\n # Run cluster editing\n timers.start(\"clustering\")\n logger.debug(\n f\"Solving cluster editing instance with {len(allele_matrix)} nodes and {len(sim)} edges ..\"\n )\n solver = ClusterEditingSolver(sim, param.ce_bundle_edges)\n clustering = solver.run()\n del solver\n del sim\n\n # Add trailing isolated nodes to single-ton clusters, if missing\n nodes_in_c = sum(len(c) for c in clustering)\n for i in range(nodes_in_c, len(allele_matrix)):\n clustering.append([i])\n\n timers.stop(\"clustering\")\n\n # Phase II: Threading\n\n # Assemble clusters to haplotypes\n logger.debug(f\"Threading haplotypes through {len(clustering)} clusters ..\\r\")\n timers.start(\"threading\")\n\n # Add dynamic programming for finding the most likely subset of clusters\n threads, haplotypes = run_threading(\n allele_matrix,\n clustering,\n param.ploidy,\n genotypes,\n distrust_genotypes=param.distrust_genotypes,\n )\n timers.stop(\"threading\")\n\n # Phase III: Reordering\n\n logger.debug(\"Reordering 
ambiguous sites ..\\r\")\n timers.start(\"reordering\")\n\n # Recursively resolve collapsed regions in clusters\n sub_instances = find_subinstances(allele_matrix, clustering, threads, haplotypes)\n sub_results = []\n sub_param = copy(param)\n sub_param.ignore_phasings = True\n sub_param.threads = 1\n for cid, thread_set, subm in sub_instances:\n snps = [allele_matrix.globalToLocal(gpos) for gpos in subm.getPositions()]\n assert all([0 <= pos < allele_matrix.getNumPositions() for pos in snps])\n subhaps = [[haplotypes[i][pos] for i in thread_set] for pos in snps]\n subgeno = [{a: h.count(a) for a in h} for h in subhaps]\n sub_param.ploidy = len(thread_set)\n timers.stop(\"reordering\")\n res = solve_polyphase_instance(subm, subgeno, sub_param, timers, quiet=True)\n timers.start(\"reordering\")\n sub_results.append(res)\n\n # collect breakpoints of sub-instances and overall instance. Update threads/haplotypes\n breakpoints = integrate_sub_results(\n allele_matrix, sub_instances, sub_results, threads, haplotypes\n )\n del sub_instances\n del sub_results\n\n # reorder pieces\n run_reordering(allele_matrix, clustering, threads, haplotypes, breakpoints, prephasing)\n\n timers.stop(\"reordering\")\n\n # collect results from threading\n return PolyphaseBlockResult(\n block_id=block_id,\n clustering=[[allele_matrix.getGlobalId(r) for r in c] for c in clustering],\n threads=threads,\n haplotypes=haplotypes,\n breakpoints=breakpoints,\n )",
"def _block_to_full(block_mat, inverse, shape):\n # block_map = cartprod(inverse[0], inverse[1]).T\n block_map = cartprod(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat",
"def _block_to_full(\n block_mat: np.ndarray, inverse: np.ndarray, shape: Tuple[int, ...]\n) -> np.ndarray:\n block_map = cartesian_product(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat",
"def standardBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,solver.operating:-solver.operating,solver.operating:-solver.operating] = solver.initialConditions[solver.globalBlock]\n solver.sharedArray[i,:,solver.operating:-solver.operating,:solver.operating] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],-solver.operating-1:-1]\n solver.sharedArray[i,:,solver.operating:-solver.operating,-solver.operating:] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],1:solver.operating+1]\n #Create phase objects\n solver.standard = geometry.Geometry() \n solver.standard.setAdjustment(solver.operating)\n #Setting up GPU\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUStandard(solver)\n #Setup CPU\n setupCPUStandard(solver)\n solver.comm.Barrier() #Ensure all processes are",
"def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)",
"def block_adjacency_matrix(self, block_id, ctx): # -> tuple[Unknown, Unknown]:\n ...",
"def block(B):\n return np.array(np.bmat(B))",
"def blocks_to_matrix(blocks, frame, orbs):\n\n io_base, _ = orbs_base(orbs)\n norbs = 0\n for el in list(frame.symbols):\n norbs+= len(orbs[el])\n nat = len(list(frame.symbols))\n unfock = np.zeros((norbs, norbs))\n\n bidx = {}\n for k in blocks.keys():\n bidx[k] = {}\n for bk in blocks[k].keys():\n bidx[k][bk] = 0\n cur_a = ()\n ki = 0\n nat = len(frame.numbers)\n for i in range(nat):\n el_a = frame.symbols[i]\n cur_a = ()\n for ia, oa in enumerate(orbs[el_a]):\n na, la, ma = oa\n na += io_base[el_a]\n # we read the Hamiltonian in blocks\n if (cur_a == (na,la)): continue\n cur_a = (na,la)\n kj = 0\n for j in range(nat):\n el_b = frame.symbols[j]\n cur_b = ()\n for ib, ob in enumerate(orbs[el_b]):\n nb, lb, mb = ob\n nb += io_base[el_b] # adds element offset\n if (cur_b == (nb,lb)): continue # only read at the beginning of each m block\n cur_b = (nb,lb)\n if (nb<na or (nb==na and lb<la)): continue\n orb = (na,la,nb,lb)\n if (i==j):\n blockij = blocks['diag'][orb][bidx['diag'][orb]]\n unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[ki+ib:ki+ib+2*lb+1, kj+ia:kj+ia+2*la+1] = blockij.T\n bidx['diag'][orb] += 1\n elif (el_a == el_b and i<j):\n blockij = ( ( blocks['offd_p'][orb][bidx['offd_p'][orb]] if orb in blocks['offd_p'] else 0)\n + ( blocks['offd_m'][orb][bidx['offd_m'][orb]] if orb in blocks['offd_m'] else 0)\n )/np.sqrt(2)\n blockji = ( ( blocks['offd_p'][orb][bidx['offd_p'][orb]] if orb in blocks['offd_p'] else 0)\n - ( blocks['offd_m'][orb][bidx['offd_m'][orb]] if orb in blocks['offd_m'] else 0)\n )/np.sqrt(2)\n unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[kj+ib:kj+ib+2*lb+1, ki+ia:ki+ia+2*la+1] = blockij.T\n unfock[kj+ia:kj+ia+2*la+1, ki+ib:ki+ib+2*lb+1] = blockji\n unfock[ki+ib:ki+ib+2*lb+1, kj+ia:kj+ia+2*la+1] = blockji.T\n if orb in bidx['offd_p']:\n bidx['offd_p'][orb] += 1\n if orb in bidx['offd_m']:\n bidx['offd_m'][orb] += 1\n elif (el_a != el_b):\n blockij = blocks['hete'][orb][bidx['hete'][orb]]\n 
unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[kj+ib:kj+ib+2*lb+1, ki+ia:ki+ia+2*la+1] = blockij.T\n bidx['hete'][orb] += 1\n kj += len(orbs[el_b])\n ki += len(orbs[el_a])\n return unfock",
"def BlockToMatrix(self):\n for h in range(height):\n for w in range(width):\n if self.matrix[h][w] == 2:\n self.matrix[h][w] = 0\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 2",
"def solve_polyphase_instance(\n allele_matrix, genotype_list, param, timers, partial_phasing=None, quiet=False\n):\n num_vars = len(allele_matrix.getPositions())\n\n # Precompute block borders based on read coverage and linkage between variants\n if not quiet:\n logger.info(\"Detecting connected components with weak interconnect ..\")\n timers.start(\"detecting_blocks\")\n\n ploidy = param.ploidy\n sl = param.block_cut_sensitivity <= 1\n block_starts = compute_block_starts(allele_matrix, ploidy, single_linkage=sl)\n\n # Set block borders and split readset\n block_starts.append(num_vars)\n num_blocks = sum(1 for i, j in zip(block_starts[:-1], block_starts[1:]) if j > i + 1)\n if not quiet:\n logger.info(\n f\"Split heterozygous variants into {num_blocks} blocks (and {len(block_starts) - num_blocks - 1} singleton blocks).\"\n )\n\n # Process blocks independently\n results = []\n processed_blocks = 0\n timers.stop(\"detecting_blocks\")\n\n \"\"\"\n Python's multiprocessing makes hard copies of the passed arguments, which is not trivial for\n cython objects, especially when they contain pointers to other cython objects. Any passed\n object must be (de)serializable (in Python: pickle). All other objects created in the main\n thread are also accessible by the workers, but they are handled via the copy-on-write policy.\n This means, that e.g. the large main matrix is not hardcopied for every thread, as long as it\n is not modified there. 
This must be ensured to prevent a massive waste of memory consumption.\n \"\"\"\n if param.threads == 1:\n # for single-threading, process everything individually to minimize memory footprint\n for block_id, (start, end) in enumerate(zip(block_starts[:-1], block_starts[1:])):\n submatrix = allele_matrix.extractInterval(start, end)\n subphasing = partial_phasing.extractInterval(start, end) if partial_phasing else None\n if end - start > 1:\n processed_blocks += 1\n if not quiet:\n logger.info(\n f\"Processing block {processed_blocks} of {num_blocks} with {len(submatrix)} reads and {end - start} variants.\"\n )\n results.append(\n phase_single_block(\n block_id, submatrix, genotype_list[start:end], subphasing, param, timers, quiet\n )\n )\n del submatrix\n\n else:\n # sort block by descending size (4/3-approximation for scheduling problem)\n timers.start(\"phase_blocks\")\n joblist = list(zip(range(len(block_starts)), block_starts[:-1], block_starts[1:]))\n joblist.sort(key=lambda x: x[1] - x[2])\n\n with Pool(processes=param.threads) as pool:\n process_results = [\n pool.apply_async(\n phase_single_block_mt,\n (\n allele_matrix,\n partial_phasing,\n block_id,\n start,\n end,\n genotype_list[start:end],\n param,\n timers,\n job_id,\n num_blocks,\n quiet,\n ),\n )\n for job_id, (block_id, start, end) in enumerate(joblist)\n ]\n # collect all blockwise results\n blockwise_results = [res.get() for res in process_results]\n results = sorted(blockwise_results, key=lambda x: x.block_id)\n\n timers.stop(\"phase_blocks\")\n\n # Aggregate blockwise results\n if partial_phasing and param.block_cut_sensitivity == 0:\n # For lowest sensitivity, do not add block starts to global breakpoint list\n # (unless the partial phasing is also interrupted there)\n borders = {partial_phasing.getFirstPos(i) for i in range(len(partial_phasing))}\n else:\n borders = []\n return aggregate_results(results, ploidy, borders)",
"def block_process(a, blocksize, filt):\n block = np.empty(a.shape)\n for row in range(0, a.shape[0], blocksize):\n for col in range(0, a.shape[1], blocksize):\n block[row:row + blocksize, col:col + blocksize] = (\n filt(a[row:row + blocksize, col:col + blocksize]))\n return block",
"def _initBlock(o,block):\n o.block = block.clone().shift(*o.board.startPosition)",
"def identity_block(self, input_tensor, filters, activation, stage, block, start_with_batch_norm = True):\n\n # Set the naming convention\n conv_name_base = 'res' + str(stage) + block + '_branch'\n batch_norm_name_base = 'batch_norm' + str(stage) + block + '_branch'\n\n x_shortcut = input_tensor\n x = None\n\n if start_with_batch_norm:\n # Start with Batch Normalization\n x = BatchNormalization(axis = 3, name = batch_norm_name_base + '_1')(input_tensor)\n\n x = Activation(activation)(x)\n \n else:\n x = input_tensor\n \n # Main Path Block a\n x = self.main_path_block(\n x,\n filters,\n (3, 3),\n 'same',\n conv_name_base + '2a',\n batch_norm_name_base + '2a',\n activation = activation\n )\n\n # Main Path Block b\n x = self.main_path_block(\n x, filters,\n (3, 3), 'same',\n conv_name_base + '2b',\n batch_norm_name_base + '2b',\n batch_norm = False,\n activation = None\n )\n\n # Add skip connection\n x = Add()([x, x_shortcut])\n\n return x",
"def add_block(\n matrix, block, block_i, block_j, factor, banded\n): # pylint: disable=too-many-arguments\n block_size, block_size_y = block.shape\n assert block_size == block_size_y, \"Only square blocks allowed\"\n if banded:\n u = block_size\n for i in range(block_size):\n for j in range(block_size):\n actual_i = block_i * block_size + i\n actual_j = block_j * block_size + j\n if actual_i > actual_j:\n continue\n matrix[u + actual_i - actual_j, actual_j] += factor * block[i, j]\n else:\n matrix[\n block_size * block_i : block_size * (block_i + 1),\n block_size * block_j : block_size * (block_j + 1),\n ] += (factor * block)",
"def _fp32_mte_process_lt_one_block(axis_1_lp_index, sub_axis_1):\n\n def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n \"\"\"\n inner process of last dim less than one block\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)\n\n with tik_inst.for_range(0, no_core_loop_cnt) as axis_0_lp_idx:\n 
_fp32_inner_last_dim_lt_one_block(axis_0_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_inner_last_dim_lt_one_block(no_core_loop_cnt, no_core_left)",
"def sweptBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,:,:] = solver.initialConditions[solver.globalBlock]\n #Create phase objects\n solver.Up = geometry.Geometry() \n solver.Down = geometry.Geometry() \n solver.Xb = geometry.Geometry()\n solver.Yb = geometry.Geometry()\n solver.Oct = geometry.Geometry() \n\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUSwept(solver)\n #Setting up CPU\n setupCPUSwept(solver)\n solver.comm.Barrier() #Ensure all processes are",
"def soti_block_slab(size, p , q, nu, zu, t = -1, M = 2.3, D1 = 0.8, D2 = 0.5):\n # put unit_blocks into diag\n \n # make blocks array with dims (size,4q,4q)\n blocks = np.zeros((size,4*q,4*q),dtype=complex) \n \n # fill up\n #xs = linspace(0,size,num=size) # for completeness\n for i in range(size):\n #x = xs[i] # doesn't actually do anything\n blocks[i,:,:] = unit_block_slab(p=p,q=q,nu=nu,zu=zu,t=t,M=M,D1=D1,D2=D2)\n \n # put in diagonal\n M_diags = ss.block_diag(blocks)\n \n # off diagonals x -> x+1 & h.c.\n hop_x = 1/2 * (t * pms.s0_tz() + 1j * D1 * pms.sx_tx() + D2 * pms.s0_ty())\n hop_x_dag = hop_x.conj().T\n \n # fill up to identity\n hop_x_mat = np.kron(np.eye(N=size), hop_x)\n hop_x_mat_dag = np.kron(np.eye(N=size), hop_x_dag)\n \n # put these \"identity\" matrices on the off-diagonals\n ### double check the math for this section please\n M_top_diag = np.kron(np.diag(np.ones(size-1), k=1), hop_x_mat)\n M_bot_diag = np.kron(np.diag(np.ones(size-1), k=-1), hop_x_mat_dag)\n \n M_off_diags = M_top_diag + M_bot_diag\n \n MAT = M_diags + M_off_diags\n \n return MAT",
"def to_basic_block(self) -> \"gr::basic_block_sptr\":\n return _beamforming_swig.phasedarray_sptr_to_basic_block(self)",
"def _unroll_block_matrix(mat1: tf.Tensor) -> tf.Tensor:\n n_dim, m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [n_dim, m1, 1, n1])\n mat2 = tf.eye(n_dim, dtype=tf.float64)\n mat2_rsh = tf.reshape(mat2, [n_dim, 1, n_dim, 1])\n return tf.reshape(mat1_rsh * mat2_rsh, [n_dim * m1, n_dim * n1])",
"def matBlock(a, b, c, d):\n arows=matShape(a)[0]\n acols=matShape(a)[1]\n shape1=arows+matShape(c)[0]\n shape2=acols+matShape(b)[1]\n ret=matZeros((shape1,shape2))\n for i in range(shape1):\n for j in range(shape2):\n val=0\n if i<arows:\n val=matGet((a if j<acols else b), i, (j if j<acols else j-acols))\n else:\n val=matGet((c if j<acols else d), i-arows, (j if j<acols else j-acols))\n matSet(ret,i,j,val)\n return ret",
"def unblock(arr: np.ndarray, n1: int, n2: int, axis1: int = -1, axis2: int = -2, blocksize: bool = False) -> np.ndarray:\n\n \"\"\" test (stackoverflow): Ok, so considering I have N block matrices with bm x bn dimension and want to stack them in a m x n matrix, provided N = m x n, I would then have x.reshape(m,n,bm,bn).swapaxes(1,2).reshape(bm*m,-1)\n \"\"\"\n\n s = np.array(arr.shape)\n if s[axis1] % n1 != 0 or s[axis2] % n2 != 0:\n raise ValueError(f\"{s[axis1]}x{s[axis2]} does not divide by {n1}x{n2}\")\n\n if blocksize:\n n1 = s[axis1] // n1\n n2 = s[axis2] // n2\n\n # this first .split adds a new dimensions on the outside, so if a absolute index\n # is given for the second axis it must be moved one to the right\n if axis2 >= 0:\n _axis2 = axis2 + 1\n else:\n _axis2 = axis2\n\n arr = np.array(np.split(arr, n1, axis1))\n arr = np.array(np.split(arr, n2, _axis2))\n\n inv_blocksize = n1 * n2\n total = s[axis1] * s[axis2]\n s[axis2] = inv_blocksize\n s[axis1] = total // inv_blocksize\n\n return np.reshape(arr, s)",
"def apply_block(self, block_id, func=..., edges=..., inplace=...): # -> None:\n ...",
"def form_square_block_matrix(mat1,mat2):\n if mat1.cols==1:\n mat3 = mp.matrix(mat1.rows+mat2.rows,1)\n mat3[:mat1.rows] = mat1[:]\n mat3[mat1.rows:mat3.rows] = mat2[:]\n else:\n mat3 = mp.matrix(mat1.rows+mat2.rows, mat1.rows+mat2.rows)\n mat3[:mat1.rows,:mat1.rows] = mat1[:,:]\n mat3[mat1.rows:mat3.rows,mat1.rows:mat3.rows] = mat2[:,:]\n return mat3",
"def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)",
"def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e",
"def build_A_block(self):\n N = self.N # number of MPC steps\n A = self.A\n \n row_list = [A] # reocrd the every row in B_hat\n \n for i in range(1, N):\n A = A.mm(self.A)\n row_list.append(A)\n return torch.cat(row_list,0)",
"def clean_and_copy_parmdb(instrument_name, instrument_out, blockl,\n flag_filler=False, msname=None, timepersample=10.0):\n pdb = lofar.parmdb.parmdb(instrument_name)\n parms = pdb.getValuesGrid(\"*\")\n keynames = parms.keys()\n filler = int(blockl/2.0)\n\n # Set phases to zero\n for key in keynames:\n if 'Phase' in key:\n tmp1 = np.copy(parms[key]['values'][filler-1:-filler-2,0])\n tmp1 = tmp1*0.0\n parms[key]['values'][filler-1:-filler-2,0] = tmp1\n lofar.expion.parmdbmain.store_parms(instrument_out, parms, create_new=True)\n\n if flag_filler:\n # Flag data for times that won't be filled with time-correlated solutions\n ms = pt.table(msname, readonly=False)\n starttime = ms[0]['TIME']\n endtime = ms[ms.nrows()-1]['TIME']\n\n end_block = blockl / 2.0 * timepersample\n tabStationSelection = ms.query('TIME > ' + str(starttime)\n + ' && TIME < ' + str(starttime+end_block),\n sortlist='TIME,ANTENNA1,ANTENNA2')\n tabStationSelection.putcol(\"FLAG_ROW\", numpy.ones(filler, dtype=bool))\n tabStationSelection.putcol(\"FLAG\", numpy.ones((filler, 4), dtype=bool))\n tabStationSelection.close()\n\n start_block = endtime - (blockl / 2.0 * timepersample)\n tabStationSelection = ms.query('TIME > ' + str(starttime+start_block)\n + ' && TIME < ' + str(endtime),\n sortlist='TIME,ANTENNA1,ANTENNA2')\n tabStationSelection.putcol(\"FLAG_ROW\", numpy.ones(filler, dtype=bool))\n tabStationSelection.putcol(\"FLAG\", numpy.ones((filler, 4), dtype=bool))\n tabStationSelection.close()\n ms.close()",
"def split(self, block: ghidra.program.model.mem.MemoryBlock, addr: ghidra.program.model.address.Address) -> None:\n ...",
"def _residual_block(input, id_block, conv_block, mid_f, output_f, repetitions, stage, is_first_layer=False):\n\n for i in range(repetitions):\n if i == 0 and is_first_layer is True:\n input = conv_block(mid_f, output_f, stage, i, input, stride=(1, 1))\n elif i == 0 and is_first_layer is False:\n input = conv_block(mid_f, output_f, stage, i, input)\n else:\n input = id_block(mid_f, output_f, stage, i, input)\n return input",
"def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 
0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)"
] | [
"0.7084021",
"0.5748252",
"0.5678491",
"0.5542582",
"0.5512303",
"0.55118346",
"0.54758036",
"0.5363573",
"0.5286815",
"0.5286382",
"0.5283284",
"0.5243682",
"0.5229807",
"0.5212749",
"0.5199913",
"0.5186671",
"0.5162968",
"0.51537496",
"0.5136429",
"0.5083988",
"0.5079707",
"0.50757235",
"0.5064226",
"0.5041985",
"0.5011712",
"0.5003123",
"0.50002116",
"0.49757037",
"0.49684992",
"0.4965934"
] | 0.72714204 | 0 |
Collects all blockwise phasing results and aggregates them into one list for each type of information. Local ids and indices are converted to globals ones in this step. | def aggregate_results(results: List[PolyphaseBlockResult], ploidy: int, borders: List[int]):
clustering, threads, breakpoints = [], [], []
haplotypes = [[] for _ in range(ploidy)]
cid_offset, pos_offset = 0, 0
for r in results:
clustering += [clust for clust in r.clustering]
threads += [[cid_offset + cid for cid in p] for p in r.threads]
for hap, ext in zip(haplotypes, r.haplotypes):
hap += ext
# Add the start of a block as breakpoint, unless a partial phasing bridges the blocks
if not borders or pos_offset in borders or pos_offset == 0:
breakpoints.append(PhaseBreakpoint(pos_offset, list(range(ploidy)), 0.0))
breakpoints += [
PhaseBreakpoint(b.position + pos_offset, b.haplotypes, b.confidence)
for b in r.breakpoints
]
cid_offset = len(clustering)
pos_offset = len(haplotypes[0])
return PolyphaseResult(clustering, threads, haplotypes, breakpoints) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _collect_all(self):",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])",
"def reduce_data(self, ctx):\n self.baselines_type = ctx.get(\"baselines_type\")\n visibilities = ctx.get(\"visibilities\")\n p_signal = self.compute_power(visibilities)\n\n # Remember that the results of \"simulate\" can be used in two places: (i) the computeLikelihood method, and (ii)\n # as data saved to file. In case of the latter, it is useful to save extra variables to the dictionary to be\n # looked at for diagnosis, even though they are not required in computeLikelihood().\n return [dict(p_signal=p_signal, baselines=self.baselines, frequencies=self.frequencies,\n u=self.u, eta=self.eta)]\n #, nbl_uv=self.nbl_uv, nbl_uvnu=self.nbl_uvnu, nbl_u=self.nbl_u, grid_weights=self.grid_weights)]",
"def _iterate_blocks(self):\n ranges = [host_tuple[0:2] for host_tuple in self.parity_hosts]\n flags_sql = \"SELECT id, value FROM {} FINAL WHERE name = 'traces_extracted'\".format(self.indices[\"block_flag\"])\n return self.client.iterate(\n index=self.indices[\"block\"],\n fields=[\"number\"],\n query=\"ANY LEFT JOIN ({}) USING id WHERE value IS NULL AND {}\".format(\n flags_sql,\n utils.make_range_query('number', *ranges)\n ),\n )",
"def mor_prepare_data():\n prices, locations, areas, links = [], [], [], []\n for i in range(START_PAGE, SEARCHING_DEPTH+1):\n handler = requests.get(main_url, params={\"page\": str(i)})\n soup = bs4.BeautifulSoup(handler.text, 'lxml')\n heads = soup.find_all(\"header\")\n once = True\n for head in heads:\n if head.find(\"meta\", {\"itemprop\": \"category\"}) and once:\n\n raw_price = head.find(\"meta\", {\"itemprop\": \"price\"})\n price = int(float(raw_price[\"content\"]) if raw_price else \"\")\n\n raw_loc_list = head.find(\"h2\",\n {\"class\": \"single-result__title\"}).getText().strip().split(\n \", \")\n found = False\n for loc in raw_loc_list:\n if location_mapper[CITY].get(loc.lower(), 0):\n location = location_mapper[CITY][loc.lower()]\n\n found = True\n break\n if not found:\n location = \"\"\n if DEBUG_MODE:\n print(raw_loc_list)\n\n raw_area = head.find(\"p\", {\n \"class\": \"single-result__price single-result__price--currency\"}).getText().strip().split()\n if price and location:\n square_price = raw_area[0] if len(raw_area) == 2 else \"\".join(\n (raw_area[0], raw_area[1]))\n\n area = int(price / float(square_price.replace(\",\", \".\")))\n link_url = head.find('a')['href']\n\n if location and area and link_url:\n prices.append(price) if price < PRICE_UPPER_LIMIT else prices.append(\n PRICE_UPPER_LIMIT)\n locations.append(location)\n areas.append(area) if area < AREA_UPPER_LIMIT else areas.append(\n AREA_UPPER_LIMIT)\n links.append(link_url)\n\n return prices, locations, areas, links",
"def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results",
"def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes",
"def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos",
"def permute_data(self):\n\n if not eval(self.args.Segment_Permutation_File):\n \"\"\"\n User has selected no permutation outputs and should not be here so kick them out.\n \"\"\"\n self.log.error(\"--Segment_Permutation_File must be set True\")\n return\n\n cell_list = []\n\n for cell in self.args.Cell_Name.strip().split(','):\n cell_list.append(cell)\n\n \"\"\"\n This block is designed to do Segment Permutation Analysis. Randomly select groups of\n self.permutation_group_size from the Genome space, in this case the list of ID's from the\n self.bin_tracking_list, and intersect them with the Target Bed data.\n \"\"\"\n self.log.info(\"Spawning {0} jobs to process {1} iterations each for segment permutation analysis.\"\n .format(self.args.Spawn, self.args.Iteration_Count))\n\n self.target_bed_map_array = self.target_mapping()\n selection_space = self.bin_tracking_array[:, 0]\n intersect_space = self.target_bed_map_array[:, 0].tolist()\n\n p = pathos.multiprocessing.Pool(int(self.args.Spawn))\n p.starmap(self.intersection_iteration, zip(itertools.repeat(selection_space),\n itertools.repeat(intersect_space),\n itertools.repeat(self),\n cell_list))\n\n self.log.info(\"Segment Permutation jobs done. Compile any temporary data files into final results.\")",
"def compute_statistics(self, approximate=False):\n self.stats = {}\n self.stats['approximate'] = bool(self.filters.A or self.filters.B or\n self.filters.X or self.filters.ABX)\n self.stats['approximate_nb_triplets'] = approximate and self.stats[\n 'approximate']\n self.stats['nb_by_levels'] = len(self.by_dbs)\n self.by_stats = {}\n\n if self.verbose:\n display = progress_display.ProgressDisplay()\n display.add('block', 'Computing statistics for by block',\n self.stats['nb_by_levels'])\n\n for by in self.by_dbs:\n if self.verbose:\n display.update('block', 1)\n display.display()\n\n stats = {}\n stats['nb_items'] = len(self.by_dbs[by])\n stats['on_levels'] = self.on_blocks[by].size()\n stats['nb_on_levels'] = len(stats['on_levels'])\n stats['across_levels'] = self.across_blocks[by].size()\n stats['nb_across_levels'] = len(stats['across_levels'])\n stats['on_across_levels'] = self.on_across_blocks[by].size()\n stats['nb_on_across_levels'] = len(stats['on_across_levels'])\n self.by_stats[by] = stats\n\n self.stats['nb_blocks'] = sum(\n [bystats['nb_on_across_levels']\n for bystats in self.by_stats.values()])\n\n if self.verbose:\n display = progress_display.ProgressDisplay()\n display.add(\n 'block', 'Computing statistics for by/on/across block',\n self.stats['nb_blocks'])\n\n for by, db in iteritems(self.by_dbs):\n stats = self.by_stats[by]\n stats['block_sizes'] = {}\n stats['nb_triplets'] = 0\n stats['nb_across_pairs'] = 0\n stats['nb_on_pairs'] = 0\n\n # iterate over on/across blocks\n for block_key, count in iteritems(stats['on_across_levels']):\n if self.verbose:\n display.update('block', 1)\n display.display()\n\n block = self.on_across_blocks[by].groups[block_key]\n on_across_by_values = dict(db.iloc[block[0]])\n\n # retrieve the on and across keys (as they are stored\n # in the panda object)\n on, across = on_across_from_key(block_key)\n\n # apply the filter and check if block is empty\n if self.filters.on_across_by_filter(on_across_by_values):\n n_A = 
count\n n_X = stats['on_levels'][on]\n\n # FIXME quick fix to process case whith no across, but\n # better done in a separate loop ...\n if self.across == ['#across']:\n n_B = stats['nb_items'] - n_X\n else:\n n_B = stats['across_levels'][across] - n_A\n\n n_X = n_X - n_A\n stats['nb_across_pairs'] += n_A * n_B\n stats['nb_on_pairs'] += n_A * n_X\n\n need_approx = approximate or not(\n self.filters.A or self.filters.B\n or self.filters.X or self.filters.ABX)\n\n if need_approx and not isinstance(across, tuple):\n stats['nb_triplets'] += n_A * n_B * n_X\n stats['block_sizes'][block_key] = n_A * n_B * n_X\n else:\n # count exact number of triplets, could be further\n # optimized because it isn't necessary to do the whole\n # triplet generation, in particular in the case where\n # there are no ABX filters\n nb_triplets = self.on_across_triplets(\n by, on, across, block, on_across_by_values,\n with_regressors=False).shape[0]\n\n stats['nb_triplets'] += nb_triplets\n stats['block_sizes'][block_key] = nb_triplets\n else:\n stats['block_sizes'][block_key] = 0\n\n self.stats['nb_triplets'] = sum(\n [bystats['nb_triplets'] for bystats in self.by_stats.values()])\n\n # FIXME remove empty by blocks then remove empty on_across_by\n # blocks here, also reset self.n_blocks in consequence\n self.n_blocks = self.stats['nb_blocks']",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def _init_results(self) -> None:\n pt_bond_dimensions = {}\n for site, pt in enumerate(self._process_tensors):\n if pt is not None:\n pt_bond_dimensions[site] = pt.get_bond_dimensions()\n\n self._results = {\n 'time':[],\n 'norm': [],\n 'bond_dimensions': [],\n 'dynamics': {},\n 'pt_bond_dimensions': pt_bond_dimensions,\n }\n for sites in self._dynamics_sites:\n self._results['dynamics'][sites] = Dynamics(name=f\"site{sites}\")",
"def getBlocks(self) -> List[ghidra.program.model.mem.MemoryBlock]:\n ...",
"def _append_results(self) -> None:\n self._t_mps.compute_traces(self._step, self._process_tensors)\n time = self.time(self._step)\n norm = self._t_mps.get_norm()\n bond_dimensions = self._t_mps.get_bond_dimensions()\n self._results['time'].append(time)\n self._results['norm'].append(norm)\n self._results['bond_dimensions'].append(bond_dimensions)\n for sites, dynamics in self._results['dynamics'].items():\n if isinstance(sites, int):\n sites_list = [sites]\n else:\n sites_list = list(sites)\n dynamics.add(\n time,\n self._t_mps.get_density_matrix(sites_list))\n self._t_mps.clear_traces()",
"def reportCopyMap(self):\n copy_map = defaultdict(list)\n for para in self.block_map:\n offset = self.offset_map[para]\n for i in xrange(len(self.block_map[para]) - 1):\n start, var, block = self.block_map[para][i]\n span = self.block_map[para][i + 1][0] - start\n if var is not None:\n copy_map[para].append([start + offset, span, pulp.value(var)])\n prevVar = pulp.value(var)\n else:\n copy_map[para].append([start + offset, span, prevVar])\n finalStart, finalVar, finalBlock = self.block_map[para][-1]\n finalSpan = self.G.sizes[para] - finalStart\n if finalVar is not None:\n copy_map[para].append([finalStart + offset, finalSpan, pulp.value(var)])\n else:\n copy_map[para].append([finalStart + offset, finalSpan, prevVar])\n return copy_map",
"def get_all_results(pred_root, meta_results):\r\n results_all = {}\r\n for key in tqdm(meta_results, desc='Generating results ..'):\r\n persons = meta_results[key]\r\n\r\n global_seg = cv2.imread(pred_root + 'global_seg/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n global_tag = cv2.imread(pred_root + 'global_tag/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n\r\n results = {}\r\n dets, masks = [], []\r\n for p_id, score in persons:\r\n mask = (global_tag == p_id)\r\n if np.sum(mask) == 0:\r\n continue\r\n seg = mask * global_seg\r\n ys, xs = np.where(mask > 0)\r\n x1, y1, x2, y2 = xs.min(), ys.min(), xs.max(), ys.max()\r\n dets.append((x1, y1, x2, y2, score))\r\n masks.append(seg)\r\n\r\n # Reuiqred Field of each result: a list of masks,\r\n # each is a multi-class masks for one person.\r\n # It can also be sparsified to\r\n # [scipy.sparse.csr_matrix(mask) for mask in masks]\r\n # to save memory cost\r\n results['MASKS'] = masks if not Sparse \\\r\n else [scipy.sparse.csr_matrix(m) for m in masks]\r\n # Reuiqred Field of each result,\r\n # a list of detections corresponding to results['MASKS'].\r\n results['DETS'] = dets\r\n\r\n if cache_pkl:\r\n results_cache_add = cache_pkl_path + key + '.pklz'\r\n pickle.dump(results, gzip.open(results_cache_add, 'w'))\r\n results_all[key] = results_cache_add\r\n else:\r\n results_all[key] = results\r\n\r\n if PLOT:\r\n import pylab as plt\r\n plt.figure('seg')\r\n plt.imshow(global_seg)\r\n print('Seg unique:' + str(np.unique(global_seg)))\r\n plt.figure('tag')\r\n plt.imshow(global_tag)\r\n print('Tag unique:' + str(np.unique(global_tag)))\r\n plt.show()\r\n\r\n return results_all",
"def collect_all(self) -> list:\n raise NotImplementedError()",
"def analyze(L):\n rslt=[]\n #p=util.Progress(len(L))\n i=0\n import multiprocessing\n s_pid=str(multiprocessing.current_process().pid)\n for s_name, S_hit, S_go in L:\n i+=1\n #if (i % 50000): p.check(i, s_pid)\n if type(S_go) is str: S_go=[S_go]\n for s_go in S_go:\n if s_go not in self.GO_GENE: continue\n if S_score is None:\n c=self.analysis_go(s_go, S_hit, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff)\n else:\n c=self.analysis_go_RSA(s_go, S_hit, S_score, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff, l_keep_most=l_rsa_keep_most)\n if c is None:\n continue\n c['Name']=s_name\n if min_enrichment>0 and c['Enrichment']<min_enrichment: continue\n if p_cutoff<1 and 10**c['LogP']>p_cutoff: continue\n c['Description']= self.go_description(s_go)\n S_gene=c['GeneID'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits']='|'.join(S_symbol)\n if 'GeneID_All' in c:\n S_gene=c['GeneID_All'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits_All']='|'.join(S_symbol)\n if self.GPDB:\n c['CategoryID'] = self.get_category_id(c['GO'])\n c['Category'] = self.CATEGORY.get(self.get_category_id(c['GO']))\n c['GO'] = self.get_source_id(c['GO'])\n rslt.append(c)\n return rslt",
"def _compile_results(self):\n self.statements = stmts_from_json(self.__statement_jsons.values())\n if self.use_obtained_counts:\n self.__source_counts = get_available_source_counts(self.statements)\n self.__evidence_counts = get_available_ev_counts(self.statements)",
"def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )",
"def get_results(self):\n summary = self.handle.get_summary_data(self.group_name)\n results = {'template': {'status': 'no data'},\n 'complement': {'status': 'no data'},\n '2d': {'status': 'no data'}}\n if 'genome_mapping_template' in summary:\n results['template'] = self._get_results(summary['genome_mapping_template'])\n if 'genome_mapping_complement' in summary:\n results['complement'] = self._get_results(summary['genome_mapping_complement'])\n if 'genome_mapping_2d' in summary:\n results['2d'] = self._get_results(summary['genome_mapping_2d'])\n return results",
"def return_results(self):\n\n message = 'INFO: entering return_results'\n self.report(message)\n\n # try/except to capture as mnuch as possible (everything that is there even when workflow exits unsuccessfully)\n # capture pk and uuids of last calc, params and remote\n try:\n last_calc_uuid = self.ctx.last_calc.uuid\n last_calc_pk = self.ctx.last_calc.pk\n last_params_uuid = self.ctx.last_params.uuid\n last_params_pk = self.ctx.last_params.pk\n last_remote_uuid = self.ctx.last_remote.uuid\n last_remote_pk = self.ctx.last_remote.pk\n except:\n last_calc_uuid = None\n last_calc_pk = None\n last_params_uuid = None\n last_params_pk = None\n last_remote_uuid = None\n last_remote_pk = None\n\n all_pks = []\n for calc in self.ctx.calcs:\n try:\n all_pks.append(calc.pk)\n except:\n self.ctx.warnings.append(f'cound not get pk of calc {calc}')\n\n # capture links to last parameter, calcualtion and output\n try:\n last_calc_out = self.ctx.kkr.out['output_parameters']\n last_calc_out_dict = last_calc_out.get_dict()\n last_RemoteData = self.ctx.last_remote\n last_InputParameters = self.ctx.last_params\n except:\n last_InputParameters = None\n last_RemoteData = None\n last_calc_out = None\n last_calc_out_dict = {}\n\n # capture convergence info\n try:\n last_rms = self.ctx.rms[-1]\n except:\n last_rms = None\n\n # now collect results saved in results node of workflow\n message = 'INFO: collect outputnode_dict'\n self.report(message)\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._workflowversion\n outputnode_dict['material'] = self.ctx.formula\n outputnode_dict['loop_count'] = self.ctx.loop_count\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['last_params_nodeinfo'] = {'uuid': last_params_uuid, 'pk': last_params_pk}\n outputnode_dict['last_remote_nodeinfo'] = {'uuid': last_remote_uuid, 'pk': last_remote_pk}\n 
outputnode_dict['last_calc_nodeinfo'] = {'uuid': last_calc_uuid, 'pk': last_calc_pk}\n outputnode_dict['pks_all_calcs'] = all_pks\n outputnode_dict['convergence_value'] = last_rms\n outputnode_dict['convergence_values_all_steps'] = array(self.ctx.rms_all_steps)\n outputnode_dict['convergence_values_last_step'] = array(self.ctx.last_rms_all)\n outputnode_dict['convergence_reached'] = self.ctx.kkr_converged\n outputnode_dict['kkr_step_success'] = self.ctx.kkr_step_success\n outputnode_dict['used_higher_accuracy'] = self.ctx.kkr_higher_accuracy\n\n # report the status\n if self.ctx.successful:\n self.report(\n 'STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the KKR calculation pk= {} '\n 'converged after {} KKR runs and {} iterations to {} \\n'\n ''.format(\n last_calc_pk, self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])),\n self.ctx.last_rms_all[-1]\n )\n )\n else: # Termination ok, but not converged yet...\n self.report(\n 'STATUS/WARNING: Done, the maximum number of runs '\n 'was reached or something failed.\\n INFO: The '\n 'charge density of the KKR calculation pk= '\n 'after {} KKR runs and {} iterations is {} \"me/bohr^3\"\\n'\n ''.format(\n self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])), self.ctx.last_rms_all[-1]\n )\n )\n\n # create results node and link all calculations\n message = 'INFO: create results nodes'\n self.report(message)\n link_nodes = {}\n icalc = 0\n for calc in self.ctx.calcs:\n link_nodes[f'KkrimpCalc{icalc}'] = calc.outputs.remote_folder\n icalc += 1\n if not self.ctx.dos_run:\n link_nodes['final_imp_potential'] = self.ctx.last_pot\n outputnode_t = create_out_dict_node(Dict(dict=outputnode_dict), **link_nodes)\n outputnode_t.label = 'kkr_scf_wc_results'\n outputnode_t.description = 'Contains results of workflow (e.g. 
workflow version number, info about success of wf, lis tof warnings that occured during execution, ...)'\n\n self.out('workflow_info', outputnode_t)\n # store out_potential as SingleFileData only if this was no DOS run\n if not self.ctx.dos_run:\n self.out('host_imp_pot', self.ctx.last_pot)\n\n # print results table for overview\n # table layout:\n message = 'INFO: overview of the result:\\n\\n'\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n message += '| irun | success | isteps | imix | mixfac | qbound | rms | pk and uuid |\\n'\n message += '| | | | | | | first | last | |\\n'\n message += '|------|---------|--------|------|--------|---------|--------|--------|---------------------------------------------|\\n'\n KKR_steps_stats = self.ctx.KKR_steps_stats\n for irun in range(len(KKR_steps_stats.get('success', []))):\n message += '|%6i|%9s|%8i|%6i|%.2e|%.3e|%.2e|%.2e|' % (\n irun + 1, KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun], KKR_steps_stats.get('first_rms')[irun],\n KKR_steps_stats.get('last_rms')[irun]\n )\n message += f\" {KKR_steps_stats.get('pk')[irun]} | {KKR_steps_stats.get('uuid')[irun]}|\\n\"\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n \"\"\"\n message += \"#|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|\\n\".format(irun+1,\n KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun],\n KKR_steps_stats.get('first_rms')[irun], KKR_steps_stats.get('last_rms')[irun])\n \"\"\"\n self.report(message)\n\n # cleanup of unnecessary files after convergence\n # WARNING: THIS DESTROYS CACHABILITY OF THE WORKFLOW!!!\n if 
self.ctx.do_final_cleanup:\n if self.ctx.successful:\n self.report('INFO: clean output of calcs')\n remove_out_pot_impcalcs(self.ctx.successful, all_pks)\n self.report('INFO: clean up raw_input folders')\n clean_raw_input(self.ctx.successful, all_pks)\n\n # clean intermediate single file data which are not needed after successful run or after DOS run\n if self.ctx.successful or self.ctx.dos_run:\n self.final_cleanup()\n\n self.report('INFO: done with kkr_scf workflow!\\n')",
"def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)",
"def gather_qpt_info(self):\n\n partial = self.gather_qpt_info_me()\n\n if i_am_master:\n\n qred_all = np.zeros((self.nqpt, 3), dtype=np.float)\n omega_all = np.zeros((self.nqpt, 3 * self.natom), dtype=np.float)\n\n qred_p, omega_p = partial\n for i, (qred, omega) in enumerate(zip(qred_p, omega_p)):\n qred_all[i,...] = qred[...]\n omega_all[i,...] = omega[...]\n\n active_ranks = self.get_active_ranks()\n if len(active_ranks) > 1:\n for irank in active_ranks[1:]:\n partial = comm.recv(source=irank, tag=10000+irank)\n qred_p, omega_p = partial\n for qred, omega in zip(qred_p, omega_p):\n i += 1\n qred_all[i,...] = qred[...]\n omega_all[i,...] = omega[...]\n\n elif self.active_worker:\n comm.send(partial, dest=0, tag=10000+rank)\n return\n else:\n return\n\n self.qred = qred_all\n self.omega = omega_all\n\n return self.qred, self.omega",
"def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... 
Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n 
TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats",
"def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pools(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list",
"def get_local_neighbourhood_composition(self):\n neighbourhood_students = []\n\n #print(\"id, students\",self.unique_id, len(self.neighbourhood_students_indexes))\n neighbourhood_students = self.model.get_households_from_index(self.neighbourhood_students_indexes)\n local_neighbourhood_composition = get_counts_util(neighbourhood_students, self.model)\n #print(\"step \",self.model.schedule.steps,\" neighb students \",len(self.neighbourhood_students))\n\n return (local_neighbourhood_composition)",
"def merge_par_results(res):\n nres = {}\n for r in res:\n nres.update(r)\n return nres",
"def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pool_detail(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list",
"def _compute_all_blocks(self, **kwargs):\n # We are redefining this method to improve performance by parallelizing\n # first the blocks that produce independent workspaces and covariance\n # workspaces. That way two processes will not be computing the same\n # (cov)workspace at the same time.\n\n ccl_tracers, tracer_Noise, tracer_Noise_coupled = self.get_tracer_info(\n return_noise_coupled=True\n )\n\n # Make a list of all pair of tracer combinations needed to compute the\n # independent workspaces\n trs_wsp = self.get_list_of_tracers_for_wsp()\n # Now the tracers for covariance workspaces (without trs_wsp)\n trs_cwsp = self.get_list_of_tracers_for_cov_wsp(remove_trs_wsp=True)\n\n # Make a list of all remaining combinations\n tracers_cov = self.get_list_of_tracers_for_cov_without_trs_wsp_cwsp()\n\n # Save blocks and the corresponding tracers, as comm.gather does not\n # return the blocks in the original order.\n blocks = []\n tracers_blocks = []\n print(\"Computing independent covariance blocks\")\n print(\"Computing the blocks for independent workspaces\")\n for tracer_comb1, tracer_comb2 in self._split_tasks_by_rank(trs_wsp):\n print(tracer_comb1, tracer_comb2)\n cov = self.get_covariance_block_for_sacc(\n tracer_comb1=tracer_comb1, tracer_comb2=tracer_comb2, **kwargs\n )\n blocks.append(cov)\n tracers_blocks.append((tracer_comb1, tracer_comb2))\n\n if self.comm:\n self.comm.Barrier()\n\n print(\"Computing the blocks for independent covariance workspaces\")\n for tracer_comb1, tracer_comb2 in self._split_tasks_by_rank(trs_cwsp):\n print(tracer_comb1, tracer_comb2)\n cov = self.get_covariance_block_for_sacc(\n tracer_comb1=tracer_comb1, tracer_comb2=tracer_comb2, **kwargs\n )\n blocks.append(cov)\n tracers_blocks.append((tracer_comb1, tracer_comb2))\n\n if self.comm:\n self.comm.Barrier()\n\n print(\"Computing the remaining blocks\")\n # Now loop over the remaining tracers\n for tracer_comb1, tracer_comb2 in self._split_tasks_by_rank(\n tracers_cov\n ):\n 
print(tracer_comb1, tracer_comb2)\n cov = self.get_covariance_block_for_sacc(\n tracer_comb1=tracer_comb1, tracer_comb2=tracer_comb2, **kwargs\n )\n blocks.append(cov)\n tracers_blocks.append((tracer_comb1, tracer_comb2))\n\n return blocks, tracers_blocks"
] | [
"0.56452453",
"0.55284214",
"0.5465167",
"0.539003",
"0.5384237",
"0.5365769",
"0.5345599",
"0.53302294",
"0.5316398",
"0.52856123",
"0.52655923",
"0.5250718",
"0.5248248",
"0.524049",
"0.52215445",
"0.52090776",
"0.51985794",
"0.5186276",
"0.5150495",
"0.5137165",
"0.51355445",
"0.5129606",
"0.5128499",
"0.51232845",
"0.5120695",
"0.5106356",
"0.509484",
"0.50907004",
"0.50849587",
"0.5078609"
] | 0.57805455 | 0 |
Computes the cut positions for phasing blocks, based on the computed breakpoints of the reordering stage and the requeted block cut sensitivity. | def compute_cut_positions(
breakpoints: List[PhaseBreakpoint], ploidy: int, block_cut_sensitivity: int
):
cuts = []
hap_cuts = [[] for _ in range(ploidy)]
thresholds = [-float("inf"), -float("inf"), log(0.5), log(0.5), log(0.99), 0]
thresholds_num = [ploidy, ploidy, min(ploidy, 3), 2, 2, 0]
threshold = thresholds[block_cut_sensitivity]
threshold_num = thresholds_num[block_cut_sensitivity]
remaining_conf = [0.0 for _ in range(ploidy)]
for b in breakpoints:
# avoid duplicate cut positions
if cuts and cuts[-1] == b.position:
continue
# for zero confidence, always cut
if b.confidence == 0.0:
cuts.append(b.position)
for h in range(ploidy):
hap_cuts[h].append(b.position)
remaining_conf = [0.0 for _ in range(ploidy)]
continue
else:
for h in b.haplotypes:
remaining_conf[h] += log(b.confidence)
if sum([1 for i in range(ploidy) if remaining_conf[i] <= threshold]) >= threshold_num:
cuts.append(b.position)
for h in b.haplotypes:
hap_cuts[h].append(b.position)
remaining_conf = [0.0 for _ in range(ploidy)]
return cuts, hap_cuts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_cutting_frames(pos_data):\n def get_pts(dat):\n \"\"\" short-cut function to determine when state in dat changes\n Args:\n A numpy vector\n Returns:\n Boolean vector\n \"\"\"\n return np.where(np.abs(np.diff(dat)) > 0)[0]+1\n\n # cutting points according to game status\n max_frame = pos_data.shape[0]-1\n poss_cts = get_pts(pos_data[:, 1])\n status_cts = get_pts(pos_data[:, 2])\n half_cts = get_pts(pos_data[:, 3])\n cut_pts = np.unique(np.concatenate([[0], status_cts, half_cts, poss_cts, [max_frame]]))\n return cut_pts",
"def detectBorders(self, points):\n lane1 = []; lane2 = []\n self.leftLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n self.rightLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n\n pointMap = np.zeros((points.shape[0], 20))\n prePoint = np.zeros((points.shape[0], 20))\n postPoint = np.zeros((points.shape[0], 20))\n\n dis = 10\n max1 = -1; max2 = -1\n\n ##\n ## /!\\ UNSAFE LOOP, TODO: FIX\n ##\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n pointMap[i][j] = 1\n prePoint[i][j] = -1\n postPoint[i][j] = -1\n\n for i in reversed(range(points.shape[0] - 2)):\n\n for j in range(len(points[i])):\n\n err = 320\n for m in range(1, min(points.shape[0] - 1 - i, 5)):\n check = False ## TODO: why unused ?\n\n for k in range(len(points[i + 1])):\n\n (x_m, y_m) = points[i + m][k].pt\n (x, y) = points[i][j].pt\n\n if (abs(x_m - x) < dis and abs(y_m - y) < err):\n err = abs(x_m - x)\n\n pointMap[i][j] = pointMap[i + m][k] + 1\n prePoint[i][j] = k\n postPoint[i + m][k] = j\n check = True\n\n break ## breaks out of the m loop. Why is it not conditioned by check ? 
TODO: ???\n\n if (pointMap[i][j] > max1):\n max1 = pointMap[i][j]\n posMax = cv2.KeyPoint(i, j, _size=0)\n \n else:\n posMax = None\n\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n if posMax:\n if (pointMap[i][j] > max2 and (i != posMax.pt[0] or j != posMax.pt[1]) and postPoint[i][j] == -1): #FIXME \"local variable 'posMax' referenced before assignment\" possible\n max2 = pointMap[i][j]\n posMax2 = cv2.KeyPoint(i, j, _size=0)\n\n\n\n if max1 == -1:\n return\n\n # DEFINES LANE 1 POINTS\n while (max1 >= 1):\n (x,y) = points[int(posMax.pt[0])][int(posMax.pt[1])].pt\n lane1.append(\n [x,y]\n )\n if (max1 == 1):\n break\n\n posMax = cv2.KeyPoint(\n posMax.pt[0]+1,\n prePoint[int(posMax.pt[0])][int(posMax.pt[1])],\n _size=0\n )\n\n max1 -= 1\n\n # DEFINES LANE 2 POINTS\n while (max2 >= 1):\n (x,y) = points[int(posMax2.pt[0])][int(posMax2.pt[1])].pt\n lane2.append(\n [x, y]\n )\n if (max2 == 1):\n break\n\n posMax2 = cv2.KeyPoint(\n posMax2.pt[0]+1,\n prePoint[int(posMax2.pt[0])][int(posMax2.pt[1])],\n _size=0\n )\n\n max2-= 1\n\n subLane1 = np.array(lane1[0:5])\n subLane2 = np.array(lane2[0:5])\n\n # checking if sublane has an empty value\n\n line1 = cv2.fitLine(subLane1, 2, 0, 0.01, 0.01)\n line2 = cv2.fitLine(subLane2, 2, 0, 0.01, 0.01)\n\n try:\n lane1X = (self.BIRDVIEW_WIDTH - line1[3]) * line1[0] / line1[1] + line1[2]\n except:\n lane1X = 0\n\n try:\n lane2X = (self.BIRDVIEW_WIDTH - line2[3]) * line2[0] / line2[1] + line2[2]\n except:\n lane2X = 0\n \n if (lane1X < lane2X):\n for i in range(len(lane1)):\n self.leftLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.rightLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]\n\n else:\n\n for i in range(len(lane1)):\n self.rightLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.leftLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]",
"def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset",
"def find_indirect_gap(self,rpts=5):\n # First find the miniumu of the upper band.\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n # Repeat the same for the lower band\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun2= lambda x: -self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1dn=optimize.minimize(fun2,x0dn).x\n valdn=fun2(x1dn)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n # Also always check special points in the BZ\n x0dn=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n x0dn=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n \n return 
valup+valdn,x1up,x1dn",
"def get_start_positions(img_in):\n\n def initialize_coordinates(kernel_h, kernel_w):\n \"\"\" locates positions of interest by traversing eroded image and\n saves 9 points on each area of interest to global matrix\n :param kernel_h height of kernel used for harsh erosion\n :param kernel_w width of kernel used for harsh erosion\"\"\"\n global init_coords\n\n count = 0\n y = 0\n while y < frame_height - kernel_h:\n x = 0\n while x < frame_width - kernel_w:\n locator = img[y:y+kernel_h, x:x+kernel_w, 2] > 0 + numpy.zeros((kernel_h, kernel_w))\n if numpy.any(locator):\n if count == 0:\n init_coords[count][0][0] = y - 2\n init_coords[count][0][1] = x + 2\n elif count == 1:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n elif count == 2:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n elif count == 3:\n init_coords[count][0][0] = y - 3\n init_coords[count][0][1] = x + 2\n elif count == 4:\n init_coords[count][0][0] = y + 3\n init_coords[count][0][1] = x - 5\n count += 1\n break\n x += kernel_w\n y += kernel_h\n\n # store 8 more points for each body part\n f = 1.5\n for count in range(5):\n init_coords[count][1][1] = init_coords[count][0][1] + 3*f\n init_coords[count][1][0] = init_coords[count][0][0] + 0\n init_coords[count][2][1] = init_coords[count][0][1] + 6*f\n init_coords[count][2][0] = init_coords[count][0][0] + 0\n init_coords[count][3][1] = init_coords[count][0][1] + 0\n init_coords[count][3][0] = init_coords[count][0][0] + 3*f\n init_coords[count][4][1] = init_coords[count][0][1] + 3*f\n init_coords[count][4][0] = init_coords[count][0][0] + 3*f\n init_coords[count][5][1] = init_coords[count][0][1] + 6*f\n init_coords[count][5][0] = init_coords[count][0][0] + 3*f\n init_coords[count][6][1] = init_coords[count][0][1] + 0\n init_coords[count][6][0] = init_coords[count][0][0] + 6*f\n init_coords[count][7][1] = init_coords[count][0][1] + 3*f\n init_coords[count][7][0] = init_coords[count][0][0] + 6*f\n 
init_coords[count][8][1] = init_coords[count][0][1] + 6*f\n init_coords[count][8][0] = init_coords[count][0][0] + 6*f\n\n limb_coords[0][0][0] = init_coords[0][5][0]\n limb_coords[0][0][1] = init_coords[0][5][1]\n limb_coords[1][0][0] = init_coords[1][5][0]\n limb_coords[1][0][1] = init_coords[1][5][1]\n limb_coords[2][0][0] = init_coords[2][5][0]\n limb_coords[2][0][1] = init_coords[2][5][1]\n limb_coords[3][0][0] = init_coords[3][5][0]\n limb_coords[3][0][1] = init_coords[3][5][1]\n limb_coords[4][0][0] = init_coords[4][5][0]\n limb_coords[4][0][1] = init_coords[4][5][1]\n\n img = img_in.copy()\n img = segment_red(img, 205, 135)\n erode(img, 14, 12)\n initialize_coordinates(14, 12)",
"def z_focus(block,cut,laser):\r\n\tcutlist = []\r\n\titerations = int(cut[\"final_dimension_z\"]/laser[\"z_spacing\"])\r\n\t#Currently x,y is decided to take up a good amount of the block, rather than having set distances and sizes\r\n\ty = cut[\"final_dimension_y\"]/2\r\n\toffset = laser[\"xy_spacing\"]\r\n\tx = 0\r\n\r\n\tcutlist.append([\"z_abs\",\"0\"])\r\n\tfor a in range(iterations):\r\n\t\tcutlist.append([\"jump\", f\"{x:.6f}\", f\"{y:.6f}\"])\r\n\t\tcutlist.append([\"mark\", f\"{x:.6f}\", f\"{-y:.6f}\"])\r\n\t\tcutlist.append([\"z_rel\", str(-laser[\"z_spacing\"])])\r\n\t\tx = x + offset\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)",
"def adjust_left_right_positions():\n # bp_* is synonym for 'Best Position Left/Right' and are adjusted \n # to optimize bmv_*\n p_l, bp_l = [pos] * 2\n p_r, bp_r = [pos + qlen] * 2\n\n # bmv_* are declared here in case they are untouched in optimization\n bmv_l = match_values[p_l / step]\n bmv_r = match_values[p_l / step]\n\n for f in range(flex):\n ll = _match(query, corpus[p_l - f: p_r])\n if ll > bmv_l:\n bmv_l = ll\n bp_l = p_l - f\n\n lr = _match(query, corpus[p_l + f: p_r])\n if lr > bmv_l:\n bmv_l = lr\n bp_l = p_l + f\n\n rl = _match(query, corpus[p_l: p_r - f])\n if rl > bmv_r:\n bmv_r = rl\n bp_r = p_r - f\n\n rr = _match(query, corpus[p_l: p_r + f])\n if rr > bmv_r:\n bmv_r = rr\n bp_r = p_r + f\n\n if verbose:\n print(\"\\n\" + str(f))\n print(\"ll: -- value: %f -- snippet: %s\" % (ll, corpus[p_l - f: p_r]))\n print(\"lr: -- value: %f -- snippet: %s\" % (lr, corpus[p_l + f: p_r]))\n print(\"rl: -- value: %f -- snippet: %s\" % (rl, corpus[p_l: p_r - f]))\n print(\"rr: -- value: %f -- snippet: %s\" % (rl, corpus[p_l: p_r + f]))\n\n return bp_l, bp_r, _match(query, corpus[bp_l : bp_r])",
"def find_direct_gap(self,rpts=5):\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]-self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n return valup,x1up",
"def get_breakpoint_positions( y, ll, xi, ll_threshold = 5):\n\n EPS = 1e-12\n ## first get the local maxima of the log likelihoods\n bp_cands = [0]\n for i in xrange(1, ll.shape[0]-1):\n delta_ll_left = ll[i] - ll[i-1]\n delta_ll_right = ll[i] - ll[i+1]\n if (delta_ll_left > EPS and delta_ll_right > EPS and \n ll[i] > ll_threshold):\n bp_cands.append(i)\n bp_cands.append(ll.shape[0])\n bp_cands = np.array(bp_cands)\n return bp_cands",
"def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e",
"def delete_unneeded_breakpoints2(y, xi, segment_bdy1, ll, \n delta_ll_threshold=2.0, log_func=sys.stdout.write):\n bps = np.array([x[0] for x in segment_bdy1] + [segment_bdy1[-1][1]])\n keep = np.ones_like(bps, dtype=bool)\n keep_going = True\n order = list(np.argsort(ll[bps[:-1]]))\n while keep_going:\n keep_going = False\n for i in order:\n pos = bps[i]\n if pos == 0 or pos == len(bps)-1:\n continue\n if not keep[i]:\n continue\n left = i-1\n while not keep[left]:\n left -= 1\n assert left >=0\n right = i+1\n while not keep[right]:\n right += 1\n assert right < len(bps)\n\n lpos = bps[left]\n rpos = bps[right]\n lpts = y[lpos:pos]\n rpts = y[pos:rpos]\n allpts = y[lpos:rpos]\n\n lxi = xi[lpos:pos]\n rxi = xi[pos:rpos]\n allxi = xi[lpos:rpos]\n\n mu_l = np.clip(lpts.sum( )/lxi.sum( ), 1e-2, None)*lxi\n mu_r = np.clip(rpts.sum( )/rxi.sum( ), 1e-2, None)*rxi\n mu_all = np.clip(allpts.sum( )/allxi.sum( ), 1e-2, None)*allxi\n\n delta_ll = ((-mu_l + lpts*np.log(mu_l)).sum( ) +\n (-mu_r + rpts*np.log(mu_r)).sum( ) -\n (-mu_all + allpts*np.log(mu_all)).sum( ))\n if delta_ll < delta_ll_threshold:\n keep[i] = False\n keep_going = True\n bps2 = bps[keep]\n log_func(\"Retaining %.2f pct of breakpoints\\n\"%(keep.sum()*100.0/keep.shape[0]))\n segment_bdy2 = [(bps2[i-1], bps2[i]) for i in xrange(1, len(bps2))]\n return segment_bdy2",
"def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return",
"def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))",
"def cleanOpenBranches(skeleton, skelton_copy, points, radii, length, clean = True, verbose = False):\n \n assert np.isfortran(skeleton);\n assert np.isfortran(skelton_copy);\n \n timer = tmr.Timer();\n timer_all = tmr.Timer();\n \n # find branch and end points\n deg = cpl.convolve_3d_indices(skeleton, t3d.n26, points, out_dtype = 'uint8');\n branchpoints = points[deg >= 3];\n e_pts = points[deg == 1];\n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n #prepare temps\n #skel = skeleton.copy();\n skel_flat = np.reshape(skelton_copy, -1, order = 'A');\n strides = np.array(skelton_copy.strides);\n \n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n label = np.arange(27);\n label = label.reshape([3,3,3]);\n label[1,1,1] = 0;\n \n critical_points = [e_pts];\n delete_points = [];\n \n for l in range(1, length + 1):\n #neighbours of end points\n e_pts_label = cpl.convolve_3d_indices(skelton_copy, label, e_pts);\n \n if verbose:\n timer.printElapsedTime('Done labeling %d / %d' % (l, length));\n timer.reset();\n \n #label zero points are non-critical short isolated branches\n e_pts_zero = e_pts_label == 0;\n #print 'zero length:', np.unravel_index(e_pts[e_pts_zero], skel.shape)\n if e_pts_zero.sum() > 0:\n keep = np.logical_not(e_pts_zero);\n for m in range(l):\n critical_points[m] = critical_points[m][keep];\n e_pts_label = e_pts_label[keep];\n e_pts = e_pts[keep];\n \n if verbose:\n timer.printElapsedTime('Ignored %d small branches' % (keep.sum()));\n timer.reset();\n \n e_pts_new = e_pts + np.sum((np.vstack(np.unravel_index(e_pts_label, label.shape)) - 1).T * strides, axis = 1)\n \n # did we hit a branch point\n delete = np.in1d(e_pts_new, branchpoints); #, assume_unique = True);\n keep = np.logical_not(delete);\n #print delete.shape, keep.shape, e_pts_new.shape\n \n #delete all path 
that hit a branch point\n if delete.sum() > 0:\n for m in range(l):\n delete_points.append(critical_points[m][delete]);\n #print 'deleting:', np.unravel_index(critical_points[m][delete], skel.shape)\n critical_points[m] = critical_points[m][keep];\n e_pts_new = e_pts_new[keep];\n \n if verbose:\n timer.printElapsedTime('Deleted %d points' % (delete.sum()));\n timer.reset();\n \n if l < length:\n skel_flat[e_pts] = False; # remove endpoints for new neighbour detection\n critical_points.append(e_pts_new);\n e_pts = e_pts_new;\n \n if verbose:\n timer.printElapsedTime('Cleanup iteration %d / %d done.' % (l, length));\n \n #gather all points\n if len(delete_points) > 0:\n delete_points = np.hstack(delete_points);\n delete_points = np.unique(delete_points);\n else:\n delete_points = np.zeros(0);\n \n if verbose:\n timer_all.printElapsedTime('Cleanup');\n \n if clean:\n skel_flat = np.reshape(skeleton, -1, order = 'F');\n skel_flat[delete_points] = False;\n keep_ids = np.logical_not(np.in1d(points, delete_points, assume_unique = True))\n points = points[keep_ids];\n radii = radii[keep_ids];\n return skeleton, points, radii\n \n return delete_points;",
"def build_reset_problem(self):\n\n print(\"Resetting blocks...\")\n print(\"Moved Blocks:\", self.moved_blocks)\n \n # Define block order by sorting by height\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)\n \n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n\n # Add all blocks to be moved to the data structure\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems",
"def get_block_positions(self, fig):\n block_positions = []\n\n # Iterates through y + active_piece.y and x + active_piece.x\n for y, row in enumerate(fig, start=self.active_piece.y):\n for x, val in enumerate(row, start=self.active_piece.x):\n if val != 0:\n block_positions.append((x, y))\n\n return block_positions",
"def array_part_loops_pruning(loops, config):\n pruned_loops = []\n\n PE_lb = config['setting'][config['mode']\n ]['pruning']['array_part']['PE_num'][0]\n for loop in loops:\n if PE_lb == -1:\n pruned_loops.append(loop)\n else:\n prod = 1\n for l in loop:\n if l > 1:\n prod *= l\n if prod < PE_lb:\n continue\n pruned_loops.append(loop)\n\n return pruned_loops",
"def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts",
"def cut_tracks(data_dict, up_trk, down_trk) :\n up_counter = 0\n down_counter = 0\n\n for tp in up_trk.scifitrackpoints() :\n if tp.has_data() :\n up_counter += 1\n for tp in down_trk.scifitrackpoints() :\n if tp.has_data() :\n down_counter += 1\n\n if up_counter < MIN_NUMBER_TRACKPOINTS :\n return True\n if down_counter < MIN_NUMBER_TRACKPOINTS :\n return True\n\n if up_trk.P_value() < P_VALUE_CUT :\n return True\n if down_trk.P_value() < P_VALUE_CUT :\n return True\n\n up_ref = None\n down_ref = None\n for tp in up_trk.scifitrackpoints() :\n if tp.station() == RECON_STATION and tp.plane() == RECON_PLANE :\n up_ref = tp\n for tp in down_trk.scifitrackpoints() :\n if tp.station() == RECON_STATION and tp.plane() == RECON_PLANE :\n down_ref = tp\n\n if up_ref is None :\n return True\n if down_ref is None :\n return True\n\n\n length = TRACKER_SEPARATION\n\n up_pos = [ up_ref.pos().x(), up_ref.pos().y() ]\n up_gra = [ up_ref.mom().x() / up_ref.mom().z(), \\\n up_ref.mom().y() / up_ref.mom().z() ]\n\n up_pos = up_tracker_correction( up_pos )\n up_gra = up_tracker_correction( up_gra )\n\n pro_pos = [ up_pos[0] + length*up_gra[0], up_pos[1] + length*up_gra[1] ]\n\n rad = math.sqrt( up_pos[0]**2 + up_pos[1]**2 )\n grad = math.sqrt( up_gra[0]**2 + up_gra[1]**2 )\n pro_rad = math.sqrt( pro_pos[0]**2 + pro_pos[1]**2 )\n\n if grad > GRADIENT_CUT :\n return True\n if rad > RADIUS_CUT :\n return True\n if pro_rad > PROJECTED_RADIUS_CUT :\n return True\n\n return False",
"def updraft_env_mask(tracer_2d, w_interp_2d, ql_2d, cb, ct, z_half, ql_tr = 1e-8):\n updraft_mask = np.ones_like(tracer_2d) #mask = 1 -> False, mask = 0 True\n tracer_mask = np.ones_like(tracer_2d)\n w_mask = np.ones_like(tracer_2d)\n ql_mask = np.ones_like(tracer_2d)\n nxy = np.shape(tracer_2d)[0]\n nz = np.shape(tracer_2d)[1]\n\n sigma_sum = 0.\n z_ql = 0.\n cloud_flag = False\n if np.isnan(cb) == False and np.isnan(ct) == False:\n z_ql = z_half[cb] + 0.25 * (z_half[ct] - z_half[cb])\n cloud_flag = True\n\n print \"z_ql = \", z_ql\n\n tracer_mean = np.mean(tracer_2d, axis=0)\n tracer_square_mean = np.mean(tracer_2d * tracer_2d, axis=0)\n tracer_variance = tracer_square_mean - tracer_mean * tracer_mean\n assert(tracer_variance.all() >= 0)\n tracer_std = np.sqrt(tracer_variance)\n\n for k in range(nz):\n sigma_sum += tracer_std[k]\n sigma_min = sigma_sum/(k+1.0) * 0.05 # threshold from the paper\n\n for i in range(nxy):\n if tracer_std[k] >= sigma_min:\n if tracer_2d[i,k] - tracer_mean[k] >= tracer_std[k]:\n updraft_mask[i,k] = 0\n tracer_mask[i,k] = 0\n # TODO - I think the paper condition should also include this\n # But it's not done in Pycles\n #else:\n # if tracer_2d[i,k] - tracer_mean[k] >= sigma_min:\n # updraft_mask[i,k] = 0\n\n if w_interp_2d[i,k] <= 0.:\n updraft_mask[i,k] = 1\n else:\n w_mask[i,k] = 0\n\n if cloud_flag:\n if z_half[k] >= z_ql and z_half[k] <= z_half[ct]:\n if ql_2d[i,k] < ql_tr:\n updraft_mask[i,k] = 1\n else:\n ql_mask[i,k] = 0\n\n env_mask = 1 - updraft_mask\n\n mask_dict = {}\n mask_dict[\"updraft\"] = updraft_mask\n mask_dict[\"env\"] = env_mask\n mask_dict[\"tracer\"] = tracer_mask\n mask_dict[\"w\"] = w_mask\n mask_dict[\"ql\"] = ql_mask\n\n return mask_dict",
"def __init__(self, selected_points, cut_depth, cut_breadth):\n\n\n self.cut_depth = cut_depth\n self.cut_breadth = cut_breadth\n\n self.points = selected_points\n\n self.vline = self.vlinecomp()\n self.hline = self.ortho_line_cut()\n\n self.mid_left = self.midpoint(0,1)\n self.mid_right = self.midpoint(2, 3)",
"def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. \n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')",
"def collect_blocks():\n\n # Below are the position of (c,r) in a block.\n\n #########################\n # (0,0) # (1,0) # (2,0) #\n #########################\n #########################\n # (0,1) # (1,1) # (2,1) #\n #########################\n #########################\n # (0,2) # (1,2) # (2,2) #\n #########################\n\n for x in range(72):\n r, c = x // 9 % 3, x % 3\n if r == 0:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n yield x, x + 19\n yield x, x + 20\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n yield x, x + 17\n yield x, x + 19\n else:\n yield x, x + 7\n yield x, x + 8\n yield x, x + 16\n yield x, x + 17\n elif r == 1:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n else:\n yield x, x + 8\n yield x, x + 7",
"def parks(self):\n point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]\n park_coords = []\n parks_sorted = []\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'p':\n park_coords.append(tuple([i, j]))\n while len(park_coords) > 0:\n x, y = park_coords.pop(0)\n if len(parks_sorted) == 0:\n parks_sorted.append([(x, y)])\n else:\n borders_bool = []\n for block_no, park_block in enumerate(parks_sorted):\n borders_bool.append(False)\n for i, j in park_block:\n if abs(x - i) + abs(y - j) == 1:\n borders_bool[block_no] = True\n if (num_true := borders_bool.count(True)) == 1:\n parks_sorted[borders_bool.index(True)].append((x, y))\n elif num_true > 1:\n new_parks_sorted = []\n i_mega_park = None\n for block_no, park_block in enumerate(parks_sorted):\n if borders_bool[block_no]: # If it is bordering\n if i_mega_park is None:\n i_mega_park = block_no\n new_parks_sorted.append(park_block)\n else:\n new_parks_sorted[i_mega_park] += park_block\n new_parks_sorted[i_mega_park] += [(x, y)]\n parks_sorted = new_parks_sorted\n else:\n new_parks_sorted.append(park_block)\n parks_sorted = new_parks_sorted\n else:\n parks_sorted.append([(x, y)])\n\n return sum([point_array[len(block)] for block in parks_sorted])",
"def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp",
"def test_ordered_pvarray_cuts_for_pvrow_view(ordered_pvarray):\n\n ordered_pvarray.cast_shadows()\n n_surfaces_0 = ordered_pvarray.ground.n_surfaces\n len_0 = ordered_pvarray.ground.length\n ordered_pvarray.cuts_for_pvrow_view()\n n_surfaces_1 = ordered_pvarray.ground.n_surfaces\n len_1 = ordered_pvarray.ground.length\n\n assert n_surfaces_1 == n_surfaces_0 + 3\n assert len_1 == len_0",
"def get_split_positions(read, min_gap):\n cigar = read.cigar\n # Cigar string is a list of tuples:\n if len(read.cigar) <= 1:\n return [] # no break points = empty list of break point positions\n\n ##\n # read has break points if cigar string is longer than 1\n\n # This is a list with the breakpoint tuples\n list_of_break_point_positions = []\n\n # set the current position on the genome\n if cigar[0][0] == 0:\n current_pos = int(read.positions[0])\n else:\n current_pos = int(read.positions[0]) - cigar[0][1]\n\n # Search for breakpoints in cigar and get the corresponding position on the genome\n\n i = 0\n for info_tuple in cigar:\n # If current segment in cigar string is aligned.\n if info_tuple[0] == 0:\n # Special case when at first segment:\n if i == 0 and cigar[1][1] >= min_gap: # first end-split\n list_of_break_point_positions.append((current_pos + info_tuple[1] , True))\n\n # Special case when at last segment:\n elif i == len(cigar) - 1 and cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n\n # Internal segments:\n elif cigar[i - 1][1] >= min_gap and cigar[i + 1][1] >= min_gap:\n if cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n if cigar[i + 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos + info_tuple[1] - 1, True))\n i += 1\n\n current_pos += info_tuple[1]\n\n return(list_of_break_point_positions)",
"def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = 
conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")",
"def poss_block_byrc(self):\n block_horiz, block_vert = self.board_size // self.c_size, self.board_size // self.r_size\n lpos = [self.c_size * no for no in range(block_horiz)]\n vpos = [self.r_size * no for no in range(block_vert)]\n row_poss, col_poss = self.poss_by_block()\n row_poss = [row_poss[x:x + self.c_size] for x in vpos]\n row_poss = [[subset[no] for subset in r] for r in row_poss for no in range(self.r_size)]\n col_poss = [col_poss[x:x + self.r_size] for x in lpos]\n col_poss = [[subset[no] for subset in r] for r in col_poss for no in range(self.c_size)]\n # Rearrange to get blocks in right order (across-down)\n col_poss = [col_poss[i + j * block_vert] for i in range(block_vert) for j in range(block_vert)]\n return row_poss, col_poss",
"def extract_field_blocks(self):\n t_start = time.time()\n\n scale_range = [self.source_range[0], self.source_range[1],\n self.dest_range[0], self.dest_range[1]]\n counter = 0\n\n for source_path, target_path in self.path_pairs:\n\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n block_id = os.path.split(os.path.dirname(target_path))[-1]\n\n try:\n\n result_warp = gdal.Warp(data_settings.TEMP_PATH,\n source_path,\n srcSRS=self.extract_block_projection(\n source_path),\n dstSRS='EPSG:3067',\n cutlineDSName=data_settings.BLOCK_SHAPEFILE_PATH,\n cutlineWhere=\"LOHKO = '{}'\".format(\n block_id),\n cropToCutline=True,\n xRes=self.target_resolution,\n yRes=self.target_resolution,\n srcNodata=self.source_nodata,\n dstNodata=np.nan,\n outputType=gdal.GDT_CFloat32,\n multithread=True)\n\n arr = result_warp.ReadAsArray()\n assert ~np.isnan(arr).all(), \"Warped image contains only NaNs\"\n\n except (RuntimeError, AttributeError, AssertionError) as ex:\n\n print(\"{}\\tError (warp): {}\".format(block_id, ex))\n print(\"\\t\\tFrom\\t{}\".format(source_path))\n print(\"\\t\\tTo\\t{}\".format(data_settings.TEMP_PATH))\n\n continue\n\n finally:\n\n result_warp = None\n\n try:\n\n result_translate = gdal.Translate(target_path,\n settings.TEMP_PATH,\n outputType=gdal.GDT_Float32,\n scaleParams=[scale_range],\n noData=np.nan)\n\n arr = result_translate.ReadAsArray()\n\n assert ~np.isnan(arr).all(\n ), \"Translated image contains only NaNs\"\n assert np.nanmin(arr) >= self.dest_range[0], \"Translated values below lower destination range {}, min={}\".format(\n self.dest_range[0], np.nanmin(arr))\n assert np.nanmax(arr) <= self.dest_range[1], \"Translated values above upper destination range {}, max={}\".format(\n self.dest_range[1], np.nanmax(arr))\n\n print(\"{}\\tFrom\\t{}\".format(block_id, source_path))\n print(\"\\t\\tTo\\t{}\".format(target_path))\n\n plt.rcParams['figure.figsize'] = 1, 1\n\n if len(arr.shape) >= 3:\n\n plt.imshow(arr[:3].transpose(1, 2, 0))\n\n 
else:\n\n plt.imshow(arr, cmap='gray', vmin=0, vmax=1)\n\n plt.axis('off')\n plt.show()\n\n except (RuntimeError, AttributeError, AssertionError) as ex:\n\n print(\"{}\\tError (translate): {}\".format(block_id, ex))\n print(\"\\t\\tFrom\\t{}\".format(data_settings.TEMP_PATH))\n print(\"\\t\\tTo\\t{}\".format(target_path))\n\n finally:\n\n result_translate = None\n\n counter += 1\n\n t_delta = time.time() - t_start\n print(\"Processed {} field blocks in {:.0f}m {:.0f}s\".format(\n counter, t_delta // 60, t_delta % 60))"
] | [
"0.5430976",
"0.5360118",
"0.5335822",
"0.52487403",
"0.5235013",
"0.5226283",
"0.516665",
"0.5099744",
"0.50918156",
"0.5086627",
"0.50657815",
"0.5052516",
"0.497971",
"0.49528226",
"0.49450764",
"0.4940331",
"0.4931173",
"0.49244288",
"0.49213406",
"0.49072263",
"0.48992053",
"0.48969164",
"0.4881415",
"0.4849626",
"0.48433715",
"0.4842572",
"0.4831102",
"0.48275265",
"0.4827228",
"0.48270974"
] | 0.70449966 | 0 |
Given a pattern dataframe and UCSC region string, retrieve only patterns in that region. | def subset_region(df, region):
# Split the region string at each occurence of - or : (yields 3 elements)
chrom, start, end = re.split("[-:]", region)
start, end = int(start), int(end)
# Only keep patterns on the same chromosome as the region and
# within the start-end interval
subset = df.loc[
(df.chrom1 == chrom)
& (df.chrom2 == chrom)
& (df.start1 >= start)
& (df.start2 >= start)
& (df.end1 < end)
& (df.end2 < end),
:,
]
return subset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pattern_search(pattern, dataset, column):\n # Filter\n dataset = dataset[dataset[column].str.contains(pattern, regex=True)]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Return\n return dataset",
"def select_regions(data, region_col, regions, combine_subregions=True):",
"def _region_mask(self, cs, all_regions, xctr, yctr, hwcs):\n if not HAS_REGIONS:\n return None\n ctr_coord = ar.PixCoord(xctr, yctr)\n mask = None\n for reg_str in all_regions:\n # read ds9 string into a region class\n try:\n with set_log_level('CRITICAL'):\n frame_regions = ar.Regions.parse(reg_str, format='ds9')\n except Exception as err:\n log.debug(f'Region parser error: {err}')\n continue\n for fr in frame_regions:\n if cs == 'wcs':\n # convert to a pixel region first\n try:\n with set_log_level('CRITICAL'):\n fr = fr.to_pixel(hwcs)\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region WCS conversion error: {err}')\n continue\n\n # check if cursor is contained in a region\n # in any frame\n with set_log_level('CRITICAL'):\n contained = fr.contains(ctr_coord)\n if hasattr(contained, '__len__'):\n # PolygonPixelRegion returns an array, currently\n # (regions v0.4)\n contained = contained[0]\n\n if contained:\n # get mask from first matching region\n try:\n with set_log_level('CRITICAL'):\n mask = fr.to_mask()\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region mask error: {err}')\n continue\n else:\n log.info(f'Contained in {type(fr).__name__}')\n break\n if mask is not None:\n break\n\n # reset active frame\n return mask",
"def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,\n strand_name=None, sample_name=None):\n\n if sample_name is None:\n region_df.index = np.repeat(default_id_sample, len(region_df))\n else:\n region_df = search_column(region_df, id_sample_aliases,\n id_sample_types, 'id_sample', sample_name)\n region_df = region_df.set_index(\"id_sample\", drop=True)\n region_df = region_df.sort_index()\n\n region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)\n region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)\n region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)\n region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)\n return region_df",
"def get_regions_mask(self, input):",
"def get_masked_regions(contig):\n masked_regions = \"\"\n\n seq = contig.seq\n contig_end = len(seq)-1\n masked = False\n for i, n in enumerate(seq):\n # mark the starting position of a softmasked region\n if not masked and n.islower():\n masked = True\n start = i\n\n # mark end position of softmasked region (can be end of contig)\n if masked and (n.isupper() or i == contig_end):\n masked = False\n end = i\n\n # store softmasked region in bed3 (chr, start, end) format\n masked_regions += f\"{contig.id}\\t{start}\\t{end}\\n\" # noqa: start exists\n\n return masked_regions",
"def filter_sgrnas_by_region(transcript_sgrna_df, sg_positions):\n global_pos_cols = []\n for pos in sg_positions:\n global_pos_cols.append('sgrna_global_' + str(pos))\n filtered_sgrna_df = transcript_sgrna_df[\n ((transcript_sgrna_df[global_pos_cols].min(axis=1) >= transcript_sgrna_df['region_start']) &\n (transcript_sgrna_df[global_pos_cols].min(axis=1) <= transcript_sgrna_df['region_end'])) |\n ((transcript_sgrna_df[global_pos_cols].max(axis=1) >= transcript_sgrna_df['region_start']) &\n (transcript_sgrna_df[global_pos_cols].max(axis=1) <= transcript_sgrna_df['region_end']))\n ].reset_index(drop=True)\n return filtered_sgrna_df",
"def filter_region_graph(data, region):\r\n MetaDct = data[1]\r\n f_MetaDct = {}\r\n for idx in MetaDct:\r\n if idx != ',':\r\n if MetaDct[idx].region == region:\r\n f_MetaDct[idx] = MetaDct[idx].country\r\n return f_MetaDct",
"def remove(df, pattern):\n return df[~df.index.isin(df.query(pattern).index)]",
"def subregionIntersection(highres,lowres,regions):\n \n hr = nb.load(highres)\n hr_data = hr.darrays[0].data\n \n lr = nb.load(lowres)\n \n lr_indices = regionIndices(lr,regions)\n \n hr_regions = list(set(hr_data[lr_indices]))\n \n return hr_regions",
"def filter_on_regexp(strings, pattern):\n filtered_strings = filter(lambda x: re.search(pattern, x), strings)\n return filtered_strings",
"def match_regions(self):\n l = []\n for r1 in self.regions_names():\n for r2 in self.metric.index:\n r11 = r1.replace('-', ' ').lower()\n r22 = r2.replace('-', ' ').lower()\n l.append([r1,r2,fuzz.ratio(r11, r22)])\n\n matched = np.array([x for x in l if x[2] > 80])\n\n return {key: value for (key, value) in matched[:,[1,0]]}",
"def prune(df, regex_list):\n for regex_pattern in regex_list:\n df = df[~df.case_action.str.contains(regex_pattern)]\n return df",
"def submask(mask, region):\n mask = mask[region['blc'][0]:region['trc'][0]+1, region['blc'][1]:region['trc'][1]+1]\n return mask",
"def findall(pattern, string, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.occurrences_re(pattern, string)\n if overlapping:\n return SE.occurrences(pattern, string, sensitive)\n else:\n return SE.full_words(pattern, string, sensitive)",
"def search_premade(phrase = '*',\n language = 'en',\n url = 'http://data.ssb.no/api/v0/dataset'):\n url = '{url}?lang={language}'.format(\n url = url,\n language = language)\n \n df = pd.read_html(url)\n df = df[0]\n df.index = df['ID']\n df = df.iloc[:,[0,1]]\n df = df.sort_index()\n phrase = phrase.lower()\n \n if phrase != '*':\n df = df[(df.iloc[:,0].str.lower().str.contains(phrase)) | \n (df.iloc[:,1].str.lower().str.contains(phrase)) ]\n return df",
"def iter_band_search(self, pattern):\n raise NotImplementedError()",
"def zillow_df(frame, region_list, region_column_str, columns_list):\n frame = frame[frame[region_column_str].isin(region_list)]\n frame = frame[columns_list]\n frame[region_column_str] = frame[region_column_str].astype(str)\n return frame",
"def get_valid_regions(self):\n pass",
"def searchBreakend(self, region): \n\t\treturn filter(lambda X: X == region, self)",
"def processRegionSSD( self, r ):\n ixs = TemplateMatcher.matIxs( self.n ** 2 )\n ## 5 x 5 windows for region r\n ws = map( lambda x : TemplateMatcher.slice( r, self.n, x ), ixs )\n\n ts = map( lambda w : map( lambda t : TemplateMatcher.SSD( t, w ), self.templates ) , ws)\n tMatch = map( lambda x : np.argmin( x, axis = 0), ts )\n lMatch = map( lambda x : np.min( x, axis = 0) ,ts )\n ix = np.argmin( lMatch )\n return TemplateMatcher.coordOffset(ixs[ix], self.n ** 2)#, tMatch[ix] ixs[ix].x, ixs[ix].y, tMatch[ix]",
"def extractRegions(image, region_size_threshold=10000, accessibility=None):\n lbls, nb_lbls = skimage.measure.label(image, return_num=True, connectivity = 1)\n bl, re, ar, pxs = numbaExtractRegions(lbls, nb_lbls, region_size_threshold, accessibility)\n return bl, np.array(re[1:]), np.array(ar[1:]), pxs[1:]",
"def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions",
"def is_valid_region(region):\n return re.match(r'^[a-z0-9\\-]+$', region)",
"def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions",
"def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]",
"def select_regions(args):\n assert args.files, \"Need a set of fastq files\"\n assert args.out, \"Need --out\"\n region = os.path.abspath(args.region)\n workdir = 'select'\n safe_makedir(workdir)\n out_file = os.path.join(workdir, splitext_plus(args.out)[0] + \"_cpg.bed\")\n out_snp_file = os.path.join(workdir, splitext_plus(args.out)[0] + '_snp.bed')\n if not file_exists(out_file):\n with file_transaction(out_file) as tx_out:\n with open(tx_out, 'w') as out_handle:\n # print >> out_handle, \"chrom\\tstart\\tend\\tcu\\tcm\\tstrand\\tgene\\tsample\"\n for in_vcf in args.files:\n snp_file = in_vcf.replace(\"rawcpg\", \"rawsnp\")\n sample = splitext_plus(os.path.basename(in_vcf))[0].split(\"_\")[0]\n get_het(snp_file, region, sample, out_snp_file)\n res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True)\n # cmd = (\"bedtools intersect -u -a {in_vcf} -b {region} > {tx_tmp}\")\n # do.run(cmd.format(**locals()), \"selecting %s\" % in_vcf)\n\n for record in res:\n gene = record[-2]\n chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9]\n cs = info.split(';')[0].split('=')[1]\n frmt = dict(zip(header.split(\":\"), frmt.split(':')))\n if is_good_cpg(frmt):\n tag = \"%s-%s-%s-%s\" % (frmt['CU'], frmt['CM'], gene, sample)\n print >> out_handle, \"%s\\t%s\\t%s\\t%s\\t.\\t%s\" % (chrom, pos, pos + 1, tag, cs)",
"def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions",
"def get_subregions(xint,conn):\n\n subregions = ('SELECT DISTINCT cvt.name, fip.value, f.name '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, ' \n 'feature f, cvterm cvt, cvterm cvt2, feature_relationship fr, feature f2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fi.role_id = cvt.cvterm_id '\n 'AND fip.type_id = cvt2.cvterm_id AND '\n 'cvt2.name = \\'subpart_info\\' AND f.feature_id = fr.subject_id '\n 'AND f2.feature_id = fr.object_id AND f.is_obsolete = \\'f\\' AND '\n 'f2.uniquename = %s AND i.uniquename = %s')\n subs = connect(subregions,xint,conn)\n return(subs)",
"def topic_pattern_match(pattern):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics()\n topics = topic_metadata.topics\n filtered_topics = {key: value for key, value in topics.items() if contains_substring(key, pattern)}\n return len(filtered_topics) > 0"
] | [
"0.5442019",
"0.5398551",
"0.52937627",
"0.5278732",
"0.511519",
"0.508612",
"0.49498475",
"0.48897955",
"0.4878237",
"0.48184252",
"0.48183033",
"0.48031545",
"0.47911888",
"0.47765893",
"0.4776433",
"0.4775891",
"0.47595215",
"0.4746451",
"0.47112608",
"0.469774",
"0.46916112",
"0.467187",
"0.4669566",
"0.46545336",
"0.46347055",
"0.463426",
"0.4615597",
"0.46054575",
"0.46006787",
"0.45829087"
] | 0.6566416 | 0 |
Given a pyGSTi model generate a minimal generating set of gate sequences by taking a union of the fiducials and the germs. If basic gates macros given, generate a macro for each gate sequence as well. | def base_gate_sequence_and_macros(model, basic_gates_macros: dict = None):
prep_fiducials, meas_fiducials, germs = (
model.prep_fiducials(),
model.meas_fiducials(),
model.germs(),
)
# create minimal generating gate sequence
base_gate_sequence = list({k.str.split("@")[0] for k in prep_fiducials + germs + meas_fiducials})
base_gate_sequence.remove("{}")
base_gate_sequence.sort(key=len, reverse=True)
if basic_gates_macros:
# create generating gate sequence macros
base_gate_sequence_macros = [s.split("G") for s in base_gate_sequence]
for i, s in enumerate(base_gate_sequence_macros):
s = [basic_gates_macros[k] for k in s if basic_gates_macros.get(k) is not None]
base_gate_sequence_macros[i] = sequence_macros(s)
return base_gate_sequence, base_gate_sequence_macros
else:
return base_gate_sequence | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_seq_graph(sequence, macros, connects):\n seq_graph = nx.Graph()\n for idx, macro_name in enumerate(sequence):\n sub_graph = macros[macro_name].gen_graph()\n\n nx.set_node_attributes(\n sub_graph, {node: idx for node in sub_graph.nodes}, \"seqid\")\n seq_graph = nx.disjoint_union(seq_graph, sub_graph)\n\n for connect in connects:\n idx, jdx, edges = connect.split(\":\")\n _add_edges(seq_graph, edges, int(idx), int(jdx))\n\n return seq_graph",
"def build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, scratch=False, interval=None):\n \n name_batch1 = os.path.basename([item for item in combined_gvcf_files if \"batch1\" in item][0])\n interval_name = \"\"\n #there must be at least one batch so look for it, not elegant but works\n if name_batch1.split(\"batch1\") != \".g.vcf.gz\":\n interval_name = name_batch1.split(\"batch1\")[1].split(\".\")[0]\n job_name = \"GenotypeGVCFs{}\".format(interval_name)\n output_file = \"{}_joincalled{}.g.vcf.gz\".format(CONFIG[\"output_header\"], interval_name)\n #create the sbatch file to analyse the current batch of samples\n sbatch_file = os.path.join(working_dir, \"sbatch\", \"{}.sbatch\".format(job_name))\n with open(sbatch_file, \"w\") as GenotypeGVCFs:\n slurm = slurm_header(CONFIG[\"uppmax_project\"], job_name, working_dir)\n GenotypeGVCFs.write(slurm)\n GenotypeGVCFs.write(\"\\n\")\n #rsync to scratch all samples\n if scratch:\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{} \\n\".format(job_name)) # create tmp directory\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{}/VCF/ \\n\".format(job_name)) # create tmp directory\n #now cycle over the samples, build the GATK command\n combined_gvcf_string_input = \"\"\n for combined_gvcf in combined_gvcf_files:\n combined_gvcf_path_dir = combined_gvcf\n if scratch:\n GenotypeGVCFs.write(\"rsync -rptoDLv {}* $SNIC_TMP/{}/\\n\".format(combined_gvcf, job_name))\n combined_gvcf_name = os.path.basename(combined_gvcf)\n combined_gvcf_path_dir = \"$SNIC_TMP/{}/{}\".format(job_name, combined_gvcf_name)\n combined_gvcf_string_input += \"-V {} \\\\\\n\".format(combined_gvcf_path_dir)\n\n GATK_command= \"java -Xmx250g -jar {} -T GenotypeGVCFs \\\\\\n\".format(CONFIG[\"GATK\"])\n for option in CONFIG[\"walkers\"][\"GenotypeGVCFs\"]:\n GATK_command += \"{} \\\\\\n\".format(option)\n GATK_command += \"{} \".format(combined_gvcf_string_input)\n if interval is not None:\n GATK_command += \"-L {} \\\\\\n\".format(interval)\n\n if scratch:\n 
GATK_command += \"-o $SNIC_TMP/{}/VCF/{}\\n\".format(job_name, output_file)\n #once this is done rsync back to lupus\n GATK_command += \"rsync $SNIC_TMP/{}/VCF/{}* {}/VCF/\\n\".format(job_name, output_file , working_dir)\n else:\n GATK_command += \"-o {}/VCF/{}\\n\\n\".format(working_dir, output_file)\n GenotypeGVCFs.write(GATK_command)\n #return path to sbach file\n return sbatch_file",
"def sequence_macros(macros):\n\n def foo():\n for m in macros:\n m()\n\n return foo",
"def generate_modelSED_photo_fit(sp=None,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None,DL_Gpc=0.0,cosmo='flat_LCDM',\n\tH0=70.0,Om0=0.3,params_val=None,interp_filters_waves=[],interp_filters_trans=[]):\n\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\t# input model parameters to FSPS:\n\tfor pp in range(len(params_fsps)):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type == 0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type == 1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# 
filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\treturn photo_SED_flux",
"def build_macrocomplex(directory, output, max_chains=300, num_models=1, template=False, dirty=False, verbose=False, stech_string=False):\n print(\"Program is running, please wait...\")\n # Reads and stores pdb objects in a list\n in_pdbmodels = read_pdbs(directory, verbose)\n # Unifies all ids by sequence, updates the pdb list with new chain ids and returns a sequence dictionary: {seq: id,}\n seq_dict = unify_ids(in_pdbmodels, verbose)\n # Checks each pdb object for chain interactions and stores it in a dictionary of dictionaries:\n # {\n # Chain1_id : { residues_tuple_1_to_2 : chain1_object, chain2_object, residues_tuple_2_to_1}\n # Chain2_id : {residues_tuple_2_to_1 : chain2_object, chain1_object, residues_tuple_1_to_2}\n # ...\n # }\n interaction_dict = get_interaction_dict(in_pdbmodels, verbose=verbose)\n # Changes interaction_dict chain objects to CustomChain instances and adds the interactions to each instance\n update_interactions_dict(interaction_dict, verbose)\n stech_dict = {}\n # If a template or a string has been given to set Stoichometry, it generates a dictionary of it\n # { \"A\":5, \"B\":2, \"C\":6, .. }\n if template:\n stech_dict = get_template_stech_dict(template, seq_dict, verbose=verbose)\n elif stech_string:\n stech_dict = get_string_stech_dict(stech_string)\n # Starts iterating the interaction pair with more known interactions and generates the model/s\n out_pdbmodels = main_loop(num_models, output, interaction_dict, verbose, max_chains, dirty, stech_dict=stech_dict)\n # Saves the model/s to ciff format\n save_results(out_pdbmodels, output)",
"def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = 
_apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]",
"def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]",
"def _modified_noise_model_program_header(noise_model: NoiseModel) -> \"Program\":\n from pyquil.quil import Program\n\n p = Program()\n defgates: Set[str] = set()\n for k in noise_model.gates:\n\n # obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict\n try:\n ideal_gate, new_name = get_modified_noisy_gate(k.gate, tuple(k.params))\n\n # if ideal version of gate has not yet been DEFGATE'd, do this\n if new_name not in defgates:\n p.defgate(new_name, ideal_gate)\n defgates.add(new_name)\n except NoisyGateUndefined:\n print(\n \"WARNING: Could not find ideal gate definition for gate {}\".format(k.gate),\n file=sys.stderr,\n )\n new_name = k.gate\n\n # define noisy version of gate on specific targets\n p.define_noisy_gate(new_name, k.targets, k.kraus_ops)\n\n # define noisy readouts\n for q, ap in noise_model.assignment_probs.items():\n p.define_noisy_readout(q, p00=ap[0, 0], p11=ap[1, 1])\n return p",
"def cmd_gaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[2.5,0,10]\")\n cmds.append('Gaussian::res(x,r_m,r_s)')\n return cmds",
"def build_msms():\r\n\r\n # Prepare include file with dynamic data\r\n f = open(os.path.join(GME_ROOT, \"Install\", \"GME_dyn.wxi\"), 'w')\r\n print >> f, \"<!-- DO NOT EDIT THIS FILE. WILL BE REGENERATED BY THE BUILD SCRIPTS -->\"\r\n print >> f, \"<Include>\"\r\n print >> f, \" <?define GUIDSTRMETAGME='%s' ?>\" % (tools.query_GUID(mta_for_xmp(METAGME_XMP)))\r\n print >> f, \" <?define GUIDSTRHFSM='%s' ?>\" % (tools.query_GUID(mta_for_xmp(HFSM_XMP)))\r\n print >> f, \" <?define GUIDSTRSF='%s' ?>\" % (tools.query_GUID(mta_for_xmp(SF_XMP)))\r\n print >> f, \" <?define GUIDSTRUML='%s' ?>\" % (tools.query_GUID(mta_for_xmp(UML_XMP)))\r\n print >> f, \"</Include>\"\r\n f.close()\r\n\r\n import glob\r\n sources = [f for f in glob.glob(os.path.join(GME_ROOT, \"Install\", \"*.wxs\")) if os.path.basename(f) not in ('GME.wxs', 'GME_bundle.wxs')]\r\n if prefs['arch'] == 'x64':\r\n sources.remove(os.path.join(GME_ROOT, \"Install\", \"GME_paradigms.wxs\"))\r\n for file_ in sources:\r\n extras = []\r\n if os.path.basename(file_) == 'GME_paradigms.wxs':\r\n extras = glob.glob(os.path.join(GME_ROOT, \"Install\", \"PIA*/*.wxi\"))\r\n tools.build_WiX([file_] + extras)",
"def generate_schreier_sims(self, af=False):\n\n n = self._degree\n u = self.basic_transversals\n basic_orbits = self._basic_orbits\n if len(u) == 0:\n for x in self.generators:\n if af:\n yield x._array_form\n else:\n yield x\n return\n if len(u) == 1:\n for i in basic_orbits[0]:\n if af:\n yield u[0][i]._array_form\n else:\n yield u[0][i]\n return\n\n u = list(reversed(u))\n basic_orbits = basic_orbits[::-1]\n # stg stack of group elements\n stg = [list(range(n))]\n posmax = [len(x) for x in u]\n n1 = len(posmax) - 1\n pos = [0]*n1\n h = 0\n while 1:\n # backtrack when finished iterating over coset\n if pos[h] >= posmax[h]:\n if h == 0:\n return\n pos[h] = 0\n h -= 1\n stg.pop()\n continue\n p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])\n pos[h] += 1\n stg.append(p)\n h += 1\n if h == n1:\n if af:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n yield p\n else:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n p1 = _af_new(p)\n yield p1\n stg.pop()\n h -= 1",
"def test_get_qasm_all_gates(self):\n q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)\n qc = q_program.get_circuit()\n qr = q_program.get_quantum_register()\n cr = q_program.get_classical_register()\n qc.u1(0.3, qr[0])\n qc.u2(0.2, 0.1, qr[1])\n qc.u3(0.3, 0.2, 0.1, qr[2])\n qc.s(qr[1])\n qc.s(qr[2]).inverse()\n qc.cx(qr[1], qr[2])\n qc.barrier()\n qc.cx(qr[0], qr[1])\n qc.h(qr[0])\n qc.x(qr[2]).c_if(cr, 0)\n qc.y(qr[2]).c_if(cr, 1)\n qc.z(qr[2]).c_if(cr, 2)\n qc.barrier(qr)\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n qc.measure(qr[2], cr[2])\n result = q_program.get_qasm()\n self.assertEqual(len(result), (len(qr.name) * 23 +\n len(cr.name) * 7 +\n 385))",
"def generate_mos(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, devname_mos_boundary, devname_mos_body,\n devname_mos_dmy, m=1, m_dmy=0, origin=np.array([0,0])):\n pg = placement_grid\n rg12 = routing_grid_m1m2\n pfix = objectname_pfix\n\n # placement\n imbl0 = laygen.relplace(name=\"I\" + pfix + 'BL0', templatename=devname_mos_boundary, gridname=pg, xy=origin)\n refi=imbl0\n if not m_dmy==0:\n imdmyl0 = laygen.relplace(name=\"I\" + pfix + 'DMYL0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyl0\n else:\n imdmyl0 = None\n im0 = laygen.relplace(name=\"I\" + pfix + '0', templatename=devname_mos_body, gridname=pg, refobj=refi, shape=[m, 1])\n refi=im0\n if not m_dmy==0:\n imdmyr0 = laygen.relplace(name=\"I\" + pfix + 'DMYR0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyr0\n else:\n imdmyr0 = None\n imbr0 = laygen.relplace(name=\"I\" + pfix + 'BR0', templatename=devname_mos_boundary, gridname=pg, refobj=imdmyr0)\n md=im0.elements[:, 0]\n #route\n #gate\n rg0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['G0'], refobj1=md[-1].pins['G0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['G0'], gridname=rg12)\n #drain\n rdl0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=md[0].pins['D0'], refobj1=md[-1].pins['D0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 1], refobj=_md.pins['D0'], gridname=rg12)\n #source\n rs0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['S0'], refobj1=md[-1].pins['S1'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['S0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=md[-1].pins['S1'], gridname=rg12)\n #dmy\n if m_dmy>=2:\n mdmyl=imdmyl0.elements[:, 0]\n mdmyr=imdmyr0.elements[:, 0]\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyl[0].pins['D0'], 
refobj1=mdmyl[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyr[0].pins['D0'], refobj1=mdmyr[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyl[0].pins['S0'], refobj1=mdmyl[-1].pins['S1'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyr[0].pins['S0'], refobj1=mdmyr[-1].pins['S1'])\n for _mdmyl in mdmyl:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyl.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyl.pins['S0'], gridname=rg12)\n for _mdmyr in mdmyr:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyr.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyr.pins['S1'], gridname=rg12)\n return [imbl0, imdmyl0, im0, imdmyr0, imbr0]",
"def build_gate_1(gate, tags=None):\n\n def apply_constant_single_qubit_gate(psi, i, **gate_opts):\n mtags = _merge_tags(tags, gate_opts)\n psi.gate_(gate, int(i), tags=mtags, **gate_opts)\n\n return apply_constant_single_qubit_gate",
"def gtf_processing(genome=None, prefix='gencov'):\n all_bed = prefix + \".all.bed\"\n\n if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:\n log.info(\"Preprocessing annotation...\")\n features = ('exon', 'gene', 'intron', 'intergenic')\n merged_exons, merged_genes = map(preprocess, features[:2])\n ins = {\n 'intron': [merged_genes, merged_exons],\n 'intergenic': [merged_genes, genome]\n }\n intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])\n\n log.info(\"Concatenate bed files for all elements...\")\n with open(all_bed, 'w') as out_bed:\n cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)\n\n for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):\n os.remove(f)\n\n return all_bed",
"def get_gynodioecious_mating(simu, r_rate, parents_chooser,\n size, sex_seq, rec_sites, field='self_gen'):\n\n sex_mode = (simu.GLOBAL_SEQUENCE_OF_SEX,) + sex_seq\n\n selfing_tagger = get_selfing_tagger(simu, field)\n return simu.HomoMating(chooser=parents_chooser,\n generator=simu.OffspringGenerator(\n ops=[simu.Recombinator(rates=r_rate, loci=rec_sites),\n selfing_tagger],\n sexMode=sex_mode),\n subPopSize=size)",
"def sim12_g_simulation(datafiles, simulation_mag_zeropoint, simulation_exposure):\n return stuff.Simulation(datafiles / 'sim12' / 'sim12_g.list', simulation_mag_zeropoint, simulation_exposure)",
"def fusion_generate_mmmc_script(x: hammer_vlsi.HammerTool) -> str:\n mmmc_output = [] # type: List[str]\n\n def append_mmmc(cmd: str) -> None:\n x.verbose_tcl_append(cmd, mmmc_output)\n\n # Create an Innovus constraint mode.\n constraint_mode = \"my_constraint_mode\"\n sdc_files = [] # type: List[str]\n\n # Generate constraints\n clock_constraints_fragment = os.path.join(x.run_dir, \"clock_constraints_fragment.sdc\")\n with open(clock_constraints_fragment, \"w\") as f:\n f.write(x.sdc_clock_constraints)\n sdc_files.append(clock_constraints_fragment)\n\n # Generate port constraints.\n pin_constraints_fragment = os.path.join(x.run_dir, \"pin_constraints_fragment.sdc\")\n with open(pin_constraints_fragment, \"w\") as f:\n f.write(x.sdc_pin_constraints)\n sdc_files.append(pin_constraints_fragment)\n\n # Add the post-synthesis SDC, if present.\n post_synth_sdc = x.post_synth_sdc\n if post_synth_sdc is not None:\n sdc_files.append(post_synth_sdc)\n\n # TODO: add floorplanning SDC\n if len(sdc_files) > 0:\n sdc_files_arg = \"-sdc_files [list {sdc_files}]\".format(\n sdc_files=\" \".join(sdc_files)\n )\n else:\n blank_sdc = os.path.join(x.run_dir, \"blank.sdc\")\n x.run_executable([\"touch\", blank_sdc])\n sdc_files_arg = \"-sdc_files {{ {} }}\".format(blank_sdc)\n append_mmmc(\"create_constraint_mode -name {name} {sdc_files_arg}\".format(\n name=constraint_mode,\n sdc_files_arg=sdc_files_arg\n ))\n\n corners = x.get_mmmc_corners() # type: List[MMMCCorner]\n # In parallel, create the delay corners\n if corners:\n setup_corner = corners[0] # type: MMMCCorner\n hold_corner = corners[0] # type: MMMCCorner\n pwr_corner = corners[0] # type: MMMCCorner\n # TODO(colins): handle more than one corner and do something with extra corners\n for corner in corners:\n if corner.type is MMMCCornerType.Setup:\n setup_corner = corner\n if corner.type is MMMCCornerType.Hold:\n hold_corner = corner\n if corner.type is MMMCCornerType.Extra:\n pwr_corner = corner\n\n # First, create Innovus 
library sets\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.setup_set\".format(n=setup_corner.name),\n list=x.get_timing_libs(setup_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.hold_set\".format(n=hold_corner.name),\n list=x.get_timing_libs(hold_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.pwr_set\".format(n=pwr_corner.name),\n list=x.get_timing_libs(pwr_corner)\n ))\n # Skip opconds for now\n # Next, create Innovus timing conditions\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.setup_cond\".format(n=setup_corner.name),\n list=\"{n}.setup_set\".format(n=setup_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.hold_cond\".format(n=hold_corner.name),\n list=\"{n}.hold_set\".format(n=hold_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.pwr_cond\".format(n=pwr_corner.name),\n list=\"{n}.pwr_set\".format(n=pwr_corner.name)\n ))\n # Next, create Innovus rc corners from qrc tech files\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.setup_rc\".format(n=setup_corner.name),\n tempInCelsius=str(setup_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(setup_corner)) if x.get_mmmc_qrc(setup_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.hold_rc\".format(n=hold_corner.name),\n tempInCelsius=str(hold_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(hold_corner)) if x.get_mmmc_qrc(hold_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n 
name=\"{n}.pwr_rc\".format(n=pwr_corner.name),\n tempInCelsius=str(pwr_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(pwr_corner)) if x.get_mmmc_qrc(pwr_corner) != '' else ''\n ))\n # Next, create an Innovus delay corner.\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.setup\".format(n=setup_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.hold\".format(n=hold_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name)\n ))\n # Next, create the analysis views\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.setup\".format(n=setup_corner.name), constraint=constraint_mode))\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.hold\".format(n=hold_corner.name), constraint=constraint_mode))\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name), constraint=constraint_mode))\n # Finally, apply the analysis view.\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }} -leakage {{ {pwr_view} }} -dynamic {{ {pwr_view} }}\".format(\n setup_view=\"{n}.setup_view\".format(n=setup_corner.name),\n hold_view=\"{n}.hold_view\".format(n=hold_corner.name),\n pwr_view=\"{n}.pwr_view\".format(n=pwr_corner.name)\n ))\n else:\n # First, create an Innovus library set.\n library_set_name = \"my_lib_set\"\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=library_set_name,\n list=x.get_timing_libs()\n ))\n # Next, create an Innovus 
timing condition.\n timing_condition_name = \"my_timing_condition\"\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=timing_condition_name,\n list=library_set_name\n ))\n # extra junk: -opcond ...\n rc_corner_name = \"rc_cond\"\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=rc_corner_name,\n tempInCelsius=120, # TODO: this should come from tech config\n qrc=\"-qrc_tech {}\".format(x.get_qrc_tech()) if x.get_qrc_tech() != '' else ''\n ))\n # Next, create an Innovus delay corner.\n delay_corner_name = \"my_delay_corner\"\n append_mmmc(\n \"create_delay_corner -name {name} -timing_condition {timing_cond} -rc_corner {rc}\".format(\n name=delay_corner_name,\n timing_cond=timing_condition_name,\n rc=rc_corner_name\n ))\n # extra junk: -rc_corner my_rc_corner_maybe_worst\n # Next, create an Innovus analysis view.\n analysis_view_name = \"my_view\"\n append_mmmc(\"create_analysis_view -name {name} -delay_corner {corner} -constraint_mode {constraint}\".format(\n name=analysis_view_name, corner=delay_corner_name, constraint=constraint_mode))\n # Finally, apply the analysis view.\n # TODO: introduce different views of setup/hold and true multi-corner\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }}\".format(\n setup_view=analysis_view_name,\n hold_view=analysis_view_name\n ))\n\n return \"\\n\".join(mmmc_output)",
"def _marshall_gatedefs(self):\n for gatedef in self.nuq2_ast['g_sect']:\n gate_name = gatedef['gate_name']\n op = ASTRegEx.OP.match(gate_name).group(1) # pylint: disable-msg=invalid-name\n arglist_match = ASTRegEx.ARGLIST.match(gate_name)\n arglist = arglist_match.group(1)\n arity = 0 if len(arglist) == 0 else len(arglist.split(','))\n self.gatedefs[self._op_sig(op, arity)] = gatedef",
"def print_model_generation(model):\n print('g1 = {} MW'.format(model.g[1].value))\n print('g2 = {} MW'.format(model.g[2].value))",
"def generate_testsystem(smiles = 'CCCC',\n forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],\n forcefield_kwargs = {'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'constraints' : None, 'hydrogenMass' : 4 * unit.amus},\n nonperiodic_forcefield_kwargs = {'nonbondedMethod': app.NoCutoff},\n periodic_forcefield_kwargs = {'nonbondedMethod': app.PME},\n small_molecule_forcefield = 'gaff-2.11',\n padding=9*unit.angstroms,\n ionicStrength=0.0*unit.molar,\n water_model = 'tip3p',\n pressure = 1.0 * unit.atmosphere,\n temperature = 300 * unit.kelvin,\n barostat_period = 50,\n **kwargs\n ):\n from openforcefield.topology import Molecule\n from perses.utils.openeye import smiles_to_oemol\n from openmmforcefields.generators.system_generators import SystemGenerator\n from perses.utils.openeye import OEMol_to_omm_ff\n from simtk import openmm\n from qmlify.utils import pull_force_by_name\n\n oemol = smiles_to_oemol(smiles)\n off_molecules = [Molecule.from_openeye(oemol)]\n vac_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n nonperiodic_forcefield_kwargs = nonperiodic_forcefield_kwargs, molecules = off_molecules)\n barostat = openmm.MonteCarloBarostat(pressure, temperature, barostat_period)\n sol_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n periodic_forcefield_kwargs = periodic_forcefield_kwargs,\n molecules = off_molecules,\n barostat = barostat)\n\n\n vac_system, vac_positions, vac_topology = OEMol_to_omm_ff(oemol, vac_system_generator)\n\n #now i can attempt to solvate\n modeller = app.Modeller(vac_topology, vac_positions)\n modeller.addSolvent(sol_system_generator.forcefield, model=water_model, padding=padding, ionicStrength=ionicStrength)\n sol_positions, sol_topology = modeller.getPositions(), modeller.getTopology()\n 
sol_positions = unit.quantity.Quantity(value = np.array([list(atom_pos) for atom_pos in sol_positions.value_in_unit_system(unit.md_unit_system)]), unit = unit.nanometers)\n sol_system = sol_system_generator.create_system(sol_topology)\n\n vac_sys_pos_top = (vac_system, vac_positions, vac_topology)\n sol_sys_pos_top = (sol_system, sol_positions, sol_topology)\n\n #a quick assertion to make sure the nonbonded forces are being treated properly\n vac_nbf, sol_nbf = pull_force_by_name(vac_system, 'NonbondedForce'), pull_force_by_name(sol_system, 'NonbondedForce')\n assert not vac_nbf.usesPeriodicBoundaryConditions()\n assert sol_nbf.usesPeriodicBoundaryConditions()\n\n return vac_sys_pos_top, sol_sys_pos_top",
"def wrt_gau_input_once(self, imol):\n prefix = self.config['job_prefix'] \n inpfile = prefix + \"x\" +str(imol) + \".gjf\"\n t = self.template\n fp = open(inpfile, \"w\")\n link0 = t['link0']\n link0['%chk'] = prefix + \"x\" + str(imol) + \".chk\\n\"\n for key in link0:\n print >>fp, \"%s=%s\" % (key, link0[key]),\n print >>fp, \"%s\" % t['route'],\n print >>fp, \"\"\n print >>fp, \"%s\" % t['title']\n print >>fp, \"\"\n\n molspec = t['molspec']\n print >>fp, \"%s\" % molspec['spin_charge']\n \n onemol = self.model['mol'][imol]\n natom = onemol['natom']\n for atom in onemol['atom']:\n line = self.__build_gau_atom(atom)\n print >>fp, \"%s\" % line\n \n print >>fp, \"\"\n print >>fp, \"%s\" % t['tail'],\n \n fp.close()\n return",
"def define_gan(g_model, d_model):\r\n # make weights in the discriminator (some shared with the q model) as not trainable\r\n d_model.trainable = False\r\n # connect g outputs to d inputs\r\n d_output = d_model(g_model.output)\r\n # define composite model\r\n model = Model(g_model.input, d_output)\r\n # compile model\r\n opt = Adam(lr=0.0001, beta_1=0.5)\r\n model.compile(loss=['binary_crossentropy'], optimizer=opt)\r\n return model",
"def generateXMLmodel(quickLogger,\n base,\n galactic_file=\"gal_2yearp7v6_v0.fits\",\n isotropic_file=\"iso_p7v6source.txt\",\n catalog_file=\"gll_psc_v07.fit\"):\n\n\n try:\n checkForFiles(quickLogger,[base+\"_model.xml\"])\n quickLogger.info(base+\"_model.xml exists, won't create a new one.\")\n except(FileNotFound):\n quickLogger.info(base+\"_model.xml doesn't exist, will create a new one.\") \n try:\n checkForFiles(quickLogger,[base+\"_filtered_gti.fits\",galactic_file,isotropic_file,catalog_file])\n import make2FGLxml\n mymodel = make2FGLxml.srcList(catalog_file,base+\"_filtered_gti.fits\",base+\"_model.xml\")\n mymodel.makeModel(galactic_file, 'gal_2yearp7v6_v0', isotropic_file, 'iso_p7v6source')\n quickLogger.info(\"NOTE: if there are extended sources in your ROI, make sure the \"\\\n +\"correspoinding diffuse template is in the working directory.\")\n except(FileNotFound):\n raise FileNotFound",
"def generic_fsim_gate(\n fsim_angles: Dict[str, float], qubits: Tuple[cirq.GridQubit, cirq.GridQubit]\n) -> List[cirq.OP_TREE]:\n q_0, q_1 = qubits\n g_f = [\n cirq.Z(q_0)\n ** (\n -(\n fsim_angles[\"delta_minus_off_diag\"]\n + fsim_angles[\"delta_minus_diag\"]\n - 2 * fsim_angles[\"delta_plus\"]\n )\n / np.pi\n / 4.0\n ),\n cirq.Z(q_1)\n ** (\n (\n fsim_angles[\"delta_minus_off_diag\"]\n + fsim_angles[\"delta_minus_diag\"]\n + 2 * fsim_angles[\"delta_plus\"]\n )\n / np.pi\n / 4.0\n ),\n ] # type: List[cirq.OP_TREE]\n\n if not np.isclose(fsim_angles[\"phi\"], 0):\n g_f.append(cirq.CZ(q_0, q_1) ** (-fsim_angles[\"phi\"] / np.pi))\n\n if not np.isclose(fsim_angles[\"theta\"], 0):\n g_f.append(cirq.ISWAP(q_0, q_1) ** (-fsim_angles[\"theta\"] / (np.pi / 2.0)))\n\n g_f.append(\n cirq.Z(q_0)\n ** (-(fsim_angles[\"delta_minus_diag\"] - fsim_angles[\"delta_minus_off_diag\"]) / np.pi / 4.0)\n )\n g_f.append(\n cirq.Z(q_1)\n ** ((fsim_angles[\"delta_minus_diag\"] - fsim_angles[\"delta_minus_off_diag\"]) / np.pi / 4.0)\n )\n return g_f",
"def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > 
cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models",
"def generate():",
"def _gen_ms(theta, nb_qubits):\n routine = QRoutine()\n\n for first_qb in range(nb_qubits):\n for second_qb in range(first_qb + 1, nb_qubits):\n routine.apply(RXX(theta), [first_qb, second_qb])\n\n return routine",
"def generate(self, ml):\n self.result = ''\n\n # Generate subsystems.\n for m in ml.active_subsystems():\n self.enter()\n m.generate(self, None)\n self.leave()\n assert(self.indent == 0)\n for m in ml.all_fifos():\n self.enter()\n m.generate(self, None)\n self.leave()\n assert(self.indent == 0)\n\n # Generate the top level.\n self.append(\"library ieee;\")\n self.append(\"use ieee.std_logic_1164.all;\")\n self.append(\"use ieee.numeric_std.all;\")\n self.append(\"entity mem is\")\n self.enter()\n self.append(\"port (\")\n self.enter()\n self._emit_downstream_ports(ml)\n self._emit_upstream_ports(ml)\n self.append(\"clk : in std_logic;\")\n self.append(\"rst : in std_logic\")\n self.leave()\n self.append(\");\")\n self.leave()\n self.append(\"end mem;\")\n self.append(\"architecture rtl of mem is\")\n self.enter()\n self._emit_downstream_signals(ml)\n self.leave()\n self.result += self.sigs\n self.append(\"begin\")\n self.result += self.code\n self.enter()\n self._connect_downstream_ports(ml)\n self._connect_upstream_ports(ml)\n self.leave()\n self.append(\"end rtl;\")\n assert(self.indent == 0)\n return self.result",
"def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x"
] | [
"0.54505414",
"0.51954615",
"0.51067597",
"0.50813454",
"0.5022131",
"0.49704906",
"0.4932094",
"0.49202523",
"0.48829344",
"0.4879901",
"0.48722965",
"0.4869388",
"0.48138118",
"0.48023358",
"0.47683543",
"0.4765257",
"0.47583735",
"0.47547734",
"0.47535294",
"0.47480232",
"0.47444925",
"0.47435153",
"0.4739192",
"0.47210768",
"0.4709027",
"0.4694699",
"0.46860248",
"0.46859795",
"0.4671371",
"0.4660655"
] | 0.7839262 | 0 |
Generate a single macro from a list of macros | def sequence_macros(macros):
def foo():
for m in macros:
m()
return foo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_macros(self, text):\n if text is None:\n return\n \n # join and split so we can accept a list or string. \n text = ''.join(text)\n for m in text.splitlines():\n name, body = m.split(None, 1)\n name, args = name.split('(', 1)\n args = '(%s' % args\n self.all[name] = c_ast.Macro(name, args, body)",
"def _expand_macros(tokdict,macrodict):\r\n def macro_value(m):\r\n return '(?:%s)' % macrodict[m.groupdict()['macro']]\r\n for key, value in tokdict.items():\r\n while re.search(r'{[a-z][a-z0-9-]*}', value):\r\n value = re.sub(r'{(?P<macro>[a-z][a-z0-9-]*)}',\r\n macro_value, value)\r\n tokdict[key] = value\r\n return tokdict",
"def test_make_macrobes(self):\n basic_test_runner(self, 'macrobes')",
"def _expand_macros(self, macro_ids) -> None:\n if macro_ids is None:\n return\n\n for macro_id in macro_ids:\n macro_cfg = self._app_cfg['macros'][macro_id]\n\n self._sweep_stages.extend(macro_cfg.get('sweeps', []))\n self._reset_stages.extend(macro_cfg.get('resets', []))\n self._process_stages.extend(macro_cfg.get('processes', []))",
"def macros(self):\r\n return macros.Macros(self)",
"def __expand_macros(self, text, macros):\n #print macros.items()\n for k, v in macros.items():\n text = re.sub(r'\\$\\(({key}|{key},[^)]*)\\)'.format(key=k),v, text)\n\n return text",
"def resolve_macros(lines: List[SourceLine]) -> List[SourceLine]:\n macros: Dict[str, Dict[str, Any]] = {}\n found_macro = None\n lines_to_remove = []\n for idx, srcline in enumerate(lines):\n if found_macro is None:\n match = re.match(mipsRE.MACRO, srcline.line)\n if match:\n found_macro = match[1]\n macros[match[1]] = {\n \"args\": [a.strip() for a in match[2].split(\", \")] if match[2] else None,\n \"lines\": [],\n }\n lines_to_remove.append(idx)\n else:\n if \".end_macro\" in srcline.line:\n found_macro = None\n lines_to_remove.append(idx)\n else:\n macros[found_macro][\"lines\"].append(srcline)\n lines_to_remove.append(idx)\n\n lines = [l for i, l in enumerate(lines) if i not in lines_to_remove]\n\n for idx, srcline in enumerate(lines):\n for macro, macro_info in macros.items():\n if macro in srcline.line:\n if macro_info[\"args\"] is None:\n lines[idx] = macro_info[\"lines\"]\n else:\n macro_with_args(idx, lines, macro, macro_info, srcline)\n\n return flatten(lines)",
"def special_defmacro(self, form):\n if not len(form) >= 3:\n raise SyntaxError(\"Not enough forms\")\n #print form\n form[0] = I('def')\n #print \"Macro s-exps:\", form\n fun = self.compileModule([form])\n #print \"macro-function:\", fun\n code = self.getCode(fun, mode='exec')\n ns = makeNS({})\n #print \"EVALING\", code, \"IN\", ns\n eval(code, ns)\n self.macros[form[1][0].name] = ns[form[1][0].name]\n #print \"GOT THE MACRO!\", ns[form[1].name]",
"def enrich_macro(self):\n if self.multi is True:\n # The macro has no inputs\n rxp = '\\\\' + self.macro_name + '\\s*(.*$)'\n else:\n # The macro has one or more inputs\n rxp = '\\\\' + self.macro_name + '(?![a-zA-Z])'\n return rxp",
"def get_macro(name, macros):\n for themacro in (macro for macro in macros if macro.get('name') == name):\n if themacro:\n return(themacro)\n else:\n pass",
"def magic_macro(self,parameter_s = ''):\n\n args = parameter_s.split()\n name,ranges = args[0], args[1:]\n #print 'rng',ranges # dbg\n cmds = self.extract_input_slices(ranges)\n macro = Macro(cmds)\n self.user_ns.update({name:macro})\n print 'Macro `%s` created. To execute, type its name (without quotes).' % name\n print 'Macro contents:'\n print str(macro).rstrip(),",
"def fill_macros(item: Any, macros: Dict[str, Dict[str, Any]] = None) -> Any:\n if not macros:\n return item\n if ismacro(item):\n macro, param = get_macro_and_param(item)\n return macros.get(macro, {}).get(param, item)\n if isinstance(item, dict):\n return {key: fill_macros(value, macros) for key, value in item.items()}\n if isinstance(item, list):\n return [fill_macros(it, macros) for it in item]\n if isinstance(item, tuple):\n return tuple(fill_macros(it, macros) for it in item)\n return item",
"def process_macro(parent, macro, style_macros, reference):\n mlist = [process_node(parent, style_node, style_macros, reference) \n for style_node in macro]\n return(mlist)",
"def macros(self) -> Dict[str, List[str]]:\n\n result: Dict[str, List[str]] = {}\n for spec in self.specs.values():\n result[spec.name] = []\n for macro in spec.macros.values():\n result[spec.name].append(macro[0])\n return result",
"def macro(self, macro_id):\r\n return macros.Macro(self, macro_id)",
"def define_and_write_macro(data):\n\n tree = data['vcxproj']['tree']\n ns = data['vcxproj']['ns']\n cmake = data['cmake']\n\n try:\n preprocessor = tree.xpath('//ns:PreprocessorDefinitions', namespaces=ns)[0]\n except IndexError:\n return\n\n if preprocessor.text:\n cmake.write('# Definition of Macros\\n')\n cmake.write('add_definitions(\\n')\n for preproc in preprocessor.text.split(\";\"):\n if preproc != '%(PreprocessorDefinitions)' and preproc != 'WIN32':\n cmake.write(' -D%s \\n' % preproc)\n # Unicode\n unicode = tree.find(\"//ns:CharacterSet\", namespaces=ns)\n if unicode is not None:\n if 'Unicode' in unicode.text:\n cmake.write(' -DUNICODE\\n')\n cmake.write(' -D_UNICODE\\n')\n cmake.write(')\\n\\n')",
"def macro_with_args(idx: int, lines: List[SourceLine], macro: str, macroinfo: Dict[str, Any], srcline: SourceLine):\n macroregex = fr\"{macro}\\((.+)\\)\"\n match = re.match(macroregex, srcline.line)\n if match:\n values = match[1].split(\", \")\n argsmap = {arg: val for arg, val in zip(macroinfo[\"args\"], values)}\n expanded_macro = []\n for s in macroinfo[\"lines\"]:\n modified_line = s.line\n for a, v in argsmap.items():\n modified_line = modified_line.replace(a, v)\n expanded_macro.append(SourceLine(line=modified_line, lineno=s.lineno, filename=s.filename))\n lines[idx] = expanded_macro # type: ignore",
"def latex_macros(\n samples, parameter_dict=None, labels=None, rounding=\"smart\"\n ):\n macros = \"\"\n data = {i: i for i in samples[0].keys()}\n if parameter_dict is not None:\n import copy\n\n data = copy.deepcopy(parameter_dict)\n for param in parameter_dict.keys():\n if not all(param in samples_dict.keys() for samples_dict in samples):\n logger.warning(\n \"{} not in list of parameters. Not generating \"\n \"macro\".format(param)\n )\n data.pop(param)\n for param, desc in data.items():\n for num, samples_dict in enumerate(samples):\n if labels:\n description = \"{}{}\".format(desc, labels[num])\n else:\n description = desc\n\n median = samples_dict[param].average(type=\"median\")\n confidence = samples_dict[param].confidence_interval()\n if rounding == \"smart\":\n from pesummary.utils.utils import smart_round\n\n median, upper, low = smart_round([\n median, confidence[1] - median, median - confidence[0]\n ])\n else:\n median = np.round(median, rounding)\n low = np.round(median - confidence[0], rounding)\n upper = np.round(confidence[1] - median, rounding)\n macros += (\n \"\\\\def\\\\%s{$%s_{-%s}^{+%s}$}\\n\" % (\n description, median, low, upper\n )\n )\n macros += (\n \"\\\\def\\\\%smedian{$%s$}\\n\" % (description, median)\n )\n macros += (\n \"\\\\def\\\\%supper{$%s$}\\n\" % (\n description, np.round(median + upper, 9)\n )\n )\n macros += (\n \"\\\\def\\\\%slower{$%s$}\\n\" % (\n description, np.round(median - low, 9)\n )\n )\n return macros",
"def convert_macro(macro):\n convert_dict = {\n 'CONTACTNAME' : 'user.name',\n 'CONTACTALIAS' : 'user.display_name',\n 'CONTACTEMAIL' : 'user.email',\n 'CONTACTPAGER' : 'user.pager',\n 'SERVICEDESC' : 'service.name',\n 'SERVICEDISPLAYNAME' : 'service.display_name',\n 'SERVICECHECKCOMMAND' : 'service.check_command',\n 'SERVICESTATE' : 'service.state',\n 'SERVICESTATEID' : 'service.state_id',\n 'SERVICESTATETYPE' : 'service.state_type',\n 'SERVICEATTEMPT' : 'service.check_attempt',\n 'MAXSERVICEATTEMPT' : 'service.max_check_attempts',\n 'LASTSERVICESTATE' : 'service.last_state',\n 'LASTSERVICESTATEID' : 'service.last_state_id',\n 'LASTSERVICESTATETYPE' : 'service.last_state_type',\n 'LASTSERVICESTATECHANGE' : 'service.last_state_change',\n 'SERVICEDOWNTIME' : 'service.downtime_depth',\n 'SERVICEDURATIONSEC' : 'service.duration_sec',\n 'SERVICELATENCY' : 'service.latency',\n 'SERVICEEXECUTIONTIME' : 'service.execution_time',\n 'SERVICEOUTPUT' : 'service.output',\n 'SERVICEPERFDATA' : 'service.perfdata',\n 'LASTSERVICECHECK' : 'service.last_check',\n 'SERVICENOTES' : 'service.notes',\n 'SERVICENOTESURL' : 'service.notes_url',\n 'SERVICEACTIONURL' : 'service.action_url',\n 'HOSTNAME' : 'host.name',\n 'HOSTADDRESS' : 'host.address',\n 'HOSTADDRESS6' : 'host.address6',\n 'HOSTDISPLAYNAME' : 'host.display_name',\n 'HOSTALIAS' : 'host.display_name',\n 'HOSTCHECKCOMMAND' : 'host.check_command',\n 'HOSTSTATE' : 'host.state',\n 'HOSTSTATEID' : 'host.state_id',\n 'HOSTSTATETYPE' : 'host.state_type',\n 'HOSTATTEMPT' : 'host.check_attempt',\n 'MAXHOSTATTEMPT' : 'host.max_check_attempts',\n 'LASTHOSTSTATE' : 'host.last_state',\n 'LASTHOSTSTATEID' : 'host.last_state_id',\n 'LASTHOSTSTATETYPE' : 'host.last_state_type',\n 'LASTHOSTSTATECHANGE' : 'host.last_state_change',\n 'HOSTDOWNTIME' : 'host.downtime_depth',\n 'HOSTDURATIONSEC' : 'host.duration_sec',\n 'HOSTLATENCY' : 'host.latency',\n 'HOSTEXECUTIONTIME' : 'host.execution_time',\n 'HOSTOUTPUT' : 'host.output',\n 'HOSTPERFDATA' : 
'host.perfdata',\n 'LASTHOSTCHECK' : 'host.last_check',\n 'HOSTNOTES' : 'host.notes',\n 'HOSTNOTESURL' : 'host.notes_url',\n 'HOSTACTIONURL' : 'host.action_url',\n 'TOTALSERVICES' : 'host.num_services',\n 'TOTALSERVICESOK' : 'host.num_services_ok',\n 'TOTALSERVICESWARNING' : 'host.num_services_warning',\n 'TOTALSERVICESUNKNOWN' : 'host.num_services_unknown',\n 'TOTALSERVICESCRITICAL' : 'host.num_services_critical',\n 'COMMANDNAME' : 'command.name',\n 'NOTIFICATIONTYPE' : 'notification.type',\n 'NOTIFICATIONAUTHOR' : 'notification.author',\n 'NOTIFICATIONCOMMENT' : 'notification.comment',\n 'NOTIFICATIONAUTHORNAME' : 'notification.author',\n 'NOTIFICATIONAUTHORALIAS' : 'notification.author',\n 'TIMET' : 'icinga.timet',\n 'LONGDATETIME' : 'icinga.long_date_time',\n 'SHORTDATETIME' : 'icinga.short_date_time',\n 'DATE' : 'icinga.date',\n 'TIME' : 'icinga.time',\n 'PROCESSSTARTTIME' : 'icinga.uptime',\n 'TOTALHOSTSUP' : 'icinga.num_hosts_up',\n 'TOTALHOSTSDOWN' : 'icinga.num_hosts_down',\n 'TOTALHOSTSUNREACHABLE' : 'icinga.num_hosts_unreachable',\n 'TOTALHOSTSDOWNUNHANDLED' : '-',\n 'TOTALHOSTSUNREACHABLEUNHANDLED' : '-',\n 'TOTALHOSTPROBLEMS' : 'down',\n 'TOTALHOSTPROBLEMSUNHANDLED' : 'down-(downtime+acknowledged)',\n 'TOTALSERVICESOK' : 'icinga.num_services_ok',\n 'TOTALSERVICESWARNING' : 'icinga.num_services_warning',\n 'TOTALSERVICESCRITICAL' : 'icinga.num_services_critical',\n 'TOTALSERVICESUNKNOWN' : 'icinga.num_services_unknown',\n 'TOTALSERVICESWARNINGUNHANDLED' : '-',\n 'TOTALSERVICESCRITICALUNHANDLED' : '-',\n 'TOTALSERVICESUNKNOWNUNHANDLED' : '-',\n 'TOTALSERVICEPROBLEMS' : 'ok+warning+critical+unknown',\n 'TOTALSERVICEPROBLEMSUNHANDLED' : 'warning+critical+unknown-(downtime+acknowledged)',\n 'CHANGE_CUSTOM_CONTACT_VAR' : 'CHANGE_CUSTOM_USER_VAR',\n }\n\n #The following external commands are not supported:\n unsupported = [\n 'CHANGE_*MODATTR',\n 'CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD',\n 'CHANGE_HOST_NOTIFICATION_TIMEPERIOD',\n 
'CHANGE_SVC_NOTIFICATION_TIMEPERIOD',\n 'DEL_DOWNTIME_BY_HOSTGROUP_NAME',\n 'DEL_DOWNTIME_BY_START_TIME_COMMENT',\n 'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST',\n 'DISABLE_CONTACT_HOST_NOTIFICATIONS',\n 'DISABLE_CONTACT_SVC_NOTIFICATIONS',\n 'DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS',\n 'DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS',\n 'DISABLE_FAILURE_PREDICTION',\n 'DISABLE_HOST_AND_CHILD_NOTIFICATIONS',\n 'DISABLE_HOST_FRESHNESS_CHECKS',\n 'DISABLE_NOTIFICATIONS_EXPIRE_TIME',\n 'DISABLE_SERVICE_FRESHNESS_CHECKS',\n 'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST',\n 'ENABLE_CONTACT_HOST_NOTIFICATIONS',\n 'ENABLE_CONTACT_SVC_NOTIFICATIONS',\n 'ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS',\n 'ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS',\n 'ENABLE_FAILURE_PREDICTION',\n 'ENABLE_HOST_AND_CHILD_NOTIFICATIONS',\n 'ENABLE_HOST_FRESHNESS_CHECKS',\n 'ENABLE_SERVICE_FRESHNESS_CHECKS',\n 'READ_STATE_INFORMATION',\n 'SAVE_STATE_INFORMATION',\n 'SET_HOST_NOTIFICATION_NUMBER',\n 'SET_SVC_NOTIFICATION_NUMBER',\n 'START_ACCEPTING_PASSIVE_HOST_CHECKS',\n 'START_ACCEPTING_PASSIVE_SVC_CHECKS',\n 'START_OBSESSING_OVER_HOST',\n 'START_OBSESSING_OVER_HOST_CHECKS',\n 'START_OBSESSING_OVER_SVC',\n 'START_OBSESSING_OVER_SVC_CHECKS',\n 'STOP_ACCEPTING_PASSIVE_HOST_CHECKS',\n 'STOP_ACCEPTING_PASSIVE_SVC_CHECKS',\n 'STOP_OBSESSING_OVER_HOST',\n 'STOP_OBSESSING_OVER_HOST_CHECKS',\n 'STOP_OBSESSING_OVER_SVC',\n 'STOP_OBSESSING_OVER_SVC_CHECKS',\n]\n # Strip the $$\n sane_macro = macro.translate(None, '$')\n #debug(sane_macro)\n # Return true when it's a global parameter\n if sane_macro in convert_dict:\n return False, '$' + convert_dict[sane_macro] + '$'\n elif sane_macro in unsupported:\n return False, 'UNSUPPORTED'\n elif not sane_macro in ['ARG1','ARG2','ARG3','ARG4','ARG5','ARG6']:\n return True, sane_macro.title()\n else:\n return False, ''",
"def start_macro(self, macrolist=None, folderlist=None, search_subdirs=None,\n **kwargs):\n # Handle different input types\n if isinstance(macrolist, types.GeneratorType):\n macrolist = list(macrolist)\n if isinstance(macrolist, AnyMacro):\n macrolist = macrolist.create_macros()\n elif isinstance(macrolist, list) and len(macrolist):\n if isinstance(macrolist[0], string_types):\n macrolist = [macrolist]\n if isinstance(macrolist[0], MacroCommand):\n macrolist = [macrolist]\n if isinstance(macrolist[0], list) and len(macrolist[0]):\n if isinstance(macrolist[0][0], MacroCommand):\n macrolist = [[mc.get_macro(index=0) for mc in elem]\n for elem in macrolist]\n elif isinstance(macrolist, string_types):\n if macrolist.startswith('[') and macrolist.endswith(']'):\n macrolist = macrolist.strip('[').rstrip(']')\n macrolist = [macrolist.split(', ')]\n else:\n macrolist = [[macrolist]]\n elif isinstance(macrolist, (type(None), AnyPyProcessOutputList)):\n pass\n else:\n raise ValueError('Wrong input argument for macrolist')\n # Check folderlist input argument\n if not folderlist:\n folderlist = [os.getcwd()]\n if not isinstance(folderlist, list):\n raise TypeError('folderlist must be a list of folders')\n # Extend the folderlist if search_subdir is given\n if (isinstance(search_subdirs, string_types) and\n isinstance(folderlist[0], string_types)):\n folderlist = sum([getsubdirs(d, search_subdirs)\n for d in folderlist], [])\n # Check the input arguments and generate the tasklist\n if macrolist is None:\n if self.cached_tasklist:\n tasklist = self.cached_tasklist\n else:\n raise ValueError('macrolist argument can only be ommitted if '\n 'the AnyPyProcess object has cached output '\n 'to process')\n elif isinstance(macrolist[0], collections.Mapping):\n tasklist = list(_Task.from_output_list(macrolist))\n elif isinstance(macrolist[0], list):\n arg_hash = make_hash([macrolist, folderlist, search_subdirs])\n if self.cached_tasklist and self.cached_arg_hash == arg_hash:\n tasklist = 
self.cached_tasklist\n else:\n self.cached_arg_hash = arg_hash\n tasklist = list(_Task.from_macrofolderlist(macrolist,\n folderlist))\n else:\n raise ValueError('Nothing to process for ' + str(macrolist))\n\n self.summery = _Summery(have_ipython=run_from_ipython(),\n silent=self.silent)\n\n if self.logfile_prefix is None:\n self.logfile_prefix = str(self.cached_arg_hash)[:4] + '_'\n\n # Start the scheduler\n process_time = self._schedule_processes(tasklist, self._worker)\n self.cleanup_logfiles(tasklist)\n # Cache the processed tasklist for restarting later\n self.cached_tasklist = tasklist\n self.summery.final_summery(process_time, tasklist)\n task_output = [task.get_output(include_task_info=self.return_task_info)\n for task in tasklist]\n return AnyPyProcessOutputList(task_output)",
"def get_macro(self, name: str) -> str:\n data = struct.pack('<HH{}s'.format(len(name)), 0, len(name), name.encode())\n return self.__intf(2, data)[2:].decode()",
"def write_macro(self, macro, mac_dir=\"\"):\n if (mac_dir == \"\"):\n mac_dir = os.getcwd()\n self._mac_dir = mac_dir+\"/\"\n self._mac_path = self._mac_dir+self._name+\".mac\"\n mac_file = open(self._mac_path, \"w\")\n for line in macro:\n mac_file.write(line)\n mac_file.close()",
"def make_macro(extension):\n def macro(body, args, kwargs, is_block, environ):\n if extension.is_void and body:\n return wrap(MarkupErrorElement(\n _(u'Macro “%s” was passed a body but does not '\n u'support it.') % extension.name))\n # ensure the body is not None, this could break extensions that\n # do not expect None as value.\n body = body or u''\n if not extension.is_isolated:\n arg = CreoleParser().parse(body, environ['reason'])\n else:\n arg = body\n if extension.argument_attribute and args:\n kwargs[extension.argument_attribute] = u' '.join(args)\n return wrap(extension.process(kwargs, arg, environ['reason']))\n return macro",
"def substitute_macroscripts(topconstruct):\n #pylint: disable=too-many-locals\n def generate_method(namecode):\n event_name, code = namecode\n method_name = syntax.Construct(syntax.VAR_NAME, event_name.args[0])\n method_name.resolution = RESOLUTION_NAKED\n fcn = syntax.Construct(syntax.FUNCTION_DEF, method_name, [], code)\n method = syntax.Construct(syntax.STRUCT_MEMBER_METHOD, fcn)\n return method\n\n def generate_handler_method(ondohandler):\n return generate_method(ondohandler.args)\n\n for _, macro_script_it in enumerate(\n query(\n [is_layering([syntax.MACROSCRIPT_DEF])],\n TreeItem(topconstruct))\n ):\n macro_script = macro_script_it.construct\n vname = macro_script.args[0]\n vnop = macro_script.args[1]\n exprseq = macro_script.args[2]\n # we need the upper \"program\" construct for inserting\n # statements\n\n # create a class for the macroscript\n sname = f\"MacroScript_{vname.args[0]}\"\n snamec = syntax.Construct(syntax.VAR_NAME, sname)\n snamec.resolution = RESOLUTION_NAKED\n\n if exprseq.construct == syntax.EXPR_SEQ:\n mname = syntax.Construct(syntax.VAR_NAME, \"execute\")\n mname.resolution = RESOLUTION_NAKED\n other_decls = []\n event_members = list(map(generate_method, [(mname, exprseq)]))\n events = [\"execute\"]\n else:\n clause_items = exprseq.args[0]\n on_do_handlers = list(filter(lambda c: c.construct == syntax.ON_DO_HANDLER, clause_items))\n other_decls = list(filter(lambda c: c.construct != syntax.ON_DO_HANDLER, clause_items))\n event_members = list(map(generate_handler_method, on_do_handlers))\n events = list(map(lambda c: c.args[0].args[0], on_do_handlers))\n #decl_class = syntax.Construct(syntax.STRUCT_DEF, snamec, members)\n\n decl_class = syntax.Construct(syntax.PY_MACROSCRIPT_CLASS, snamec, vname, event_members, events, other_decls)\n\n # and substitute the MASCROSCRIPT_DEF by a call to the struct\n # constructor\n snamec = syntax.Construct(syntax.VAR_NAME, sname)\n snamec.resolution = RESOLUTION_NAKED\n cnstr_call = 
syntax.Construct(syntax.CALL, snamec, vnop)\n macro_script_it.replace_construct(cnstr_call)\n\n # we now want to add this class declaration in the top level of the\n # program\n # (note: we do this after because this breaks indices... could cause other\n # problems?)\n function_program = find_first_parent([is_function_program], macro_script_it)\n function_program_construct = function_program.construct\n function_program_construct.args[0].insert(0, decl_class)",
"def test_macro(self):\n env = create_env()\n pkg = create_manifest(\"mock\")\n result = mock_lint(env, pkg,\n \"\"\"\n macro(test)\n cmd2()\n endmacro()\n cmd1()\n test()\n cmd3()\n \"\"\", checks=None, indentation=True)\n self.assertEqual([], result)\n\n result = mock_lint(env, pkg,\n \"\"\"\n macro(test)\n if()\n cmd()\n endif()\n endmacro()\n cmd1()\n test()\n cmd3()\n \"\"\", checks=None, indentation=True)\n self.assertEqual([\"INDENTATION\"], result)\n\n result = mock_lint(env, pkg,\n \"\"\"\n macro(test2)\n cmd()\n endmacro()\n macro(test)\n if()\n cmd()\n test2()\n cmd()\n endif()\n endmacro()\n cmd1()\n test()\n cmd3()\n \"\"\", checks=None, indentation=True)\n self.assertEqual([], result)\n result = mock_lint(env, pkg,\n \"\"\"\n macro(test4)\n cmd()\n if()\n cmd()\n endif()\n endmacro()\n macro(test3)\n test4()\n endmacro()\n macro(test2)\n test3()\n if()\n if()\n if()\n cmd()\n test3()\n endif()\n endif()\n endif()\n endmacro()\n macro(test)\n test2()\n if()\n cmd()\n test2()\n else()\n foreach(a b c d e)\n test2()\n endforeach()\n endif()\n endmacro()\n cmd1()\n test()\n cmd3()\n \"\"\", checks=None, indentation=True)",
"def evaluateMacro(compiled_expression):",
"def generate_latex_macros(\n self, labels=\"all\", parameter_dict=None, save_to_file=None,\n rounding=2\n ):\n import os\n\n if save_to_file is not None and os.path.isfile(\"{}\".format(save_to_file)):\n raise FileExistsError(\n \"The file {} already exists.\".format(save_to_file)\n )\n if labels != \"all\" and isinstance(labels, str) and labels not in self.labels:\n raise ValueError(\"The label %s does not exist.\" % (labels))\n elif labels == \"all\":\n labels = list(self.labels)\n elif isinstance(labels, str):\n labels = [labels]\n elif isinstance(labels, list):\n for ll in labels:\n if ll not in list(self.labels):\n raise ValueError(\"The label %s does not exist.\" % (ll))\n\n macros = self.latex_macros(\n [self.samples_dict[i] for i in labels], parameter_dict,\n labels=labels, rounding=rounding\n )\n if save_to_file is None:\n print(macros)\n else:\n with open(save_to_file, \"w\") as f:\n f.writelines([macros])",
"def render_macro(name, arguments, inline, config):\n for MacroKlass in config.get('macros', []):\n if MacroKlass.key == name:\n macro = MacroKlass(inline, config)\n if macro.handler:\n args, kwargs = [], {}\n for arg in arguments.split(','):\n if '=' in arg:\n k, v = arg.strip().split('=', 1)\n \n # TODO: wrap this in try...except as it might fail\n kwargs[k] = ast.literal_eval(v)\n else:\n args.append(ast.literal_eval(arg.strip()))\n return macro.handler(*args, **kwargs)",
"def macros_eval_order(macros: Dict = None) -> List[str]:\n if macros is None:\n return []\n\n # Resolve dependencies between macros\n deps = dict()\n for name, params in macros.items():\n parents = set()\n for param in params.values():\n if ismacro(param):\n macro, _ = get_macro_and_param(param)\n parents.add(macro)\n deps[name] = parents\n\n # Resolve evaluation order using dependencies\n order = [] # type: List[str]\n\n def _add(macro: str, stack: Tuple[str, ...] = ()):\n if macro in order:\n return\n if macro in stack:\n cycle = \" -> \".join(list(stack) + [macro])\n raise ValueError(f\"Unable to resolve order of macro evaluation (cycle: {cycle}, dependencies: {deps})\")\n if macro not in deps:\n raise ValueError(f\"Missing macro: {macro}, mentioned by : {' -> '.join(stack)})\")\n for parent in deps[macro]:\n _add(parent, (*stack, macro))\n order.append(macro)\n\n for macro in macros:\n _add(macro)\n\n # Macros should appear once and only once\n if len(order) != len(macros) or set(order) != set(macros):\n raise ValueError(f\"Order {order} incoherent with macros {macros.keys()}\")\n\n return order",
"def convert_to_macro(macro_dict):\n # Create root\n root = ET.Element(\"klayout-macro\")\n\n # Create empty dictionary of tags that represent sub elements to root\n subroot_elements = dict.fromkeys([\"description\", \"version\", \"category\",\"prolog\", \"epilog\",\\\n \"doc\", \"autorun\", \"autorun-early\", \"shortcut\", \"show-in-menu\",\\\n \"group-name\", \"menu-path\", \"interpreter\", \"dsl-interpreter-name\",\\\n \"text\"])\n\n # Create sub elements to root\n for tag in subroot_elements.keys():\n subroot_elements[tag] = ET.SubElement(root, tag)\n if macro_dict.get(tag, None) != None:\n subroot_elements[tag].text = macro_dict.get(tag)\n\n # Prettify the XML then return the raw text\n return prettify(root)"
] | [
"0.6791655",
"0.6284933",
"0.6263033",
"0.61974406",
"0.6181545",
"0.6170639",
"0.603242",
"0.60219055",
"0.6006982",
"0.5983674",
"0.593645",
"0.5891278",
"0.5878313",
"0.5833065",
"0.58317727",
"0.58231497",
"0.5785763",
"0.56771237",
"0.5628757",
"0.5620599",
"0.5515615",
"0.5466995",
"0.54648864",
"0.5453844",
"0.545112",
"0.5364249",
"0.5358044",
"0.5343504",
"0.53409934",
"0.5294591"
] | 0.7120662 | 0 |
Goes depth first through all possible moves until a solution was found or the maximum depth has been reached. | def run(self, max_depth):
while len(self.stack) > 0:
state = self.get_next_state()
if state.is_solution():
self.solutions.append(state.moves)
if len(state.moves) < max_depth:
self.create_children(state)
self.archive[state.get_tuple()] = len(state.moves)
# sort solutions best to worst
self.solutions.sort(key=len)
if self.solutions:
return self.solutions[0]
print("This depth is not sufficient.")
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list",
"def iterative_depth_search(self, board, player, t_max=30, min_depth=4, stop_at_depth=False):\n\n\t\tt_elapsed = 0.0\n\t\tbest_move, max_depth = None, 1\n\t\talpha, beta = -float('inf'), float('inf')\n\n\t\twhile max_depth <= min_depth or t_elapsed <= t_max:\n\t\t\tif stop_at_depth and max_depth > min_depth:\n\t\t\t\tbreak\n\n\t\t\tstart = time.time()\n\t\t\tbest_moves, best_val = self.alpha_beta_search(board, alpha, beta, player, 0, max_depth)\n\t\t\tt_elapsed += time.time() - start\n\t\t\tmax_depth += 1\n\t\t\tself.update()\n\n\t\t\t# Checkmate found.\n\t\t\tif abs(best_val) == float('inf'):\n\t\t\t\tself.moves_til_checkmate = len(best_moves)\n\t\t\t\tbreak\n\n\t\tbest_move = best_moves[0]\n\n\t\treturn best_move, best_val",
"def dfsl(board, depth_limit):\n # base cases\n if all(not piece.alive for piece in board.black_pieces):\n # goal! start building a path\n return []\n\n elif depth_limit == 0:\n # no path found to goal with this depth limit\n return None\n\n # recursive case: try all possible moves for all remaining pieces\n remaining_pieces = [p for p in board.white_pieces if p.alive]\n for piece in remaining_pieces:\n for newpos in piece.moves():\n oldpos = piece.pos\n eliminated_pieces = piece.makemove(newpos)\n result = dfsl(board, depth_limit-1)\n piece.undomove(oldpos, eliminated_pieces)\n\n if result is not None:\n # recursively found a sequence of moves to a goal state! hooray!\n # continue building the (reversed) sequence on the way back up\n result.append((piece, newpos))\n return result\n # otherwise, continue searching\n\n # no sequence found using any possible move (with this depth limit)\n return None",
"def search(board:Board, max_depth=3) -> DiGraph:\n\n n = 0 # node label which also serves as a node counter\n depth = 0\n \n G = nx.DiGraph()\n G.add_node(0, winner=None, player=0, board=board.state, board_p = board.display)\n \n # First branch in look ahead\n newleavelist=[]\n parent_node = n\n parent_board = Board(G.nodes[n]['board'][0], G.nodes[n]['board'][1])\n\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=0)\n if move not in moves_available:\n continue\n \n # Do move\n new_board = parent_board.update_board(Move(player=0, move=move))\n \n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1, board=new_board.state, board_p = new_board.display)\n G.add_edge(parent_node, n, move=move)\n if new_board.is_winner:\n continue\n newleavelist.append(n)\n \n depth=1\n # subsequent branches\n while depth < max_depth:\n leavelist = newleavelist[:]\n newleavelist = []\n for leave in leavelist: \n # Get parent board\n parent_board = Board(G.nodes[leave]['board'][0], G.nodes[leave]['board'][1])\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=depth%2)\n if move not in moves_available:\n continue\n # Do move\n new_board = parent_board.update_board(Move(player=depth%2, move=move))\n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1-depth%2, \n board=new_board.state, board_p=new_board.display)\n G.add_edge(leave, n, move=move)\n if new_board.is_winner:\n continue\n \n newleavelist.append(n)\n depth=depth+1\n return G",
"def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]",
"def solveOneStep(self):\n ### Student code goes here\n if (self.currentState.state == self.victoryCondition) or (self.currentState not in self.visited):\n self.visited[self.currentState] = True\n win_or_not = self.currentState.state == self.victoryCondition\n return win_or_not\n\n if not self.currentState.nextChildToVisit: \n its = 0\n for movable in self.gm.getMovables():\n its += 1\n # time test\n # too long \n if its == \"too long\":\n return \"too long\"\n #make every move in movable\n self.gm.makeMove(movable)\n new = self.gm.getGameState()\n new_gs = GameState(new, self.currentState.depth+1, movable)\n \n if new_gs not in self.visited:\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.gm.reverseMove(movable) \n \n num_children = len(self.currentState.children)\n if self.currentState.nextChildToVisit < num_children:\n new = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.makeMove(new.requiredMovable)\n self.currentState = new\n #recurse\n return self.solveOneStep()\n else:\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n #recurse\n return self.solveOneStep()",
"def depthFirstSearch(problem):\n #print \"Start:\", problem.getStartState()\n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n #print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n #created a frontier Stack for DFS\n #Here the stack acts as a LIFO stack\n neighbourNodes = util.Stack()\n #created a list of moves which will be returned in then end\n moves = []\n #pushed the start node and empty moves list, onto the frontier stack\n neighbourNodes.push((problem.getStartState(),moves))\n #this is a set of nodes which have been seen, to avoid adding nodes already visited \n seenNodes = set()\n #condition evaluated based on the existence of elements in the frontier stack\n while not neighbourNodes.isEmpty():\n #last node in the stack is popped and its state and action is stored\n poppedNodeState, poppedNodeAction = neighbourNodes.pop()\n #condition to check if the node is already been visited\n if(poppedNodeState in seenNodes):\n #if yes then it just skips the iteration using the continue statement\n continue\n #condition to check if the current node is the goal node\n if problem.isGoalState(poppedNodeState):\n #if yes then return the action or moves to be performed list\n return poppedNodeAction\n #if not visited before then node is added to the seenNodes set\n seenNodes.add(poppedNodeState)\n #loop to parse the successor nodes and check and add them to the frontier stack\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n #checking if the successor node has already been visited before\n if(state in seenNodes):\n #if yes then it skips that node\n continue\n #else it adds that successor along with it action appeneded with the already existing actions\n neighbourNodes.push((state, poppedNodeAction+[action]))\n #the list of moves if finally returned\n return moves\n 
#util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"",
"def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path",
"def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1",
"def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_depth = self.currentState.depth\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n count = self.currentState.nextChildToVisit\n if len(self.currentState.children) > count:\n found_move = True\n break\n if not found_move:\n for all_visited in self.visited.keys():\n all_visited.nextChildToVisit = 0\n current_depth += 1\n if len(self.visited) == 1:\n all_possible_moves = self.gm.getMovables()\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, every_move)\n new_game_state.parent = self.currentState\n self.visited[new_game_state] = False\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n while current_depth != self.currentState.depth:\n count = self.currentState.nextChildToVisit\n self.currentState.nextChildToVisit += 1\n if len(self.currentState.children) > count:\n self.currentState = self.currentState.children[count]\n next_move = self.currentState.requiredMovable\n self.gm.makeMove(next_move)\n else:\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n if len(self.currentState.children) > self.currentState.nextChildToVisit:\n found_move = True\n break\n if not found_move:\n return False\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n all_possible_moves = self.gm.getMovables()\n next_depth = current_depth + 1\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), next_depth, every_move)\n if new_game_state not in self.visited:\n self.visited[new_game_state] = False\n 
new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n return False\n else:\n return True",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the 
stack\n\n return [] #if stack is empty\n util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n return currPath[1:]\n else:\n closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False",
"def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False",
"def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]",
"def iterative_minimax_strategy(game: Any) -> Any:\n s = Stack()\n id0 = 0\n d = {0: Tree([id0, game, None])}\n s.add(0)\n\n while not s.is_empty():\n id1 = s.remove()\n item = [id1]\n if d[id1].children == []:\n for move in d[id1].value[1].current_state.get_possible_moves():\n game1 = copy.deepcopy(d[id1].value[1])\n game1.current_state = game1.current_state.make_move(move)\n id0 += 1\n d[id0] = Tree([id0, game1, None])\n d[id1].children.append(id0)\n item.append(id0)\n else:\n item.extend(d[id1].children)\n for num in item:\n if d[num].value[1].is_over(d[num].value[1].current_state):\n d[num].value[2] = -1\n elif d[num].children != [] and all(d[x].value[2] is not None\n for x in d[num].children):\n d[num].value[2] = max([(-1) * d[y].value[2]\n for y in d[num].children])\n else:\n s.add(num)\n i = 0\n for q in d[0].children:\n if d[q].value[2] == -1:\n i = d[0].children.index(q)\n return game.current_state.get_possible_moves()[i]",
"def depth_first(board, depth):\n for mole in moles(board):\n new_board = hit(board, mole)\n if new_board == 0:\n return (mole,)\n elif depth > 0:\n result = depth_first(new_board, depth-1)\n if result:\n return (mole,) + result \n return False",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n stack = util.Stack() # stack to keep track of frontier nodes where pacman has move\n stack.push(start)\n explored = set() # to keep track of explored areas\n route = []\n\n while not stack.isEmpty():\n current_position = stack.pop()\n explored.add(current_position)\n\n if problem.isGoalState(current_position):\n break\n for each in problem.getSuccessors(current_position):\n if each[0] not in explored: # x,y coordinates of positions we haven't visited are pushed onto stack\n # print(each)\n stack.push(each[0])\n route.append((current_position, each[0], each[1])) # record of movements to rebuild path (from,to,how)\n\n x = len(route)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if route[x - 1][0] != route[x - 2][1]: # starts from goal and works backwards\n route.remove(route[x - 2])\n x = len(route)\n else:\n x -= 1\n # print(route)\n return [action[2] for action in route]",
"def solve(self):\n while self.character.path[-1] != 88:\n n = self.next_move()\n if n is None:\n self.character.path += ['Error: Could not find full path (budget does not suffice or unreachable).']\n break\n self.character.path += [n]\n self.updated_occupied_locations()\n self.currentTurn += 1",
"def search(self, is_max, possible_moves, state, depth, alpha, beta):\n temp_state = state.deepcopy()\n best_move = None\n best_move_val = float('-inf') if is_max else float('inf')\n \n for move in possible_moves:\n for to in move['to']:\n \n if time() > self.thinking_time:\n return best_move, best_move_val\n \n temp_state.board.move_pawn(move['from'], to)\n temp_state.next_turn()\n _, val = self.minimax(temp_state, not(is_max), depth+1, alpha, beta)\n \n temp_state.board.move_pawn(to, move['from'])\n temp_state.undo_turn()\n \n if is_max and val > best_move_val:\n alpha = max(val, alpha)\n best_move_val = val\n best_move = (move['from'], to)\n \n if not(is_max) and val < best_move_val:\n beta = min(val, beta)\n best_move_val = val\n best_move = (move['from'], to)\n \n if beta <= alpha: #pruning\n return best_move, best_move_val\n \n return best_move, best_move_val",
"def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1",
"def DFS(initial_state, check_dict): \r\n \r\n print(\"Implementing DFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. Number of moves:\", len(path) - 1)\r\n print(path)\r\n return path, False",
"def solveOneStep(self):\n ### Student code goes here\n state = self.currentState\n #print (type(state))\n self.visited[state] = True\n #print (type(self.gm.getGameState()))\n moves = self.gm.getMovables()\n print (\"CURRENTSTATE\" + str(self.currentState.state))\n print (\"MOVABLES:\")\n if moves:\n for m in moves:\n print (str(m))\n print (\"CHILDINDEX:\")\n print (state.nextChildToVisit)\n print (\"*********\")\n if state.state == self.victoryCondition:\n return True\n #if no child to expand then go back\n if not moves or state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n if state.requiredMovable is not None:\n self.gm.reverseMove(state.requiredMovable)\n # expand\n else:\n\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n #if to parent or if visited then skip\n while (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n print (\"PARENT FOUND!\")\n self.gm.reverseMove(next_move)\n if state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n return False\n else:\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n next_state = GameState(self.gm.getGameState(), state.depth + 1, next_move)\n next_state.parent = state\n #next_state.requiredMovable = next_move\n state.children.append(next_state)\n self.currentState = next_state\n print (state.nextChildToVisit)\n return False",
"def solveOneStep(self):\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n movables = self.gm.getMovables()\n self.visited[self.currentState] = True\n\n for move in movables:\n self.gm.makeMove(move)\n gs = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)\n if gs in self.visited:\n self.gm.reverseMove(move)\n continue\n self.currentState.children.append(gs)\n gs.parent = self.currentState\n self.gm.reverseMove(move)\n\n while self.currentState.nextChildToVisit < len(self.currentState.children):\n gs = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit += 1\n if gs in self.visited:\n continue\n self.gm.makeMove(gs.requiredMovable)\n self.currentState = gs\n return False\n\n if self.current_state.parent:\n self.gm.reverseMove(self.current_state.requiredMovable)\n self.currentState = self.current_state.parent\n return False",
"def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while 
root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False",
"def depthFirstSearch(problem):\n \n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in reversed(tempSuccList):\n successor.insert(0,succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])",
"def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True"
] | [
"0.7213512",
"0.7089381",
"0.6998385",
"0.6994788",
"0.6951225",
"0.6930633",
"0.6860129",
"0.6857788",
"0.6846639",
"0.6840226",
"0.6836247",
"0.683497",
"0.68027216",
"0.67122006",
"0.668429",
"0.6641582",
"0.6599496",
"0.65836287",
"0.65786326",
"0.65759915",
"0.6541374",
"0.6524091",
"0.65198135",
"0.65151626",
"0.65001345",
"0.6464481",
"0.6463242",
"0.64496183",
"0.6439206",
"0.6418565"
] | 0.77762467 | 0 |
Call get_hike_distance on each hike for each weather station. Adds columns to df_hike Input parameters | def get_closest_station(df_hike, df_weather):
closest_station = []
station_distance = []
for hike_idx in df_hike.index:
hike_long = df_hike.loc[hike_idx, 'long']
hike_lat = df_hike.loc[hike_idx, 'lat']
distances = []
for stat_idx in df_weather.index:
stat_long = df_weather.loc[stat_idx, 'LONGITUDE']
stat_lat = df_weather.loc[stat_idx, 'LATITUDE']
distance = get_hike_distance(
hike_lat, hike_long, stat_lat, stat_long)
distances.append(distance)
shortest_idx = np.argmax(distances)
distance = max(distances)
station = df_weather.loc[int(shortest_idx), 'name']
closest_station.append(station)
station_distance.append(distance)
df_hike['closet_station'] = closest_station
df_hike['station_distance'] = station_distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_distance_features(df_kek):\n df = pd.DataFrame([])\n df['distance'] = get_distance_vector(df_kek, 'latitude', 'longitude', 'del_latitude', 'del_longitude')\n df['distance_dest_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'del_latitude', 'del_longitude')\n df['distance_start_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'latitude', 'longitude')\n df['route_distance'] = df_kek.apply(lambda x: get_route_distance(x['route']), axis=1)\n df[df['route_distance'] == 0.0] = df['route_distance'].median()\n df = pd.concat([df, pd.get_dummies(df_kek['main_id_locality'], prefix='City')], axis=1)\n return df",
"def calculateDistances(df):\n return",
"def create_cols_distances(df):\n #create a column for haversine distance\n df['distance'] = haversine_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['manhattan_distance'] = dummy_manhattan_distance(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['bearing'] = bearing_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n return df",
"def get_hike_distance(df1lat, df1long, df2lat, df2long):\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(df1lat)\n lon1 = radians(df1long)\n lat2 = radians(df2lat)\n lon2 = radians(df2long)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n return distance",
"def neighbor_distance_statistics(self):\n # Compute\n # 1. Distance between different timings of the same day on the same route for each airline.\n # 2. Distance between airlines for close by timings.\n # 3. Distance between neighboring dates for same timing.\n price_column = self.train_df.columns.get_loc('Price')\n departure_datetime_column = self.train_df.columns.get_loc('Departure_datetime')\n dep_time_column = self.train_df.columns.get_loc('Dep_Time')\n date_of_journey_column = self.train_df.columns.get_loc('Date_of_Journey')\n\n def update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict):\n if len(dep_time_to_price_dict) > 1:\n dep_time_sorted = sorted(dep_time_to_price_dict.keys())\n for key_i in range(len(dep_time_sorted) - 1):\n for key_j in range(key_i + 1, len(dep_time_sorted)):\n dep_time_first = dep_time_sorted[key_i]\n dep_time_second = dep_time_sorted[key_j]\n\n if dep_time_first not in same_day_price_ratio_dict:\n same_day_price_ratio_dict[dep_time_first] = dict()\n if dep_time_second not in same_day_price_ratio_dict[dep_time_first]:\n same_day_price_ratio_dict[dep_time_first][dep_time_second] = []\n\n price_ratio = dep_time_to_price_dict[dep_time_second] * 1.0 / dep_time_to_price_dict[\n dep_time_first]\n same_day_price_ratio_dict[dep_time_first][dep_time_second].append(price_ratio)\n\n same_day_price_ratio_stats = dict()\n for airline in self.train_df['Airline'].unique():\n same_day_price_ratio_stats[airline] = dict()\n source_codes_airline = self.train_df[self.train_df['Airline'] == airline]['Source_code'].unique()\n\n for source_code in source_codes_airline:\n destination_codes_airline = self.train_df[(self.train_df['Airline'] == airline) & (self.train_df['Source_code'] == source_code)][\n 'Destination_code'].unique()\n same_day_price_ratio_stats[airline][source_code] = dict()\n for destination_code in destination_codes_airline:\n same_day_price_ratio_stats[airline][source_code][destination_code] = dict()\n n_stops_arr = 
sorted(self.train_df[(self.train_df['Airline'] == airline) &\n (self.train_df['Source_code'] == source_code) &\n (self.train_df['Destination_code'] == destination_code)]['n_stops'].unique())\n # TODO Convert n_stops column into int. If not able to do why is it so?\n n_stops_arr = [int(float(x)) for x in n_stops_arr]\n for n_stops in n_stops_arr:\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops] = dict()\n routes_arr = self.train_df[(self.train_df['Airline'] == airline) &\n (self.train_df['Source_code'] == source_code) &\n (self.train_df['Destination_code'] == destination_code) &\n (self.train_df['n_stops'] == n_stops)]['Route'].unique()\n for route in routes_arr:\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops][route] = dict()\n same_day_price_ratio_dict = dict()\n filter_dict = {'Airline': airline, 'Source_code': source_code, 'Destination_code': destination_code,\n 'n_stops': n_stops, 'Route': route}\n df = self.get_subset_data(df=self.train_df, filter_dict=filter_dict)\n\n i = 0\n prev_date_of_journey = df.iloc[i, date_of_journey_column]\n prev_dep_time = df.iloc[i, dep_time_column]\n dep_time_to_price_dict = dict()\n dep_time_to_price_dict[prev_dep_time] = df.iloc[i, price_column]\n i += 1\n while i < len(df):\n cur_date_of_journey = df.iloc[i, date_of_journey_column]\n cur_dep_time = df.iloc[i, dep_time_column]\n cur_price = df.iloc[i, price_column]\n\n if prev_date_of_journey != cur_date_of_journey:\n update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict)\n\n # reset dep time to price mapping\n dep_time_to_price_dict = dict()\n else:\n if cur_dep_time not in dep_time_to_price_dict:\n dep_time_to_price_dict[cur_dep_time] = cur_price\n else:\n # If multiple instances of price available, then consider the lowest one\n if cur_price < dep_time_to_price_dict[cur_dep_time]:\n dep_time_to_price_dict[cur_dep_time] = cur_price\n\n i += 1\n prev_dep_time = cur_dep_time\n prev_date_of_journey = 
cur_date_of_journey\n\n update_price_ratio_dict(dep_time_to_price_dict, same_day_price_ratio_dict)\n same_day_price_ratio_stats[airline][source_code][destination_code][n_stops][route] = same_day_price_ratio_dict\n\n output_folder = \"../statistics\"\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n with open(os.path.join(output_folder, \"same_day_price_ratio_stats.json\"), \"w\") as fd:\n json.dump(obj=same_day_price_ratio_stats, fp=fd)",
"def compute_distance(df):\n pass",
"def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist",
"def get_hechms_stations(pool):\n\n hechms_stations = {}\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT * FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, \"10_____\")\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n hechms_stations[dict.get(\"name\")] = [dict.get(\"id\"), dict.get(\"latitude\"), dict.get(\"longitude\")]\n return hechms_stations\n else:\n return None\n except Exception as exception:\n error_message = \"Retrieving hechms stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()",
"def test_EstimateDistances_fromThreeway(self):\n d = EstimateDistances(self.al, JC69(), threeway=True)\n d.run()\n canned_result = {('b', 'e'): 0.495312,\n ('c', 'e'): 0.479380,\n ('a', 'c'): 0.089934,\n ('a', 'b'): 0.190021,\n ('a', 'e'): 0.495305,\n ('b', 'c'): 0.0899339}\n result = d.getPairwiseDistances(summary_function=\"mean\")\n self.assertDistsAlmostEqual(canned_result, result)",
"def preprocess(df_kek):\n df = pd.DataFrame([])\n df['ETA'] = df_kek['ETA']\n df['EDA'] = df_kek['EDA']\n df['ESP'] = df['EDA'] / df['ETA']\n if 'p200' in df_kek.columns:\n df['p200'] = df_kek['p200']\n df['p500'] = df_kek['p500']\n df['p1000'] = df_kek['p1000']\n df['route_num'] = df_kek['route'].apply(lambda x: 0 if pd.isna(x) else len(polyline.decode(x)))\n df = pd.concat([df, add_time_features(set_time_by_timezone(df_kek))], axis=1)\n df = pd.concat([df, add_distance_features(df_kek)], axis=1)\n\n return df",
"def nearest_loop(row, gdf2,geometry_cols=['geo_lon','geo_lat'],src_column=None,surrounding=False):\r\n def haversine_distance(origin, destination):\r\n lon1, lat1 = origin\r\n lon2, lat2 = destination\r\n radius = 6371000 # meters\r\n \r\n dlat = math.radians(lat2-lat1)\r\n dlon = math.radians(lon2-lon1)\r\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n d = radius * c\r\n return d\r\n\r\n # start the main iteration\r\n if row.geometry.type == 'Polygon':\r\n point_xy = np.array((row.geometry.centroid.x,\r\n row.geometry.centroid.y))\r\n if row.geometry.type in ['Point', 'LineString']:\r\n point_xy = np.array((row.geometry.x, row.geometry.y)) \r\n # Select most current stations datasets.\r\n closest = None\r\n closest_distance = 99999999999\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest = station\r\n closest_distance = d\r\n # if surroung \r\n if surrounding:\r\n closest1 = []\r\n closest_distance = closest_distance+surrounding\r\n i = 0\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest1.append(station)\r\n i += 1\r\n closest = closest1\r\n return closest[src_column]",
"def closest_stations(latlong, df):\n names = df['name'].values\n station_dists = {}\n for (lat, lon, name) in list(df[['Lat', 'Lon', 'name']].value_counts().index):\n if not(np.isnan(lat) or np.isnan(lon)):\n station_dists[name] = haversine(latlong, (lat, lon)) \n \n return sorted(station_dists.items(), key=lambda x: x[1])",
"def create_dat_from_geodataframe(gdf, DAT_header, output_file, wind_field='gust_mph'):\n t0 = time()\n t1 = time()\n db = HazusDB()\n sql = \"\"\"\n select Tract as FIPS, CenLongit as lon, CenLat as lat from\n (select * from\n syHazus.dbo.syTract) a\n inner join \n (select StateFips\n FROM [syHazus].[dbo].[syState]\n where HUState = 1\n and StateFips < 55) b\n on Substring(a.CountyFips, 1, 2) like b.StateFips+ '%'\n \"\"\"\n queryset = db.query(sql)\n queryset['geometry'] = [Point(x, y) for x, y in zip(\n queryset['lon'], queryset['lat'])]\n centroids_all = gpd.GeoDataFrame(queryset, geometry='geometry')\n buff = gdf.geometry.buffer(0.2)\n buff_gdf = gpd.GeoDataFrame(geometry=buff.geometry)\n buff_gdf['dis'] = 1\n dissolve = buff_gdf.dissolve(by='dis')\n centroids_intersect = centroids_all.intersects(dissolve.unary_union)\n centroids = centroids_all[centroids_intersect == True]\n print(time() - t1)\n\n print('formatting data for idw')\n t1 = time()\n xy = np.asarray([[x.x, x.y] for x in gdf.geometry])\n z = np.asarray([x for x in gdf[wind_field]])\n xis = np.asarray([x.x for x in centroids.geometry])\n yis = np.asarray([x.y for x in centroids.geometry])\n print(time() - t1)\n\n print('interpolating values')\n t1 = time()\n kdtree = cKDTree(xy)\n zis = []\n for xi, yi in zip(xis, yis):\n zi = idw(kdtree, z, xi, yi)\n zis.append(zi)\n print(time() - t1)\n zis = np.asarray(zis)\n zis = zis * 0.44704\n\n print('formatting dataframe for output')\n t1 = time()\n tracts = list(map(lambda x: x + ' ', centroids.FIPS))\n longs = list(map(lambda x: '{0:.4f}'.format(x) + ' ', xis))\n lats = list(map(lambda x: '{0:.4f}'.format(x) + ' ', yis))\n windSpeeds = list(map(lambda x: '{0:.5f}'.format(x) + ' ', zis))\n zeros = list(map(lambda x: '0' + '{0:.5f}'.format(x * 0) + ' ', zis))\n windSpeedsLast = list(map(lambda x: '{0:.5f}'.format(x), zis))\n df = pd.DataFrame({'tracts': tracts, 'longs': longs, 'lats': lats,\n 'windSpeeds': windSpeeds, 'zeros': zeros, 'windSpeedsLast': 
windSpeedsLast})\n print(time() - t1)\n\n print('writing output DAT file')\n t1 = time()\n # creates and opens the export DAT file\n pd.DataFrame().to_csv(output_file, header=False, index=False)\n export = open(output_file, \"w\")\n\n # adds columns to the DAT file header\n DAT_header.append('')\n DAT_header.append(\n ' ident elon nlat ux vy w (m/s)')\n\n # writes header to DAT file\n for row in DAT_header:\n export.write(row + '\\n')\n\n # writes data to DAT file\n for row in range(len(df[df.columns[0]])):\n writeRow = ''\n for item in df.iloc[row]:\n writeRow = writeRow + item\n export.write(writeRow + '\\n')\n export.close()\n print(time() - t1)\n print('Total elasped time: ' + str(time() - t0))",
"def augmentDistance(self):\n\n for key,value in self._models.iteritems():\n src=[float(i) for i in value['src'].replace('#',' ').split()]\n tgt=[float(i) for i in value['tgt'].replace('#',' ').split()]\n\n dist = haversine((np.mean(src[0:2]),np.mean(src[2:])),\n (np.mean(tgt[0:2]),np.mean(tgt[2:])),\n miles=True)\n self._models[key]['distance'] = dist\n\n return",
"def main(daymet_dir,pickles,start_date='1980-10-01',end_date='2020-09-30',huc_col = 'huc8', **kwargs):\r\n\tprint(f'The huc col being processed is: {huc_col}')\r\n\t################################################################\r\n\t#first do the daymet data \r\n\t#read in all the files in this dir and combine them into one df\r\n\tearly=FormatData(glob.glob(daymet_dir+f'*_12_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tmid=FormatData(glob.glob(daymet_dir+f'*_2_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tlate=FormatData(glob.glob(daymet_dir+f'*_4_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\t################################################################\r\n\t#next do the snotel data \r\n\toutput=[]\r\n\r\n\t#read in some pickled objects, these look like a list of dfs with each being a station for the full time period \r\n\tfor item in ['PREC','TAVG','WTEQ']:\r\n\t\t#get the pickled objects for each parameter \r\n\t\tfiles = glob.glob(pickles+f'*{item}_{start_date}_{end_date}_snotel_data_list') #hardcoded currently\r\n\t\tdf=FormatData(files,drop_cols=['year','month','day']).read_in_pickles()\r\n\t\toutput.append(df) #the df here is 365 days x ~30 yrs x 237 stations so these are pretty big dfs\r\n\t\r\n\t#join the three enviro params \r\n\toutput_df = reduce(lambda left,right: pd.merge(left,right,how='inner',on=['date','id']), output)\r\n\t\r\n\t\r\n\t#convert the temp column from F to C \r\n\toutput_df['TAVG'] = (output_df['TAVG']-32)*(5/9) \r\n\t#there are a couple of erroneous temp values, remove those \r\n\toutput_df = output_df.loc[output_df['TAVG'] <= 50]\r\n\r\n\t#convert prec and swe cols from inches to cm \r\n\toutput_df['PREC'] = output_df['PREC']*2.54\r\n\toutput_df['WTEQ'] = output_df['WTEQ']*2.54\r\n\t\r\n\t#remove rows that have one of the data types missing- this might need to be amended because \r\n\t#it means that there are 
different numbers of records in some of the periods. \r\n\toutput_df=output_df.dropna()\r\n\t\r\n\t#cast the snotel id col to int to add the hucs \r\n\toutput_df['id'] = output_df['id'].astype('int')\r\n\r\n\t#add the as yet nonexistant hucs data to the outputs \r\n\thucs = kwargs.get('hucs')\r\n\toutput_df[huc_col] = output_df['id'].map(hucs)\r\n\r\n\t#there are multiple snotel stations in some of the basins, \r\n\t#combine those so there is just one number per basin like the \r\n\t#daymet and RS data. \r\n\r\n\toutput_df=output_df.groupby([huc_col,'date'])[['PREC','WTEQ','TAVG']].mean().reset_index()\r\n\r\n\tperiod_list = []\r\n\tfor p1,p2 in zip(['early','mid','late'],[early,mid,late]): \r\n\t\t\t#get snotel first\r\n\t\t#make a temporal chunk of data \r\n\t\tsnotel_chunk=FormatData(None,time_period=p1).split_yearly_data(output_df)\r\n\r\n\t\t##########working below here\r\n\t\t############################\r\n\t\t#calculate the snow droughts for that chunk \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\t\t#print('snotel')\r\n\t\t\t#print(snotel_drought)\r\n\t\telse: \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',sort_col=huc_col).prepare_df_cols()\r\n\r\n\t\t#get cols of interest \r\n\t\t#snotel_drought=snotel_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t#rename cols so they don't get confused when data are merged \r\n\t\t#snotel_drought.columns=['huc8','year']+['s_'+column for column in snotel_drought.columns if not (column =='huc8') | (column=='year')]\r\n\t\t\r\n\t\t#then do the same for daymet \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\telse: 
\r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,sort_col=huc_col).prepare_df_cols()\r\n\t\t#print('daymet',daymet_drought)\r\n\t\t#daymet_drought=daymet_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t\r\n\t\t#daymet_drought.columns=['huc8','year']+['d_'+column for column in daymet_drought.columns if not (column =='huc8') | (column=='year')]\r\n\r\n\t##########################################\r\n\t\r\n\t\t#run the kmeans with drought types as intiilization conditions (centroids) for the clusters\r\n\t\t\r\n\t\t#these are all of the huc 4 basins in the study area \r\n\t\thuc4s = ['1708','1801','1710','1711','1709','1701','1702','1705','1703','1601','1707','1706','1712','1704']\r\n\t\ts_output = []\r\n\t\td_output = []\r\n\t\tfor huc4 in huc4s: \r\n\t\t\thuc4_s = sd.prep_clusters(snotel_drought,huc4,huc_col=huc_col) #get the subset of the snow drought data for a given huc4\r\n\t\t\thuc4_d = sd.prep_clusters(daymet_drought,huc4,huc_col=huc_col)\r\n\t\t\t#make the centroids that serve as the intialization for the kmeans clusters- these are like endmembers (ish)\r\n\t\t\ts_centroids = DefineClusterCenters(huc4_s,'WTEQ','PREC','TAVG').combine_centroids() #makes a numpy array with four centroids\r\n\t\t\td_centroids = DefineClusterCenters(huc4_d,'swe','prcp','tavg').combine_centroids() #makes a numpy array with four centroids\r\n\r\n\t\t\t#clusters should be like: {0:dry, 1:warm, 2:warm_dry, 3:no_drought} 6/8/2021 DOUBLE CHECK\r\n\t\t\t#run kmeans for the snotel data\r\n\t\t\ts_clusters = sd.run_kmeans(huc4_s[['WTEQ','PREC','TAVG']].to_numpy(),huc4_s['label'],s_centroids)\r\n\t\t\ts_clusters = sd.add_drought_cols_to_kmeans_output(s_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\t\t\t#run kmeans for the daymet data \r\n\t\t\td_clusters = sd.run_kmeans(huc4_d[['swe','prcp','tavg']].to_numpy(),huc4_d['label'],d_centroids)\r\n\t\t\td_clusters = sd.add_drought_cols_to_kmeans_output(d_clusters, huc_col=huc_col) #add a few cols needed for plotting 
\r\n\r\n\t\t\ts_output.append(s_clusters)\r\n\t\t\td_output.append(d_clusters)\r\n\t\ts_plot = pd.concat(s_output)\r\n\r\n\t\t#select the cols of interest and rename so there's no confusion when dfs are merged \r\n\t\ts_plot=s_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\ts_plot.columns=[huc_col,'year']+['s_'+column for column in s_plot.columns if not (column == huc_col) | (column=='year')]\r\n\r\n\t\td_plot = pd.concat(d_output)\r\n\t\td_plot=d_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\td_plot.columns=[huc_col,'year']+['d_'+column for column in d_plot.columns if not (column == huc_col) | (column=='year')]\r\n\t\r\n\t\t#merge the two datasets into one df \r\n\t\tdfs = s_plot.merge(d_plot,on=[huc_col,'year'],how='inner')\r\n\t\t\r\n\t\t#deal with the scenario that there are basins with less than 30 years of data, remove those here\r\n\t\tdfs = sd.remove_short_dataset_stations(dfs,huc_col)\r\n\t\tperiod_list.append(dfs)\r\n\r\n\tplot_counts(period_list,kwargs.get('stats_dir'),huc_col=huc_col,**kwargs)",
"def get_hikes():\n\n return Hike.query.all()",
"def create_avg_speed_cols(df):\n #create speed column. this should be correlated with the day component\n #and may give additional insight\n df['avg_speed_haversine'] = 1000*df['distance'].values/df['trip_duration']\n df['avg_speed_manhattan'] = 1000*df['manhattan_distance'].values/df['trip_duration']\n\n return df",
"def get_all_HEA_measurements(self):\n pass",
"def _tunnel_shearzone_data(self):\n file_loc = self.data_path / \"03_GeologicalMapping\" / \"01_TunnelIntersections\"\n columns = [\"x\", \"y\", \"z\", \"true_dip_direction\", \"dip\", \"tunnel\", \"shearzone\"]\n\n path = file_loc / \"Tunnel_intersections.txt\"\n df = pd.read_csv(path, sep=None, names=columns, engine=\"python\")\n df[\"shearzone\"] = df[\"shearzone\"].apply(rename_sz)\n df = df.rename(\n columns={\n \"true_dip_direction\": \"azimuth_struc\",\n \"tunnel\": \"borehole\",\n }\n )\n return df",
"def extract_vanHove_all(c,conn,wind=1):\n\n (fin,min_track_length) = conn.execute(\"select fout,min_track_length from vanHove where comp_key = ?\",c).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',c[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n \n vanHove = [_extract_vanHove(g,step,0,wind) for step in range(1,min_track_length)]\n \n del g\n Fin.close()\n del Fin\n \n return vanHove,temp,dtime",
"def quater_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_quater_hour_data, markets)\n return",
"def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n 
id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations",
"def add_shortest_route(df):\n\n df['gmaps_dist'] = df.apply(lambda row: gmaps.getTotDist((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)\n df['gmaps_dur'] = df.apply(lambda row: gmaps.getTotDur((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)",
"def stream_ingest(df):\n global index\n\n i=0\n coords= []\n datum = collections.OrderedDict()\n for index, row in df.iterrows():\n datum[\"symbol\"]=str(df.iloc[index,0])\n datum[\"spot_price\"]=float(df.iloc[index,1])\n datum[\"option_type\"] = str(df.iloc[index, 4])\n datum[\"exposure\"] = str(df.iloc[index, 6])\n datum[\"strike_price\"] = float(df.iloc[index, 7])\n datum[\"maturity_y\"] = int(df.iloc[index, 8])\n datum[\"maturity_m\"] = int(df.iloc[index, 9])\n datum[\"maturity_d\"] = int(df.iloc[index, 10])\n datum[\"calendar\"] = str(df.iloc[index, 11])\n datum[\"day_count\"] = str(df.iloc[index, 12])\n datum[\"risk_free_rate\"] = float(df.iloc[index, 13])\n datum[\"dividend_rate\"] = float(df.iloc[index, 14])\n datum[\"calc_dt_y\"] = int(df.iloc[index, 15])\n datum[\"calc_dt_m\"] = int(df.iloc[index, 16])\n datum[\"calc_dt_d\"] = int(df.iloc[index, 17])\n datum[\"volatility\"] = float(df.iloc[index, 18])\n coords.append(h_db.encode_datum(my_type, datum))\n\n i= i + 1\n # Pump data in batches\n if i % DATA_PACK == 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n coords = []\n time.sleep(INGEST_FREQ)\n print(response)\n\n # Flush the last batch\n if i % DATA_PACK != 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n\n # 3 second delay to mimic real time ingest\n time.sleep(INGEST_FREQ)\n print(response)\n return coords",
"def kpi_function(kwargs):\n start_time = kwargs['start_time']\n end_time = kwargs['end_time']\n project = kwargs['project']\n client_id = kwargs['client_id']\n device_id = kwargs['device_id']\n dataset_src = kwargs['dataset_src']\n dataset_dest = kwargs['dataset_dest']\n table_src = kwargs['table_src']\n table_dest = kwargs['table_dest']\n location = kwargs['location']\n config = kwargs['config']\n def station_data(station_id, start, end):\n data = Daily(station_id, start, end)\n data = data.fetch()\n data.reset_index(inplace=True)\n data = data[['time', 'tavg', 'tmin', 'tmax']]\n # print(data)\n return data\n def costic(df):\n alias_a = df['tmin']\n alias_b = df['tmax']\n\n if 15 > alias_b:\n return 15 - (alias_a + alias_b) / 2\n elif 15 <= alias_a:\n return 0\n else:\n return (15 - alias_a) * (0.08 + 0.42 * (15 - alias_a) / (alias_b - alias_a))\n\n current_time = arrow.utcnow().floor('day')\n # if end_time <= current_time:\n output = config\n rules = pd.DataFrame(output['rules'])\n rules.index = rules.index.astype(int)\n rules.sort_index(inplace=True)\n rules = rules[['kpi_name',\n 'alias_a',\n 'alias_b',\n 'alias_c',\n 'alias_d',\n 'alias_e',\n 'alias_f',\n 'equation_1',\n 'equation_2',\n 'aggregation',\n 'start_time',\n 'end_time',\n 'mute_logs']].copy()\n rules = rules.where(pd.notnull(rules), None)\n settings = output['settings']['mute_rules']\n station_id = output['settings']['station_id']\n country = output['settings']['country']\n state = output['settings']['state']\n df = rules\n # create an empty dataframe\n logs = pd.DataFrame()\n # get access to bigqyery\n # credentials, project_id = google.auth.default()\n # client = bigquery.Client(credentials=credentials)\n if settings is False:\n data_rd = pd.read_csv('main_data.csv')\n data_rd['time'] = pd.to_datetime(data_rd['time_local'])\n if data_rd.shape[0] > 0:\n left_table = data_rd.drop(['alias','value','time_local'],axis=1)\n left_table = left_table.groupby(\"time\").min().reset_index()\n sub_df = 
data_rd[['time','alias','value']].copy()\n sub_df['value'] = pd.to_numeric(sub_df['value'])\n right_table = pd.pivot_table(sub_df,index='time',columns='alias',values='value').reset_index()\n data_rd = pd.merge(left_table,right_table,on='time', how='outer')\n if data_rd.shape[0] > 0:\n min_time = data_rd.time.min()\n max_time = data_rd.time.max()\n if station_id is not None:\n meteo_data = station_data(station_id, min_time, max_time)\n # print(meteo_data)\n data_rd['costic_HDD_15C'] = meteo_data.apply(costic, axis=1)\n data_rd = data_rd.merge(meteo_data, on='time', how='left')\n data_rd[['tavg', 'tmax', 'tmin']] = data_rd[['tavg', 'tmax', 'tmin']].astype('float')\n print(data_rd.head())\n data_rd.to_csv(\"file_name.csv\", index=False)\n for i in range(df.shape[0]):\n if df.iloc[i]['kpi_name'] == 'costic_HDD_15C':\n obj = kpi(df.iloc[i], data_rd, country, state, logs)\n dff = obj.return_log()\n logs = pd.concat([logs, dff], axis=0)\n if df.iloc[i]['mute_logs']:\n aliases_list = df.iloc[i]['mute_logs'].split(',')\n for alias in aliases_list:\n logs = logs[~logs.aliases.str.contains(alias.strip())]\n else:\n print(f\"Data is not available from {start_time} to {end_time}\")\n if logs.shape[0]>0:\n logs['time'] = logs['time'].dt.date\n logs.rename(columns={\"time\":\"date\"},inplace=True)\n logs['client_id'] = client_id\n logs['device_id'] = device_id\n logs['date'] = logs['date'].astype(str)\n print(tabulate(logs[['date','value']]))\n # load the logs dataframe to destination table in the bigquery\n # table = client.get_table(f\"{project}.{dataset_dest}.{table_dest}\")\n else:\n print('no logs saved')\n else:\n print(\"All rules are muted\")",
"def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if 
os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n 
neighbor_demand_data.to_csv(neighbor_demand_path)",
"def getHourlyWind(self, keyword):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"wind\")\n\t\twind_values = [] # Array that will contain all the wind data\n\t\twind_data = {} # Dictionary of wind data\n\n\t\t# Getting humidity data\n\t\tfor data in weather_data:\n\t\t\twind_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\twind_data[\"y\"] = float(data[\"wind\"].split(\" \")[1])\n\t\t\twind_values.append(wind_data)\n\t\t\twind_data = {}\n\n\t\treturn wind_values",
"def half_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_half_hour_data, markets)\n return",
"def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)",
"def calculate_H(s_lat,s_lon,e_lat,e_lon):\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist"
] | [
"0.59621465",
"0.56795853",
"0.56663597",
"0.55515057",
"0.54293764",
"0.5317513",
"0.5096402",
"0.5072886",
"0.49965146",
"0.49763572",
"0.4970515",
"0.49424827",
"0.4929317",
"0.4915355",
"0.48880303",
"0.48811126",
"0.4879275",
"0.4869938",
"0.48475063",
"0.48345596",
"0.47952253",
"0.4772152",
"0.47579056",
"0.4749452",
"0.4732687",
"0.4721767",
"0.4719546",
"0.4709015",
"0.4707578",
"0.47018516"
] | 0.69872105 | 0 |
Subsets and Splits