Dataset columns (per row):
    query            string    (length 9 to 9.05k)
    document         string    (length 10 to 222k)
    metadata         dict
    negatives        sequence  (length 30)
    negative_scores  sequence  (length 30)
    document_score   string    (length 4 to 10)
    document_rank    string    (2 classes)
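A minimal sketch of how a split with this schema could be loaded and inspected using the Hugging Face `datasets` library; the repository path below is a placeholder, not the actual dataset name.

from datasets import load_dataset

# Placeholder repository id; substitute the real dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"][:80])           # natural-language query
print(row["document"][:80])        # positive code document for the query
print(len(row["negatives"]))       # 30 hard negatives per row
print(row["negative_scores"][:3])  # retrieval scores for the first negatives
print(row["document_score"], row["document_rank"])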
Convert the elements of a container to standard Python types. This method converts a container with elements to standard Python types. If the input container is of the type C{dict}, only its values are touched. Those values, as well as all elements of input sequences, must support a C{ToDict} method returning a serialized version.
def ContainerToDicts(container):
    if isinstance(container, dict):
        ret = dict([(k, v.ToDict()) for k, v in container.items()])
    elif isinstance(container, _SEQUENCE_TYPES):
        ret = [elem.ToDict() for elem in container]
    else:
        raise TypeError("Unknown container type '%s'" % type(container))
    return ret
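A short usage sketch for the retrieved ContainerToDicts above, assuming the function is in scope; the `_SEQUENCE_TYPES` constant and the `Node` element class are stand-ins invented for illustration.

_SEQUENCE_TYPES = (list, tuple)  # assumed definition; the real constant lives elsewhere in the source module

class Node:
    # Toy element type exposing the required ToDict() method
    def __init__(self, value):
        self.value = value

    def ToDict(self):
        return {"value": self.value}

print(ContainerToDicts({"a": Node(1), "b": Node(2)}))  # {'a': {'value': 1}, 'b': {'value': 2}}
print(ContainerToDicts([Node(3), Node(4)]))            # [{'value': 3}, {'value': 4}]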
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def to_python(self, value):\n if isinstance(value, (dict, list)):\n return value\n if value is None:\n return value\n return self.to_dict(value)", "def ensure_json_serializable(dict_, normalize_containers=False, verbose=0):\n dict_ = copy.deepcopy(dict_)\n\n def _norm_container(c):\n if isinstance(c, dict):\n # Cast to a normal dictionary\n if isinstance(c, OrderedDict):\n if type(c) is not OrderedDict:\n c = OrderedDict(c)\n else:\n if type(c) is not dict:\n c = dict(c)\n return c\n\n walker = ub.IndexableWalker(dict_)\n for prefix, value in walker:\n if isinstance(value, tuple):\n new_value = list(value)\n walker[prefix] = new_value\n elif isinstance(value, np.ndarray):\n new_value = value.tolist()\n walker[prefix] = new_value\n elif isinstance(value, (np.integer)):\n new_value = int(value)\n walker[prefix] = new_value\n elif isinstance(value, (np.floating)):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, (np.complexfloating)):\n new_value = complex(value)\n walker[prefix] = new_value\n elif isinstance(value, decimal.Decimal):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, fractions.Fraction):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, pathlib.Path):\n new_value = str(value)\n walker[prefix] = new_value\n elif hasattr(value, '__json__'):\n new_value = value.__json__()\n walker[prefix] = new_value\n elif normalize_containers:\n if isinstance(value, dict):\n new_value = _norm_container(value)\n walker[prefix] = new_value\n\n if normalize_containers:\n # normalize the outer layer\n dict_ = _norm_container(dict_)\n return dict_", "def to_python(self, value):\n if isinstance(value, str):\n return value\n\n if hasattr(value, \"to_python\"):\n return value.to_python()\n\n BaseDocument = _import_class(\"BaseDocument\")\n if isinstance(value, BaseDocument):\n # Something is wrong, return the value as it is\n return value\n\n is_list = False\n if not hasattr(value, \"items\"):\n try:\n is_list = True\n value = {idx: v for idx, v in enumerate(value)}\n except TypeError: # Not iterable return the value\n return value\n\n if self.field:\n self.field._auto_dereference = self._auto_dereference\n value_dict = {\n key: self.field.to_python(item) for key, item in value.items()\n }\n else:\n Document = _import_class(\"Document\")\n value_dict = {}\n for k, v in value.items():\n if isinstance(v, Document):\n # We need the id from the saved object to create the DBRef\n if v.pk is None:\n self.error(\n \"You can only reference documents once they\"\n \" have been saved to the database\"\n )\n collection = v._get_collection_name()\n value_dict[k] = DBRef(collection, v.pk)\n elif hasattr(v, \"to_python\"):\n value_dict[k] = v.to_python()\n else:\n value_dict[k] = self.to_python(v)\n\n if is_list: # Convert back to a list\n return [\n v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))\n ]\n return value_dict", "def _convert_values_to_correct_datatypes(d: dict):\n for key, value in d.items():\n if isinstance(value, dict):\n __class__._convert_values_to_correct_datatypes(value)\n elif isinstance(value, list):\n d[key] = [__class__._convert_value_to_correct_datatype(item) for item in value]\n else:\n 
d[key] = __class__._convert_value_to_correct_datatype(value)", "def from_container(cls, container_object):\n _dict = {}\n for key, value in container_object.items():\n if isinstance(value, dict):\n value = cls.from_container(value)\n if isinstance(value, list):\n for i in range(len(value)):\n # one level is all that is necessary for what we are doing.\n if isinstance(value[i], dict):\n value[i] = cls.from_container(value[i])\n _dict[key] = value\n return cls(_dict)", "def ContainerFromDicts(source, c_type, e_type):\n if not isinstance(c_type, type):\n raise TypeError(\"Container type '%s' is not a type\" % type(c_type))\n\n if source is None:\n source = c_type()\n\n if c_type is dict:\n ret = dict([(k, e_type.FromDict(v)) for k, v in source.items()])\n elif c_type in _SEQUENCE_TYPES:\n ret = c_type(map(e_type.FromDict, source))\n else:\n raise TypeError(\"Unknown container type '%s'\" % c_type)\n\n return ret", "def _cast_types(self, input_dict):\n return cast_types(input_dict, self.params['dtype'])", "async def dump_container(obj, container, container_type, params=None, field_archiver=None):\n field_archiver = field_archiver if field_archiver else dump_field\n elem_type = params[0] if params else None\n if elem_type is None:\n elem_type = container_type.ELEM_TYPE\n\n obj = [] if obj is None else get_elem(obj)\n if container is None:\n return None\n for elem in container:\n fvalue = await field_archiver(None, elem, elem_type, params[1:] if params else None)\n obj.append(fvalue)\n return obj", "def _convert_container(\n container, constructor_name, columns_name=None, dtype=None, minversion=None\n):\n if constructor_name == \"list\":\n if dtype is None:\n return list(container)\n else:\n return np.asarray(container, dtype=dtype).tolist()\n elif constructor_name == \"tuple\":\n if dtype is None:\n return tuple(container)\n else:\n return tuple(np.asarray(container, dtype=dtype).tolist())\n elif constructor_name == \"array\":\n return np.asarray(container, dtype=dtype)\n elif constructor_name == \"sparse\":\n return sp.sparse.csr_matrix(container, dtype=dtype)\n elif constructor_name == \"dataframe\":\n pd = pytest.importorskip(\"pandas\", minversion=minversion)\n return pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)\n elif constructor_name == \"pyarrow\":\n pa = pytest.importorskip(\"pyarrow\", minversion=minversion)\n array = np.asarray(container)\n if columns_name is None:\n columns_name = [f\"col{i}\" for i in range(array.shape[1])]\n data = {name: array[:, i] for i, name in enumerate(columns_name)}\n return pa.Table.from_pydict(data)\n elif constructor_name == \"polars\":\n pl = pytest.importorskip(\"polars\", minversion=minversion)\n return pl.DataFrame(container, schema=columns_name)\n elif constructor_name == \"series\":\n pd = pytest.importorskip(\"pandas\", minversion=minversion)\n return pd.Series(container, dtype=dtype)\n elif constructor_name == \"index\":\n pd = pytest.importorskip(\"pandas\", minversion=minversion)\n return pd.Index(container, dtype=dtype)\n elif constructor_name == \"slice\":\n return slice(container[0], container[1])\n elif constructor_name == \"sparse_csr\":\n return sp.sparse.csr_matrix(container, dtype=dtype)\n elif constructor_name == \"sparse_csc\":\n return sp.sparse.csc_matrix(container, dtype=dtype)", "def convert_iter_to_type(iterable, target_type):\n if isinstance(iterable, collections.Mapping):\n dic = {}\n for k, v in iterable.items():\n dic[k] = convert_iter_to_type(v, target_type)\n output = dic\n else:\n lst = []\n for 
obj in iterable:\n if _is_nsiterable(obj):\n obj = convert_iter_to_type(obj, target_type)\n lst.append(obj)\n output = target_type(lst)\n return output", "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.get_swagger_types()):\n value = self.get_from_container(attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.get_swagger_types()):\n value = self.get_from_container(attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.get_swagger_types()):\n value = self.get_from_container(attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "def convert(input):\n\n\tif isinstance(input, dict):\n\t\treturn dict([(convert(key), convert(value)) for key, value in input.items()])\n\telif isinstance(input, list):\n\t\treturn [convert(element) for element in input]\n\telif sys.version_info < (3,) and isinstance(input, unicode):\n\t\treturn input.encode('utf-8')\n\telse:\n\t\treturn input", "def apply_to_collection(\n data: Any,\n dtype: Union[type, Any, Tuple[Union[type, Any]]],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, Tuple[type]]] = None,\n include_none: bool = True,\n **kwargs: Any,\n) -> Any:\n # Breaking condition\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, **kwargs)\n\n elem_type = type(data)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n out = []\n for k, v in data.items():\n v = apply_to_collection(\n v, dtype, function, *args, wrong_dtype=wrong_dtype, include_none=include_none, **kwargs\n )\n if include_none or v is not None:\n out.append((k, v))\n if isinstance(data, defaultdict):\n return elem_type(data.default_factory, OrderedDict(out))\n return elem_type(OrderedDict(out))\n\n is_namedtuple = _is_namedtuple(data)\n is_sequence = isinstance(data, Sequence) and not isinstance(data, str)\n if is_namedtuple or is_sequence:\n out = []\n for d in data:\n v = apply_to_collection(\n d, dtype, function, *args, wrong_dtype=wrong_dtype, include_none=include_none, **kwargs\n )\n if include_none or v is not None:\n out.append(v)\n return elem_type(*out) if is_namedtuple else elem_type(out)\n\n if _is_dataclass_instance(data):\n # make a deepcopy of the data,\n # but do not deepcopy mapped fields since the computation would\n # 
be wasted on values that likely get immediately overwritten\n fields = {}\n memo = {}\n for field in dataclasses.fields(data):\n field_value = getattr(data, field.name)\n fields[field.name] = (field_value, field.init)\n memo[id(field_value)] = field_value\n result = deepcopy(data, memo=memo)\n # apply function to each field\n for field_name, (field_value, field_init) in fields.items():\n v = None\n if field_init:\n v = apply_to_collection(\n field_value,\n dtype,\n function,\n *args,\n wrong_dtype=wrong_dtype,\n include_none=include_none,\n **kwargs,\n )\n if not field_init or (not include_none and v is None): # retain old value\n v = getattr(data, field_name)\n try:\n setattr(result, field_name, v)\n except dataclasses.FrozenInstanceError as e:\n raise MisconfigurationException(\n \"A frozen dataclass was passed to `apply_to_collection` but this is not allowed.\"\n \" HINT: is your batch a frozen dataclass?\"\n ) from e\n return result\n\n # data is neither of dtype, nor a collection\n return data", "def serialize_to_python(value):\n serialization_cls = _get_serializer_for_value(value, serializing=True)\n\n if serialization_cls is None:\n raise TypeError(\n 'Unsupported type %s passed to serialize_to_python(). '\n 'Value: %r'\n % (type(value), value))\n\n return serialization_cls.serialize_to_python(value)", "def apply_to_collection(data: Any, dtype: Union[type, tuple], function: Callable, *args: Any, wrong_dtype: Optional[Union[type, tuple]]=None, **kwargs: Any) ->Any:\n elem_type = type(data)\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, **kwargs)\n if isinstance(data, Mapping):\n return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()})\n if isinstance(data, tuple) and hasattr(data, '_fields'):\n return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data))\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data])\n return data", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CorePrimitive, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def map_collection(func, collection):\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection", "def serialise(input):\n if isinstance(input, dict):\n serialised = [(serialise(k), serialise(v)) for (k, v) in input.items()]\n return dict(serialised)\n elif isinstance(input, list):\n return [serialise(x) for x in input]\n elif isinstance(input, bytes):\n # Try decoding as JSON\n try:\n d = json.loads(input)\n return serialise(d)\n except (json.JSONDecodeError, UnicodeError):\n pass\n\n # Not valid JSON: Fall back to base64\n return 
serialise(base64.encodebytes(input).decode())\n else:\n return input", "def convert(self):\n if isinstance(self.json, list):\n return self.iter_list(self.json)\n\n if isinstance(self.json, dict):\n return self.iter_plain(self.json)\n\n return self.json", "def _to_dict(self, item):\n if isinstance(item, Buffer):\n ret = {}\n fields = item._all_fields()\n for field in fields:\n ret[field.attr_name()] = self._to_dict(getattr(item, field.attr_name()))\n return ret\n\n if isinstance(item, Struct):\n ret = {}\n for field in item._container_.fields:\n if hasattr(field, 'name'):\n ret[field.name] = self._to_dict(field.get_value(item))\n elif isinstance(field, FieldListContainer):\n for inner_field in field.fields:\n if not isinstance(inner_field, AnonymousField):\n ret[inner_field.name] = self._to_dict(inner_field.get_value(item))\n return ret\n\n if isinstance(item, bytearray):\n return '0x' + binascii.hexlify(item) if item else ''\n\n if isinstance(item, list):\n return [self._to_dict(x) for x in item]\n\n return item", "def _asdict(self) -> Dict[Text, Any]:\n return self.as_base_types()", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Item, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def to_dict(self, target_dict=None):\n if target_dict is None:\n target_dict = self.storage\n\n result_dict = dict()\n\n def to_inner_dict(actual_value):\n if hasattr(actual_value, 'to_dict'):\n return actual_value.to_dict()\n else:\n return actual_value\n\n for key, value in target_dict.iteritems():\n if value is not None:\n if isinstance(value, dict):\n result_dict[key] = self.to_dict(target_dict=value)\n elif isinstance(value, list):\n temp = list()\n\n for item in value:\n temp.append(to_inner_dict(actual_value=item))\n result_dict[key] = temp\n else:\n result_dict[key] = to_inner_dict(actual_value=value)\n\n return result_dict", "def to_dict(self, target_dict=None):\n if target_dict is None:\n target_dict = self.storage\n\n result_dict = dict()\n\n def to_inner_dict(actual_value):\n if hasattr(actual_value, 'to_dict'):\n return actual_value.to_dict()\n else:\n return actual_value\n\n for key, value in target_dict.iteritems():\n if value is not None:\n if isinstance(value, dict):\n result_dict[key] = self.to_dict(target_dict=value)\n elif isinstance(value, list):\n temp = list()\n\n for item in value:\n temp.append(to_inner_dict(actual_value=item))\n result_dict[key] = temp\n else:\n result_dict[key] = to_inner_dict(actual_value=value)\n\n return result_dict", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value)\n )\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\")\n else item,\n value.items(),\n )\n )\n else:\n result[attr] = value\n if issubclass(Type, 
dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def _make_immutable(value):\n if isinstance(value, dict):\n return Object(value)\n elif isinstance(value, (list, tuple)):\n return Array(value)\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool, Document, Object, Array, Link))\n ):\n return value\n\n raise TypeError(\"Invalid type in document. Got '%s'.\" % type(value))" ]
[ "0.61161685", "0.61161685", "0.6107625", "0.5863955", "0.5799655", "0.57992005", "0.578522", "0.5781495", "0.5722491", "0.5664061", "0.56154233", "0.5547942", "0.55431443", "0.55431443", "0.55431443", "0.5537256", "0.5404099", "0.5372495", "0.5330275", "0.52827215", "0.5273691", "0.52644855", "0.52365905", "0.52198666", "0.5218132", "0.5155695", "0.51408035", "0.51408035", "0.51396435", "0.51360494" ]
0.72010106
0
user_stream() should ignore Follow objects with stale actor references.
def test_stream_stale_follows(self):
    self.user2.delete()
    self.assertNotIn('Two', str(user_stream(self.user1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stream_for_user(self, user):\n follows = self.filter(user=user)\n qs = (Action.objects.stream_for_actor(follow.actor) for follow in follows if follow.actor is not None)\n return reduce(or_, qs, Action.objects.none()).order_by('-timestamp')", "def auto_unfollow_nonfollowers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs here that you want to keep following even if they don't\n # follow you back\n users_keep_following = set([])\n\n not_following_back = following - followers\n\n # make sure the \"already followed\" file exists\n if not os.path.isfile(ALREADY_FOLLOWED_FILE):\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n out_file.write(\"\")\n\n # update the \"already followed\" file with users who didn't follow back\n already_followed = set(not_following_back)\n af_list = []\n with open(ALREADY_FOLLOWED_FILE) as in_file:\n for line in in_file:\n af_list.append(int(line))\n\n already_followed.update(set(af_list))\n del af_list\n\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n for val in already_followed:\n out_file.write(str(val) + \"\\n\")\n\n for user_id in not_following_back:\n if user_id not in users_keep_following:\n t.friendships.destroy(user_id=user_id)\n print(\"unfollowed %d\" % (user_id))", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def follow_user(cls, user, following):\r\n pass", "def following_and_storing(self, user_obj):\n if self.following(user_obj['user']):\n self.monitored_users.append({'user': user_obj['user'], 'username': user_obj['username'],\n 'followDate': datetime.now().timestamp()})", "async def get_streams_followed():\n\tuserid, username, display_name, token = get_user(name=config[\"username\"])\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t\t'Authorization': f\"Bearer {token}\",\n\t}\n\treturn [stream async for stream in get_paginated(\"https://api.twitch.tv/helix/streams/followed\", data={\"user_id\": userid}, headers=headers)]", "def follow_user(cls, user, following):\n pass", "def unfollow(self, user):\n if self.is_following(user):\n self.followed.remove(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, follower, followee):\n pass", "def _init_stream(self):\n stream = tweepy.Stream(self.auth, self)\n\n try:\n print('Trying to create stream...')\n # Cannot follow based on screen name, get ids\n self.trolling_ids = [\n str(self.twitter_api.get_user(screen_name=screen_name).id)\n for screen_name in SCREEN_NAMES_TO_FOLLOW\n ]\n\n stream.filter(follow=self.trolling_ids)\n\n except Exception as e:\n print('*****************************************************')\n print('**** Stream error, init_stream. Trying again... 
****')\n print('*****************************************************')\n print(e)\n\n # Try again to create the stream\n time.sleep(30)\n self._init_stream()", "def user_disappears(self, user):\n pass", "def auto_follow_followers_for_user(user_screen_name, count=5):\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers_for_user = set(t.followers.ids(screen_name=user_screen_name)[\"ids\"][:count]);\n do_not_follow = get_do_not_follow_list()\n \n for user_id in followers_for_user:\n try:\n if (user_id not in following and \n user_id not in do_not_follow):\n\n t.friendships.create(user_id=user_id, follow=False)\n print(\"followed %s\" % user_id)\n\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))", "def test_follow_duplicate(self):\n activity = {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": \"https://example.com/users/rat/follows/123\",\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": \"https://example.com/user/mouse\",\n }\n\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n views.inbox.activity_task(activity)\n\n # the follow relationship should exist\n follow = models.UserFollows.objects.get(user_object=self.local_user)\n self.assertEqual(follow.user_subject, self.remote_user)\n\n with patch(\n \"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"\n ) as mock:\n views.inbox.activity_task(activity)\n self.assertEqual(mock.call_count, 1)\n response_activity = json.loads(mock.call_args[1][\"args\"][1])\n self.assertEqual(response_activity[\"type\"], \"Accept\")\n\n # the follow relationship should STILL exist\n follow = models.UserFollows.objects.get(user_object=self.local_user)\n self.assertEqual(follow.user_subject, self.remote_user)", "def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass", "def test_undo_follow_request(self):\n self.local_user.manually_approves_followers = True\n self.local_user.save(\n broadcast=False, update_fields=[\"manually_approves_followers\"]\n )\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n request = models.UserFollowRequest.objects.create(\n user_subject=self.remote_user, user_object=self.local_user\n )\n self.assertTrue(self.local_user.follower_requests.exists())\n\n activity = {\n \"type\": \"Undo\",\n \"id\": \"bleh\",\n \"to\": [\"https://www.w3.org/ns/activitystreams#Public\"],\n \"cc\": [\"https://example.com/user/mouse/followers\"],\n \"actor\": self.remote_user.remote_id,\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"object\": {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": request.remote_id,\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": \"https://example.com/user/mouse\",\n },\n }\n\n views.inbox.activity_task(activity)\n\n self.assertFalse(self.local_user.follower_requests.exists())", "def unsafe_follow_by_username(self, username: str) -> None:\n uid = self.username_to_id(username)\n self.api.follow(uid)", "def get_user_stream(user):\n spaces = get_accessible_spaces(user)\n ret = model_stream(Space, target_object_id__in=spaces)[:10]\n return ret", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n 
return {'unfollowed': True}", "def new_unfollow(self, user_id, user_name):\n url_unfollow = self.url_unfollow % (user_id)\n try:\n unfollow = self._send_post_request(url_unfollow)\n if unfollow.status_code == 200:\n self.unfollow_counter += 1\n log_string = \"Unfollow: %s #%i.\" % (user_name,\n self.unfollow_counter)\n self.log.debug(log_string)\n return unfollow\n except:\n self.log.debug(\"Exept on unfollow!\")\n return False", "def unfollow(user, actor, send_action=False):\n Follow.objects.filter(user = user, object_id = actor.pk,\n content_type = ContentType.objects.get_for_model(actor)).delete()\n if send_action:\n action.send(user, verb=_('stopped following'), target=actor)", "def unfollow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$pull': {'follows': whom_id}})\n flash('You are no longer following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Unfollow\"\n return redirect(url_for('user_timeline', username=username))", "def add_untracked_followers(self):\n\n self.log.debug(\"CHECK FOR UNTRACKED FOLLOWERS\")\n followers_ids_api = self.api.followers_ids()\n target = Target.objects.filter(hunter=self.user)\\\n .filter(status=Target.FOLLOWER)\n followers_ids_django = [t.hunted.twitter_id for t in target]\n\n untracked_followers_ids = filter(\n lambda x: unicode(x) not in followers_ids_django,\n followers_ids_api)\n\n untracked_followers, remainder = lookup_users_by_id(self.api,\n untracked_followers_ids)\n for untracked_follower in untracked_followers:\n twitter_account, created = \\\n utils.get_or_create_twitter_account(untracked_follower)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n if target.status == Target.PURGATORY:\n # Yay someone we targeted reciprocated follow\n self.follow_reciprocated(target)\n else:\n print target.status\n # Either a totally external follow, an ingrate changed mind,\n # or someone who we chatted became interested and followed\n # Either way the action is the same, follow him\n target.status = Target.FOLLOWER\n target.save()\n self.log.debug(\" => Add follower: %s\" % twitter_account.screen_name)", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def on_deleted_follow(sender, instance: models_actstream.Follow, **kwargs):\n content_type = ContentType.objects.get_for_id(instance.content_type_id)\n log.debug(\"Unfollowing %s %s\" % (content_type.name, instance.object_id))\n dillo.tasks.feeds.repopulate_timeline_content(\n instance.content_type_id, instance.object_id, instance.user_id, 'unfollow'\n )", "def unfollow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 1:\n self.users_hat[following_index, user_index] = 0\n elif self.is_verbose():\n self.log(f\"User {following_index} was not following user {user_index}\")", "def get_followers(user):\n if user.has_key('followers_list'):\n 
pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def unsubscribe_user_follow(self, uuid: UUID) -> bool:\n return self._generic_unsubscribe('/users/follows', uuid)", "def user_appears(self, user):\n pass" ]
[ "0.72431296", "0.613551", "0.60416275", "0.5982041", "0.59290546", "0.5918249", "0.58727956", "0.58393335", "0.58125633", "0.58125633", "0.57773435", "0.5733144", "0.5720436", "0.56932294", "0.5689734", "0.56663066", "0.56498605", "0.5634246", "0.56290513", "0.56242585", "0.55957127", "0.5576265", "0.5561096", "0.55406046", "0.5529876", "0.5497517", "0.54823625", "0.54808766", "0.5456625", "0.54402757" ]
0.7202341
1
Extract webcam data from its URL
def parse_url(url):
    url_parts = url.split('/')
    webcam_name = url_parts[-3] + 'CAM' + url_parts[-2]
    file_ext = url[-5:-1]
    last_update = 0.
    return {
        'url': url[:-1],  # Skip end of line
        'name': webcam_name,
        'imgpath': os.path.join(WEBCAM_DIR, webcam_name, '%d' + file_ext),
        'last_update': last_update
    }
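A quick call sketch for parse_url above, assuming it is in scope in the same module; WEBCAM_DIR and the sample URL are placeholders, and the trailing newline mimics a raw line read from a camera-list file.

import os

WEBCAM_DIR = "/data/webcams"  # placeholder; the real constant is defined elsewhere

sample = "http://example.org/cams/ALPHA/3/current.jpg\n"
info = parse_url(sample)
print(info["name"])     # ALPHACAM3
print(info["imgpath"])  # /data/webcams/ALPHACAM3/%d.jpg
print(info["url"])      # http://example.org/cams/ALPHA/3/current.jpg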
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_from_webcam(self):\n print \"try fetch from webcam...\"\n stream=urllib.urlopen('http://192.168.0.20/image/jpeg.cgi')\n bytes=''\n bytes+=stream.read(64500)\n a = bytes.find('\\xff\\xd8')\n b = bytes.find('\\xff\\xd9')\n\n if a != -1 and b != -1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),1)\n \n return i\n else:\n print \"did not receive image, try increasing the buffer size in line 13:\"", "def get_from_webcam():\n print \"try fetch from webcam...\"\n stream=urllib.urlopen('http://192.168.0.20/image/jpeg.cgi')\n bytes=''\n bytes+=stream.read(64500)\n a = bytes.find('\\xff\\xd8')\n b = bytes.find('\\xff\\xd9')\n\n if a != -1 and b != -1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_COLOR)\n return i\n else:\n print \"did not receive image, try increasing the buffer size in line 13:\"", "def from_http_stream(ip, port):\n # Replace the URL with your own IPwebcam shot.jpg IP:port\n url = f\"http:/{ip}:{port}/shot.jpg\"\n\n while True:\n\n # Use urllib to get the image and convert into a cv2 usable format\n img_arr = np.array(\n bytearray(urllib.request.urlopen(url).read()), dtype=np.uint8\n )\n img = cv2.imdecode(img_arr, -1)\n cv2.imshow(\"IPWebcam\", img)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n cv2.destroyAllWindows()", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def read(self):\n stream = None\n bytes = None\n try:\n stream = urllib2.urlopen(self.snapshot)\n bytes = stream.read()\n except Exception, e:\n logger.info(\"error handing the url % s\" % self.snapshot)\n return False, None\n\n try:\n i = cv2.imdecode(np.fromstring(bytes, dtype=np.uint8), flags=1)\n return True, i\n except Exception, snapshoterror:\n logger.warning(\"check get %s\" % self.snapshot)\n return False, None", "def retrieveURL(mw, url):\n req = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0 (compatible; Anki)'})\n resp = urllib2.urlopen(req)\n # ct = resp.info().getheader(\"content-type\")\n filecontents = resp.read()\n # strip off any query string\n url = re.sub(r\"\\?.*?$\", \"\", url)\n path = unicode(urllib2.unquote(url.encode(\"utf8\")), \"utf8\")\n fname = os.path.basename(path)\n if not fname:\n fname = checksum(filecontents)\n return mw.col.media.writeData(unicode(fname), filecontents)", "def get_content(url):\n img=requests.get(url).content\n return img", "def obtenerVideo(camara):\n val, frame = camara.read()\n return val, frame", "def get_image_from_camera(self, url):\n if DEBUG:\n print(\"[DEBUG] Getting image from BlueIris url: %s\" % url)\n\n resp = urllib.request.urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)\n self.timestamp = time.time()\n self.trigger_image = image\n self.processed_image = image # Start off by having processed image same as initial image\n\n self._init_new_image()\n # if DEBUG:\n # # print(\"[DEBUG] [ImageFrame.get_image_from_camera] Image width: {}, height: {}\".format(\n # self.width, self.height))\n\n # return the image\n return self.trigger_image", "def get_camera_feed(self):\r\n # get the frame..from cam feed\r\n read_status, self.frame = self.capture.read()\r\n return self.frame", 
"def get_video_info(url):\n ydl = youtube_dl.YoutubeDL()\n ydl.add_default_info_extractors()\n\n try:\n return ydl.extract_info(url, download=False)\n except youtube_dl.DownloadError:\n return None", "def get_info_of_url(url):\n pass", "def soup_process_video(input_url):\r\n # scrape the url\r\n fp = urllib.request.urlopen(input_url)\r\n #read bytes\r\n mybytes = fp.read()\r\n mystr = mybytes.decode(\"utf8\")\r\n fp.close()\r\n soup = BeautifulSoup(mystr,'html.parser')\r\n return (soup.find(\"a\", {'class': \"download-btn\"}).get('href'))", "def get_url_info(input_url):\n print(\"URL:\", input_url.url)\n print(\"Connection status:\", input_url.status_code)\n print(\"Time elapsed to connect to URL:\", input_url.elapsed)\n print(\"URL headers:\", input_url.headers)\n print(\"URL type:\", type(input_url.content))", "def video_feed(self):\r\n model.video.link(self.link)\r\n age_net, gender_net = model.video.caffe_models()\r\n return Response(model.video.video_detector(age_net, gender_net),mimetype='multipart/x-mixed-replace; boundary=frame')", "def capture_image():\n\n endpoint = CAMERA_CAPTURE_URL + \"/camera/capture\"\n if DEBUG:\n print(\"Calling endpoint '%s'\" % endpoint)\n\n response = requests.get(endpoint)\n\n if response.status_code == 200:\n return response.content\n else:\n if DEBUG:\n print(\"Call to endpoint '%s' returned status code %s. Reason: %s\" % (endpoint, str(response.status_code), response.content))\n return None", "def get_raw_data(url):\n\n req = requests.get(url, stream=True)\n req.raw.decode_content = True\n return req.raw", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def getVotacion(self, url):", "def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200", "def capture():\n\tcap = cv2.VideoCapture(0)\n\tret, frame = cap.read()\n\tcap.release()\n\tcv2.destroyAllWindows()\n\treturn frame", "def get_videos(url):\n videos = []\n if 'cinebix.com' in url:\n resolve_media(url,videos)\n return videos\n \n html = requests.get(url, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('^singcont')})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n mlink = SoupStrainer('div', {'class':'entry-excerpt'})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n if 'http' in str(link):\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n try:\n url = videoclass.p.a.get('href')\n resolve_media(url,videos)\n except:\n pass \n \n return videos", "def _get_video_from_html(self, results_page, verbose=False):\n d = json.loads(results_page.text)\n for record in d['data']['records']:\n video_url = record['videoUrl']\n if verbose:\n print \"Video url: \" + video_url\n self._download_from_url(video_url)", "def stream_frames(video_capture):", "def get_data_from_web():\n pass", "def joblib_read_img_url(url):\n\n from matplotlib.image import imread\n fd = urlopen(url, timeout=10)\n return imread(io.BytesIO(fd.read()))", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def requesturl(url):\n r = requests.get(url)\n text = r.text.strip()\n try:\n image = Image.open(io.BytesIO(r.content))\n return {\n 'source_url': url,\n 'url': r.url,\n 'md5': getmd5(image),\n 'img_grey': image_to_byte_array(convertgrey(image)),\n 'height': 
image.height,\n 'width': image.width,\n 'datetime_created': datetime.datetime.now()\n }\n except:\n if 'Error' in text:\n text = find_between(text)\n\n return {\n 'error': text,\n 'source_url': url,\n 'url': r.url,\n 'datetime_created': datetime.datetime.now()\n }", "def load(url):\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image", "def read_image_from_video_stream(camera_id: int) -> ndarray:\n cap = cv2.VideoCapture(camera_id)\n cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))\n width = 1920\n height = 1080\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n if (cap.isOpened() == False):\n print(\"Error opening video stream or file\")\n ret, frame = cap.read()\n cap.release()\n return frame" ]
[ "0.7223624", "0.7047085", "0.64235824", "0.6267462", "0.5997199", "0.58141863", "0.5808723", "0.58079857", "0.5693405", "0.5635531", "0.5551949", "0.5531054", "0.55178297", "0.55173516", "0.5516314", "0.54888344", "0.5441601", "0.5432836", "0.5432544", "0.54296404", "0.54140913", "0.5398326", "0.5397244", "0.5393617", "0.53861994", "0.537635", "0.5374276", "0.53723735", "0.53686243", "0.5357203" ]
0.7602071
0
Add quality, num and numbase attributes to indicate a 'major semibreve' (as opposed to a 'minor semibreve').
def sb_major_minor(children_of_voiceStaff):
    indices_BrevesOrTuplets = [-1]
    for element in children_of_voiceStaff:
        if (element.name == 'tuplet') or (element.hasAttribute('dur') and (element.getAttribute('dur').value == 'brevis' or element.getAttribute('dur').value == 'longa' or element.getAttribute('dur').value == 'maxima')):
            indices_BrevesOrTuplets.append(children_of_voiceStaff.index(element))
    for i in range(0, len(indices_BrevesOrTuplets)-1):
        start = indices_BrevesOrTuplets[i]
        end = indices_BrevesOrTuplets[i+1]
        number_sb = end - start - 1
        # Case 1: Even number of semibreves
        if number_sb % 2 == 0:
            cont_sb = 0
            for j in range(start+1, end):
                cont_sb = cont_sb + 1
                # 2nd, 4th, 6th, ... semibreve in the sequence; generally, these are the ones that are Major (default case), but there are exceptions
                if cont_sb % 2 == 0:
                    previous_sb = children_of_voiceStaff[j-1]
                    # The exception: tenuto marks (downward stems) in the previous note (1st, 3rd, 5th, ... semibreve)
                    if previous_sb.hasAttribute('artic') and previous_sb.getAttribute('artic').value == 'ten':
                        previous_sb.addAttribute('quality', 'major')
                        previous_sb.addAttribute('num', '1')
                        previous_sb.addAttribute('numbase', '2')
                    # The default case:
                    else:
                        current_sb = children_of_voiceStaff[j]
                        current_sb.addAttribute('quality', 'major')
                        current_sb.addAttribute('num', '1')
                        current_sb.addAttribute('numbase', '2')
                else:
                    pass
        # Case 2: Odd number of semibreves
        else:
            # This can (should) only happen when there is a 2:1 tuplet at one end of the sequence of semibreves,
            # so that the whole tuplet is equal to just 1 minor semibreve,
            # and the semibreve that precedes/follows it (usually with a downward stem to indicate its longer duration in the group) is the Major Semibreve that completes the Perfect Breve.
            # Without this grouping (major semibreve and tuplet), we are left with an even number of semibreves that can be grouped into minor-major pairs, as usual.
            start_element = children_of_voiceStaff[start]
            end_element = children_of_voiceStaff[end]
            # If the 2:1 tuplet precedes the sequence of semibreves
            if (start_element.name == 'tuplet' and start_element.getAttribute('num').value == '2' and start_element.getAttribute('numbase').value == '1'):
                # The semibreve that follows this 2:1 tuplet should be major (completing the perfection)
                major_sb = children_of_voiceStaff[start + 1]
                major_sb.addAttribute('quality', 'major')
                major_sb.addAttribute('num', '1')
                major_sb.addAttribute('numbase', '2')
                # The other semibreves are grouped into minor-major pairs
                cont_sb = 0
                for j in range(start+2, end):
                    cont_sb = cont_sb + 1
                    # The second semibreve of each pair: generally it is Major (default case), but there are exceptions
                    if cont_sb % 2 == 0:
                        previous_sb = children_of_voiceStaff[j-1]
                        # The exception: tenuto marks (downward stems) in the previous note (1st, 3rd, 5th, ... semibreve)
                        if previous_sb.hasAttribute('artic') and previous_sb.getAttribute('artic').value == 'ten':
                            previous_sb.addAttribute('quality', 'major')
                            previous_sb.addAttribute('num', '1')
                            previous_sb.addAttribute('numbase', '2')
                        # The default case:
                        else:
                            current_sb = children_of_voiceStaff[j]
                            current_sb.addAttribute('quality', 'major')
                            current_sb.addAttribute('num', '1')
                            current_sb.addAttribute('numbase', '2')
                    # The first semibreve of each pair (it is generally minor, so we don't make any changes to it)
                    else:
                        pass
            # If the 2:1 tuplet follows the sequence of semibreves
            elif (end_element.name == 'tuplet' and end_element.getAttribute('num').value == '2' and end_element.getAttribute('numbase').value == '1'):
                # The semibreve that precedes the 2:1 tuplet should be major (completing the perfection)
                major_sb = children_of_voiceStaff[end - 1]
                major_sb.addAttribute('quality', 'major')
                major_sb.addAttribute('num', '1')
                major_sb.addAttribute('numbase', '2')
                # The other semibreves are grouped into minor-major pairs
                cont_sb = 0
                for j in range(start+1, end-1):
                    cont_sb = cont_sb + 1
                    # The second semibreve of each pair: generally it is Major (default case), but there are exceptions
                    if cont_sb % 2 == 0:
                        previous_sb = children_of_voiceStaff[j-1]
                        # The exception: tenuto marks (downward stems) in the previous note (1st, 3rd, 5th, ... semibreve)
                        if previous_sb.hasAttribute('artic') and previous_sb.getAttribute('artic').value == 'ten':
                            previous_sb.addAttribute('quality', 'major')
                            previous_sb.addAttribute('num', '1')
                            previous_sb.addAttribute('numbase', '2')
                        # The default case:
                        else:
                            current_sb = children_of_voiceStaff[j]
                            current_sb.addAttribute('quality', 'major')
                            current_sb.addAttribute('num', '1')
                            current_sb.addAttribute('numbase', '2')
                    # The first semibreve of each pair (it is generally minor, so we don't make any changes to it)
                    else:
                        pass
            # Mistake case: if there is no 2:1 tuplet at either end of the sequence, there shouldn't be an odd number of semibreves
            else:
                print("This shouldn't happen! \nThere is an odd number of semibreves between two perfect breves (or tuplets that are equivalent to a perfect breve), \nwhich doesn't allow to form minor-major (or major-minor) pairs of semibreves.")
                print("You can find these breves between the " + str(start_element.name) + " with id " + str(start_element.id) + " and the " + str(end_element.name) + " with id " + str(end_element.id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def semver():\n return \".\".join([str(v) for v in VERSION])", "def semantic_version(self) -> str:\n\n version_core = f\"{self.major_version}.{self.minor_version}.{self.patch_version}\"\n sep = \"-\" if self.pre_release != \"\" else \"\"\n\n return f\"{version_core}{sep}{self.pre_release}\"", "def test_semver_property(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = ('major', 'minor', 'patch', 'build')\n\n self.assertEqual(v1.semver, expected)", "def calc_semivariance(self):\n bounds = self.range_bins\n r = []\n v = []\n for b in bounds:\n d = self._get_data_distance(0., b)\n r.append(b)\n v.append(0.5 * np.ma.var(d)) # semivariance\n o = {'r': np.asarray(r), 'sigma': np.asarray(v)}\n self.statistic.update({'semivariogram': o})", "def minor_def(self):\r\n if self.pronunciation:\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.pronunciation}\".rstrip(\", \")\r\n return f\"{self.name}, {self.lat}, {self.lon}\"", "def setsem2(self, semparams):\n self.sem_params2S = semparams\n self.sem_params2E = semparams", "def semver(cls, rev):\r\n def parse_extra(delimiter, value):\r\n if not value:\r\n return None, None\r\n else:\r\n components = value.split(delimiter, 1)\r\n return components[0], None if len(components) == 1 else components[1]\r\n\r\n def parse_patch(patch):\r\n patch, pre_release = parse_extra('-', patch)\r\n if pre_release:\r\n pre_release, build = parse_extra('+', pre_release)\r\n else:\r\n patch, build = parse_extra('+', patch)\r\n return patch, pre_release, build\r\n\r\n def parse_components(value):\r\n if not value:\r\n yield None\r\n else:\r\n for atom in value.split('.'):\r\n yield cls._parse_atom(atom)\r\n\r\n try:\r\n major, minor, patch = rev.split('.', 2)\r\n patch, pre_release, build = parse_patch(patch)\r\n components = [int(major), int(minor), int(patch)]\r\n components.extend(parse_components(pre_release))\r\n components.extend(parse_components(build))\r\n return cls(*components)\r\n except ValueError:\r\n raise cls.BadRevision(\"Failed to parse '%s' as a semantic version number\" % rev)", "def _get_semver_versions(self, versions):\n semver = []\n for ver in versions:\n semver.append(api.to_semver(ver))\n return semver", "def semimajor(P,M):\n if type(P) != Quantity:\n P = P*u.day\n if type(M) != Quantity:\n M = M*u.M_sun\n a = ((P/2/np.pi)**2*const.G*M)**(1./3)\n return a.to(u.AU)", "def pep440_from_semver(semver):\n segment = ''\n if semver.prerelease:\n segment = '.dev{}'.format('.'.join(semver.prerelease))\n local_version = '.'.join(semver.build)\n local_version = local_version.replace('-', '.')\n version_str = '{}.{}.{}{}'.format(semver.major, semver.minor, semver.patch, segment)\n # Include the local version if we are not a true release\n if local_version and semver.prerelease:\n version_str = '{}+{}'.format(version_str, local_version)\n return version_str", "def get_semver_versions(self, versions):\n semver = []\n for ver in versions:\n semver.append(api.to_semver(ver))\n return semver", "def report_version(self, data):\n self.firmata_version.append(data[0]) # add major\n self.firmata_version.append(data[1]) # add minor", "def bump_minor(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.micro == 0:\n return self.get_stable().bump_minor(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor + inc, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def parse_major(self, header_text: str) -> None:\n major_match = re.search(r\"(?<=Major:)\\s*?\\K.*?(?=\\n)\", 
header_text, RE_OPT)\n if not major_match:\n raise ValueError(\"Major not found\")\n self.major = major_match.group(0).strip()", "def set_major(net_id, major):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET major='\"+major+\"' WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "def add_sig(self, s):\n self.sigs += ' ' * self.indent + s + '\\n'", "def bump_major(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.minor == 0 and self.micro == 0:\n return self.get_stable().bump_major(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major + inc, 0, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def addSemanticsAnnotation(self, *args):\n return _libsbml.ASTNode_addSemanticsAnnotation(self, *args)", "def minor_version(self, minor_version):\n\n self._minor_version = minor_version", "def minor_version(self, minor_version):\n\n self._minor_version = minor_version", "def root_semrep(syntree, semkey='SEM'):\n from nltk.grammar import FeatStructNonterminal\n\n node = syntree.node\n assert isinstance(node, FeatStructNonterminal)\n try:\n return node[semkey]\n except KeyError:\n print node,\n print \"has no specification for the feature %s\" % semkey\n raise", "def addSemicircles(self):\n #axes setup\n self.setup_axes(animate=False)\n self.axes.move_to(ORIGIN)\n self.axes.shift(LEFT*5)\n \n #equations of circle\n global equation_upper, equation_lower\n equation_upper = lambda x : math.sqrt((self.x_max)**2 - x**2)\n equation_lower = lambda x : -1*math.sqrt((self.x_max)**2 - x**2)\n\n #get_graph for upper and lower semicircle\n global graph_upper, graph_lower\n graph_upper = self.get_graph(equation_upper, color=BLUE)\n graph_lower = self.get_graph(equation_lower, color=BLUE)\n\n #write graphs\n self.add(graph_upper,graph_lower)", "def showSeparator():\n\treturn (1, 0)", "def minor_xvals(self):\n return self.prepStrs", "def add_evasion_chance(sem, data, ml_model_desc, feature_lst, key_lst):\n # first semester students don't have previous evasion chance calculated\n if sem == 1: \n return\n \n # train feature list and key list should have same length\n assert(len(feature_lst) == len(key_lst))\n\n # get base value \n (amount, grad_base_val, evade_base_val, migr_base_val) = \\\n get_tot_grad_evd_migr(data)\n\n # iterate through the list with the keys\n for index in range(len(key_lst)):\n\n # get key and student for that particular info \n key = key_lst[index]\n stu = data[key]\n\n # if the student left before the semester we are considering\n if sem > stu.get_num_semesters():\n\n # if the student stayed for one semester, use the base as value\n if stu.get_num_semesters() == 1: \n pred_lst = [grad_base_val, evade_base_val, migr_base_val]\n\n # else\n else: \n # put last semester as the evasion chance\n pred_lst = [grad_base_val, evade_base_val, migr_base_val]\n #pred_lst = stu.evasion_chance[(stu.get_num_semesters(), ml_model_desc)]\n\n # else, it's a normal case\n else: \n pred_lst = [grad_base_val, evade_base_val, migr_base_val]\n #pred_lst = stu.evasion_chance[(sem - 1, ml_model_desc)]\n \n assert(len(pred_lst) == 3)\n\n # add it to the training feature instance\n feature_lst[index].append(pred_lst[GRAD_IND])\n feature_lst[index].append(pred_lst[EVADE_IND])\n feature_lst[index].append(pred_lst[MIGR_IND])", "def new_years_eve(year):\n return (year, DEC, 31)", "def insertInitialCoreFuelAssem(r, report):\n report[NEUTRONICS_SECTION][INITIAL_CORE_FUEL_ASSEMBLY] = 
newReports.Table(\n INITIAL_CORE_FUEL_ASSEMBLY,\n \"Summary of Initial Core Fuel Assembly\",\n header=[\n \"Assembly Name\",\n \"Enrichment %\",\n \"# of Assemblies at BOL\",\n ],\n )\n assemTypes = defaultdict(int)\n enrichment = defaultdict(float)\n for assem in r.core.getAssemblies(Flags.FUEL):\n enrichment[assem.p.type] = round(assem.getFissileMassEnrich() * 100, 7)\n assemTypes[assem.p.type] = assemTypes[assem.p.type] + 1\n for typeA in assemTypes:\n report[NEUTRONICS_SECTION][INITIAL_CORE_FUEL_ASSEMBLY].addRow(\n [\n typeA,\n enrichment[typeA],\n assemTypes[typeA],\n ]\n )", "def generate_chroma_major_minor_sevenths():\n return _generate_chroma(CHORD_TEMPLATES_MAJOR_MINOR + CHORD_TEMPLATES_SEVENTHS)", "def major_version(self, major_version):\n\n self._major_version = major_version", "def major_version(self, major_version):\n\n self._major_version = major_version" ]
[ "0.534129", "0.5172261", "0.49749923", "0.47594857", "0.4746601", "0.47156894", "0.46635336", "0.46603906", "0.46444976", "0.45895258", "0.45784786", "0.45728213", "0.45512292", "0.44898224", "0.4460569", "0.44557804", "0.44548148", "0.44284213", "0.43896654", "0.43896654", "0.4366456", "0.4366392", "0.43636996", "0.43598697", "0.43338078", "0.43331933", "0.43277502", "0.43277398", "0.43029985", "0.43029985" ]
0.5632589
0
Estimate the number of syllables for a word
def estimate(word):
    parts = re.split(r'[^aeiouy]+', word)
    valid_parts = []
    for part in parts:
        if part != '':
            valid_parts.append(part)
    syllables = 0
    for p in re_subsyllables:
        if p.match(word):
            syllables -= 1
    for p in re_addsyllables:
        if p.match(word):
            syllables += 1
    syllables += len(valid_parts)
    if syllables <= 0:
        syllables = 1
    return syllables
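To make the heuristic above concrete, here is a self-contained sketch of its core step: counting maximal runs of vowels (including 'y'). The module-level re_subsyllables / re_addsyllables regex lists used above are not shown in the document, so this sketch omits those corrections.

import re

def vowel_groups(word):
    # Maximal runs of vowels (and 'y'); each run approximates one syllable
    return [p for p in re.split(r'[^aeiouy]+', word.lower()) if p != '']

for w in ("word", "syllable", "rhythm", "queue"):
    print(w, max(1, len(vowel_groups(w))))
# word 1, syllable 3, rhythm 1, queue 1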
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_syllables(self, word):\n\n return 1", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n return len(word)//3", "def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count", "def num_syllables(self, word):\n # TODO: provide an implementation!\n word = word.lower()\n D = self._pronunciations\n #D = nltk.corpus.cmudict.dict()\n if(word not in D.keys()):\n #print word not in CMUDictionary\n return 1\n\n #count stores no of syllables for each pronunciation of the word\n count = []\n\n #for each pronunciation\n for x in D[word]:\n n = 0\n #for each syllable\n for y in x:\n #if vowel sound\n if y[-1].isdigit():\n n = n + 1\n count.append(n)\n # return the pronunciation having least syllables\n return min(count)\n #return min([len([y for y in x if y[-1].isdigit()]) for x in D[word.lower()]])", "def syllable_counter(word):\n letters = [c for c in list(word.lower()) if c.isalpha()]\n\n if len(letters) == 0:\n return 0\n\n if len(letters) in [1, 2]:\n return 1\n\n num_syllables = 0\n last_syllable_pos = 0\n for i, letter in enumerate(letters):\n if letter not in VOWELS:\n if i and letters[i - 1] in VOWELS:\n num_syllables += 1\n last_syllable_pos = i\n syllable = ''\n elif i == len(letters) - 1:\n if letter != 'e':\n num_syllables += 1\n elif i - last_syllable_pos >= 2:\n num_syllables += 1\n\n return num_syllables or 1", "def countsyllables_en(word):\r\n\tif not word:\r\n\t\treturn 0\r\n\r\n\t# Remove final silent 'e'\r\n\tif word[-1] == \"e\":\r\n\t\tword = word[:-1]\r\n\r\n\t# Check for a cached syllable count\r\n\tif word in fallback_cache:\r\n\t\treturn fallback_cache[word]\r\n\r\n\t# Count vowel groups\r\n\tresult = 0\r\n\tprev_was_vowel = False\r\n\tfor char in word:\r\n\t\tis_vowel = char in VOWELS or char == 'y'\r\n\t\tif is_vowel and not prev_was_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\t# Add & subtract syllables\r\n\tfor r in fallback_addsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult += 1\r\n\tfor r in fallback_subsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult -= 1\r\n\r\n\t# Cache the syllable count\r\n\tfallback_cache[word] = result\r\n\r\n\treturn result", "def num_syllables(self, word):\n \"\"\"\n using the logic of vowel counting, count all vowels in the pronunciations\n \"\"\"\n dictionary = self._pronunciations;\n # check if word is present in the CMU dictionary\n if word in dictionary :\n word_pronunciations = dictionary[word.lower()]\n else :\n return 1\n \n vowels = ['A', 'E', 'I', 'O', 'U']\n \n ## find the shorter pronunciation for word\n shorter_arr = [];\n for pronunciation in word_pronunciations :\n if len(pronunciation) > len(shorter_arr) : shorter_arr = pronunciation\n \n num_length = 0\n \n for phoneme in shorter_arr :\n if phoneme[:1] in vowels : num_length += 1\n \n return num_length", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def 
count_syllables(text):\n\n import re\n\n # Make a list of vowel sounds presenting in the text (converted to lower-case letters)\n syllable_list = re.findall(r'[aiouy]+e*|e(?!d\\b|ly)[aiouye]?|[td]ed|le\\b', text.lower())\n # Find the size of the list\n count = len(syllable_list)\n\n return count", "def get_syllables(word):\n\tif word not in syllable_dict:\n\t\ttry: syllables = wordApi.getHyphenation(word)\n\t\texcept UnicodeEncodeError:\n\t\t\tsyllable_dict[word] = np.NaN\n\t\tif not syllables:\n\t\t\tsyllables = wordApi.getHyphenation(word.lower())\n\t\t\tif not syllables:\n\t\t\t\tsyllables = wordApi.getHyphenation(word.capitalize())\n\t\t\t\tif not syllables:\n\t\t\t\t\tsyllable_dict[word] = np.NaN\n\t\t\t\t\treturn syllable_dict[word]\n\t\tsyllable_dict[word] = len(syllables)\n\treturn syllable_dict[word]", "def word_syllables(word):\n\n count = 0\n endings = '!@#$%^&*()_+[]{}:;,.eE\"'+\"'\"\n\n while word[-1] in endings:\n word = word[: -1]\n\n if len(word) <= 3:\n return 1\n\n vows = 'aeiouAEIOU'\n prev_char_vow = False\n for char in word:\n if char in vows:\n if not prev_char_vow:\n count = count + 1\n prev_char_vow = True\n else:\n prev_char_vow = False\n\n if word[-1] in 'Yy':\n count = count + 1\n\n return count", "def syllable_count(word):\n # Count the vowels in the word\n # Subtract one vowel from every dipthong\n count = len(re.findall(r'([aeiouyAEIOUY]+)', word))\n # Subtract any silent vowels\n if len(word) > 2:\n if word[-1] == 'e' and \\\n not is_vowel(word[-2]) and \\\n is_vowel(word[-3]):\n count = count - 1\n return count", "def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1", "def count_syllables_in_word(word):\n\n count = 0\n\n endings = '!,;.?:'\n last_char = word[-1]\n\n if last_char in endings:\n processed_word = word[0:-1]\n else:\n processed_word = word\n\n\n if len(processed_word) <= 3:\n return 1\n if processed_word[-1] in 'Ee':\n processed_word = processed_word[0:-1]\n\n vowels = 'aeiouAEIOU'\n prev_char_was_vowel = False\n\n for char in processed_word:\n if char in vowels:\n if not prev_char_was_vowel:\n count += 1\n prev_char_was_vowel = True\n\n else:\n prev_char_was_vowel = False\n\n if processed_word[-1] in 'yY':\n count += 1\n \n\n return count", "def count_syllables(book):\n d = dict(cmudict.entries())\n with open(book, 'r') as myfile:\n booky = myfile.read().lower()\n tokenized_book = nltk.word_tokenize(booky)\n\n count = 0\n for word in tokenized_book:\n count += ( nsly(word, d))\n\n return count", "def count_syllables(word):\n vowels = \"aeiouy\"\n count = 0\n last_was_vowel = False\n for letter in word:\n found_vowel = False\n for v in vowels:\n if v == letter:\n if not last_was_vowel: count += 1 # don't count diphthongs\n found_vowel = last_was_vowel = True\n break\n if not found_vowel: # If full cycle and no vowel found, set last_was_vowel to false\n last_was_vowel = False\n\n\n if len(word) > 2 and word[-2:] == \"es\" and count > 1: # Remove es - it's \"usually\" silent (?)\n count -= 1\n\n if len(word) > 4 and word[-1:] == \"e\": # remove silent e\n count -= 1\n\n if len(word) > 1 and word[-2:] == \"ee\": # adds 1 for na\n count += 1\n\n if len(word) > 1 and word[-2:] == \"na\": # adds 1 for na\n count += 1\n\n # Check for special case words\n special_case = ['eloise','i']\n if word in special_case:\n count += 1\n\n return count", "def 
syll_over_text(data_word):\n\n step = 200\n y = []\n temp_syll = []\n\n for count, word in enumerate(data_word, 1):\n\n temp_syll.append(textstat.syllable_count(word))\n\n if count >= step:\n y.append(sum(temp_syll)/len(temp_syll))\n temp_syll = temp_syll[1:]\n\n x = range(step,len(y)+step)\n return x,y", "def n_syllables_per_word(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> tuple[int, ...]:\n if lang is None:\n if isinstance(doc_or_tokens, Doc):\n lang = doc_or_tokens.lang_\n else:\n raise ValueError(\n \"`lang` must be specified when computing n syllables per word \"\n \"from an iterable of tokens\"\n )\n hyphenator = utils.load_hyphenator(lang=lang)\n words = utils.get_words(doc_or_tokens)\n return tuple(len(hyphenator.positions(word.lower_)) + 1 for word in words)", "def syllable_counter(string):\n\ti = 0 # index of while loop \n\tcounter = 0 # counter of syllables\n\tvowels = ['a','e','i','o','u','y','e '] # what are vowels\n\tdiphthongs = ['ee', 'ei', 'ea', 'oo', 'oi', 'oy', 'ou', 'ai', 'ie', 'ey', 'ay'] #what are diphthongs\n\tindex = 0 \n\n\twhile string[index] != ' ': # break at space\n\t\tchar = string[index] # look at each letter in string\n\t\tnext_char = string[index+1] # and the letter following\n\t\tif char.isalpha():\n\t\t\tif char in vowels: \n\t\t\t\tif (char + next_char in diphthongs): \n\t\t\t\t\tcounter = counter + 1 # count\n\t\t\t\t\tindex = index + 1 # skips second letter in diphthong\n\t\t\t\telif (char == 'e' and next_char == ' '): # assume if e at end of word, is not syllable\n\t\t\t\t\tpass # don't count\n\t\t\t\telse: \n\t\t\t\t\tcounter = counter + 1 # if it's a solitary vowel, add one to counter\n\t\tindex = index + 1\n\n\treturn counter", "def countsyllables_nlde(word):\r\n\tresult = 0\r\n\tprev_was_vowel = word[0] in VOWELS\r\n\tfor char in word[1:]:\r\n\t\tis_vowel = char in VOWELS\r\n\t\tif prev_was_vowel and not is_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\tif (len(word) > 1 and word[0] in VOWELS\r\n\t\t\tand word.endswith('e') and not word[-2] in VOWELS):\r\n\t\tresult += 1\r\n\treturn result or 1", "def number_syllables(self):\n return len(self.array_form)", "def n_syllables(doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return sum(nspw)", "def n_polysyllable_words(\n doc_or_tokens: types.DocOrTokens,\n *,\n lang: Optional[str] = None,\n min_n_syllables: int = 3,\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns >= min_n_syllables)", "def n_monosyllable_words(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, 
Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns == 1)", "def _label_width(text):\n width = 0\n for lineno, line in enumerate(text.split(u'\\n')):\n size = [_BIG_FONT, _SMALL_FONT][lineno > 0] # Cool idiom, huh?\n width = max(width, size * len(line))\n return width", "def getWordCharCount(w):\r\n rus = len(re.findall(r\"[а-я]\",w))\r\n eng = len(re.findall(r\"[a-z]\",w))\r\n c = len(w) \r\n return c, rus, eng", "def getWordScore(word, n):\n score=0\n for i in range(len(word)):\n addition=SCRABBLE_LETTER_VALUES[word[i]]\n score+=addition*(len(word))\n if len(word)==n:\n score+=50\n return score", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def generate_syllable():\n return generate_vowel() + generate_consonant()" ]
[ "0.85625124", "0.84106743", "0.83903486", "0.8303203", "0.8157759", "0.8099385", "0.79931957", "0.79399204", "0.7871262", "0.7720224", "0.76509124", "0.76158285", "0.7581889", "0.7564774", "0.7540934", "0.7310568", "0.7064575", "0.7019314", "0.69613206", "0.69463545", "0.6879662", "0.6773334", "0.6710158", "0.6672178", "0.6422191", "0.6408502", "0.62027955", "0.6141316", "0.61241835", "0.61075866" ]
0.85197634
1
takes the list of transcript csv files and adds spoken words associated with task 2 to a txt file
def csv_to_txt(): print('csv to text') input_files = sys.argv[1] i = 0 for filename in os.listdir(input_files): print(i, filename[11:-4]) output_txt_file = '' current_csv_df = pd.read_csv(sys.argv[1] + filename) for index, row in current_csv_df.iterrows(): if (row['task_number'] == TASK_3[0] or row['task_number'] == TASK_3[1]) and type( row['spoken_word']) != float: output_txt_file += " " + row['spoken_word'] txt_file = open('jan27_memory_texts/' + filename[11:-4] + '.txt', "a") txt_file.write(output_txt_file.lstrip(' ')) txt_file.close() i+=1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(directory, csv_file, task_name):\n csv_data = pd.read_csv(csv_file)\n colnames = csv_data.columns.tolist()\n\n edat_files = glob.glob(directory + \"*.edat*\")\n text_files = glob.glob(directory + \"*-*.txt\")\n all_files = edat_files + text_files\n pairs = []\n paired_texts = []\n\n for text_file in text_files:\n [text_fname, _] = os.path.splitext(text_file)\n for edat_file in edat_files:\n [edat_fname, _] = os.path.splitext(edat_file)\n if text_fname == edat_fname:\n pairs.append([text_file, edat_file])\n\n for pair in pairs:\n paired_texts.append(pair[0])\n\n unpaired_texts = list(set(text_files) - set(paired_texts))\n three_files = []\n pop_idx = []\n\n # List of lists\n for i_file in range(len(unpaired_texts)):\n for j_pair in range(len(paired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in paired_texts[j_pair]):\n three_files.append([paired_texts[j_pair], pairs[j_pair][1],\n unpaired_texts[i_file]])\n pop_idx.append(i_file)\n\n for rm in reversed(pop_idx):\n unpaired_texts.pop(rm)\n\n # three_files is the text files and edats that form a triad (one edat, two\n # similarly named text files).\n for triad in three_files:\n for i_pair in reversed(range(len(pairs))):\n if triad[0:2] == pairs[i_pair]:\n pairs.pop(i_pair)\n\n two_texts = []\n all_two_texts = []\n two_text_pairs = []\n\n for i_file in range(len(unpaired_texts)):\n for j_file in range(i_file + 1, len(unpaired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in unpaired_texts[j_file]):\n all_two_texts.append(i_file)\n all_two_texts.append(j_file)\n two_text_pairs.append([i_file, j_file])\n\n all_two_texts = sorted(all_two_texts, reverse=True)\n\n # two_texts is the text files that pair with other text files.\n for i_pair in range(len(two_text_pairs)):\n two_texts.append([unpaired_texts[two_text_pairs[i_pair][0]],\n unpaired_texts[two_text_pairs[i_pair][1]]])\n\n for i_file in all_two_texts:\n unpaired_texts.pop(i_file)\n\n # one_text is the remaining un-paired text files.\n one_text = [[unpaired_texts[i_file]] for i_file in range(len(unpaired_texts))]\n\n # Determine subject IDs and timepoints for all files.\n # Assumes that files will be named according to convention\n # blahblahblah_[subj]-[tp].txt or blahblahblah-[subj]-[tp].txt.\n one_text_subjects = [get_subject(file_[0]) for file_ in one_text]\n one_text_timepoints = [get_timepoint(file_[0]) for file_ in one_text]\n two_text_subjects = [get_subject(pair[0]) for pair in two_texts]\n two_text_timepoints = [get_timepoint(pair[0]) for pair in two_texts]\n three_file_subjects = [get_subject(triad[0]) for triad in three_files]\n three_file_timepoints = [get_timepoint(triad[0]) for triad in three_files]\n pair_subjects = [get_subject(pair[0]) for pair in pairs]\n pair_timepoints = [get_timepoint(pair[0]) for pair in pairs]\n\n af_files = ([item for sublist in pairs for item in sublist] +\n [item for sublist in two_texts for item in sublist] +\n [item for sublist in three_files for item in sublist] +\n [item for sublist in one_text for item in sublist])\n\n one_edat = list(set(all_files) - set(af_files))\n one_edat = [[edat] for edat in one_edat]\n one_edat_subjects = [get_subject(file_[0]) for file_ in one_edat]\n one_edat_timepoints = [get_timepoint(file_[0]) for file_ in one_edat]\n\n all_subjects = (one_text_subjects + two_text_subjects + three_file_subjects +\n pair_subjects + one_edat_subjects)\n all_notetype = (([\"one_text\"] * len(one_text_subjects)) +\n ([\"two_texts\"] * len(two_text_subjects)) +\n 
([\"three_files\"] * len(three_file_subjects)) +\n ([\"pair\"] * len(pair_subjects)) +\n ([\"one_edat\"] * len(one_edat_subjects)))\n all_timepoints = (one_text_timepoints + two_text_timepoints +\n three_file_timepoints + pair_timepoints +\n one_edat_timepoints)\n all_file_sets = one_text + two_texts + three_files + pairs + one_edat\n\n organized_dir = org_dir_dict.get(task_name)\n\n for i_subj in range(len(all_subjects)):\n month = timepoint_dict.get(task_name).get(all_timepoints[i_subj])\n files_note = note_dict.get(all_notetype[i_subj])\n if len(all_subjects) > 4:\n try:\n print(\"Successfully organized %s-%s\" % (all_subjects[i_subj], month))\n print(\"Moved:\")\n subject_id = all_subjects[i_subj]\n files = all_file_sets[i_subj]\n note = organize_files(subject_id, month, files, organized_dir)\n note.append(files_note)\n orged = 1\n orgedwhen = time.strftime(\"%Y/%m/%d\")\n orgedby = \"PY\"\n except IOError:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n\n try:\n if all_notetype[i_subj] == \"pair\":\n print(\"Successfully converted %s-%s\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 1\n convedwhen = time.strftime(\"%Y/%m/%d\")\n convedby = \"PY\"\n else:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n except IOError:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n else:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n\n csv_data = add_subject(csv_data, all_subjects[i_subj],\n all_timepoints[i_subj], orged, orgedwhen, orgedby,\n conved, convedwhen, convedby, note)\n\n csv_data = csv_data[colnames]\n csv_data.to_csv(csv_file, index=False)", "def write_file(tweets):\n with open((folderlink + \"markov_sentences.txt\"), \"w\") as text_file:\n for tweet in tweets:\n text_file.write (tweet + '\\n')\n with file ((folderlink + \"markov_sentences.txt\"), 'r') as f:\n text = f.read()\n text_model = markovify.NewlineText(text)\n print \"model successful \\n\\n\\n\\n\"\n for i in range(5):\n print(text_model.make_short_sentence(140, tries=100))\n text_file.close()", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data", "def process_raw_phrases(file_path):", "def amalgamate_all_txts_into_one(\n path_to_folder=\"L:\\\\word_docs\\\\NLP\\\\stemming\\\\combinedRTFDOCX\\\\\",\n save_path_all_txt=\"L:\\\\word_docs\\\\NLP\\\\\",\n future_option=False):\n\n #initialise\n all_txt_stem = \"\"\n\n\n for txt_file in os.listdir(path_to_folder):\n path_to_doc = os.path.join(path_to_folder, txt_file)\n\n #open the file\n pt_txt = open_txt_file(path_to_doc)\n all_txt_stem = all_txt_stem + \" \" + pt_txt\n\n # save 
all_txt\n \n save_filtered_txt_file(\"made_up\\\\all_txt_stemmed2.txt\", all_txt_stem, save_path_all_txt)\n \n # in order to count, use tokens \n counts = count_tokens(all_txt_stem)", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path, encoding='utf-8'):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>']\n data.append(sent)\n\n return data", "def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)", "def train_for(labels, filenames):\n stt = stt_google\n csvfiles = []\n writers = []\n for index, filename in enumerate(filenames):\n currfile = open(filename, 'ab')\n csvfiles.append(currfile)\n writers.append(csv.writer(currfile))\n # record instances until it doesn't interpret any text\n speech = stt.listen_for_speech()\n while(speech):\n hypotheses = []\n for hypothesis in speech:\n hypotheses.append(hypothesis['utterance'])\n #write hypotheses\n for index, label in enumerate(labels):\n writers[index].writerow([label] + hypotheses)\n speech = stt.listen_for_speech()\n for csvfile in csvfiles:\n csvfile.close", "def concatinate_documents_to_single_doc(): \n stars_list=[\"one_star\",\"two_star\",\"three_star\",\"four_star\",\"five_star\"]\n docs_path=\"C:\\supporting_evidence\\external resources\\IMDB\\movie_articles\"\n for star in stars_list:\n curr_star_docs_sentence=\"\"\n for filename in os.listdir(docs_path+\"\\\\\"+ star):\n with open(docs_path+\"\\\\\"+ star+\"\\\\\"+filename, 'r') as f:\n doc_lines=f.read()\n curr_star_docs_sentence+=doc_lines\n with open(star+\"_single_doc.txt\",'wb') as 
csvfile:\n f=csv.writer(csvfile)\n f.writerow([curr_star_docs_sentence])", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def append_corpus(output):\n files = []\n output_path = output + \"/ig/\" + \"ig_corpus.txt\"\n for root, directories, filenames in os.walk(output + \"/ig/\"):\n for filename in filenames:\n files.append(os.path.join(root, filename))\n corpusfiles = filter(lambda x: \".txt\" in x, files)\n if not os.path.exists(os.path.dirname(output_path)):\n os.makedirs(os.path.dirname(output_path))\n with open(output_path, \"w+\") as corpusFile:\n for file in corpusfiles:\n fileH = open(file, \"r\")\n corpusFile.write(fileH.read())", "def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords", "def translate_phrases(translator, phrases, language):\n for phrase in phrases:\n translator.type_phrase_to_translate(phrase)\n sleep(0.5)\n translated_phrase = translator.read_translated_phrase()\n add_translation_to_file(language, translated_phrase)", "def save_words(csvf, word_set_id, orig_set_id=''):\n words = []\n headings = []\n\n with open(csvf, \"r\", encoding='utf-8-sig') as file:\n reader = csv.reader(file, delimiter=',')\n\n # Create dictionary keys\n for row in reader:\n i = 0\n while (i < len(row)):\n headings.append(row[i])\n i += 1\n break\n\n # Save STR values to each person\n for row in reader:\n i = 0\n word = {}\n\n while (i < len(row)):\n key = str(headings[i])\n value = row[i]\n word[key] = value\n i += 1\n words.append(word)\n\n # Get heading names\n lang1 = headings[0] # Original Language\n lang1p = headings[1] # Original transliteration\n lang2 = headings[2] # Translation Language\n lang2p = headings[3] # Translation transliteration\n wtype = headings[4] # Type of word (noun, verb)\n\n orig_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang1, )).fetchall())[0]['id']\n trans_lang_id = (db.execute(\n \"SELECT id FROM languages WHERE name = ?\", (lang2, )).fetchall())[0]['id']\n\n for w in words:\n word_type_id = (db.execute(\n \"SELECT id FROM word_type WHERE type = ?\", (w[wtype], )).fetchall())[0]['id']\n\n new_orig_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang1], orig_lang_id, word_type_id, w[lang1p])\n )).lastrowid\n con.commit()\n new_translated_word_id = (db.execute(\"INSERT INTO words ('wordstr', 'language_id', 'type', 'pronunciation') VALUES (?, ?, ?, ?)\",\n (w[lang2], trans_lang_id, word_type_id, w[lang2p])\n )).lastrowid\n con.commit()\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (word_set_id, new_translated_word_id))\n con.commit()\n # if orig_set_id is set\n if (orig_set_id != ''):\n db.execute(\"INSERT INTO word_set_words (word_set_id, word_id) VALUES (?, ?)\",\n (int(orig_set_id), new_orig_word_id))\n con.commit()\n # insert orig and its translation equivalent\n db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (orig_lang_id, trans_lang_id, new_orig_word_id, new_translated_word_id))\n con.commit()\n # reverse orig & translation\n 
db.execute(\"INSERT INTO word_translation (orig_lang, trans_lang, orig_word, trans_word) VALUES (?, ?, ?, ?)\",\n (trans_lang_id, orig_lang_id, new_translated_word_id, new_orig_word_id))\n con.commit()\n file.close()\n return len(words)", "def import_spontaneous_speech_corpus(corpus_name, directory, **kwargs):\n\n dialect = kwargs.pop('dialect', 'textgrid')\n stop_check = kwargs.pop('stop_check', None)\n call_back = kwargs.pop('call_back', None)\n speaker_source = kwargs.pop('speaker_source', None)\n delimiter = kwargs.pop('delimiter', None)\n\n corpus = SpontaneousSpeechCorpus(corpus_name,directory)\n\n words = []\n phones = []\n textgrids = []\n wavs = []\n if call_back is not None:\n call_back('Finding files...')\n call_back(0,1)\n cur = 0\n for root, subdirs, files in os.walk(directory):\n if stop_check is not None and stop_check():\n return\n for f in files:\n if dialect == 'textgrid' and f.lower().endswith('.textgrid'):\n textgrids.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.words'):\n words.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.phones'):\n phones.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.wrd'):\n words.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.phn'):\n phones.append(os.path.join(root,f))\n elif f.endswith('.wav'):\n wavs.append(os.path.join(root,f))\n if dialect == 'textgrid':\n word_tier_name = kwargs.pop('word_tier_name', None)\n phone_tier_name = kwargs.pop('phone_tier_name', None)\n dialogs = align_textgrid_info(textgrids, wavs, speaker_source, stop_check, call_back)\n else:\n dialogs = align_dialog_info(words, phones, wavs, speaker_source, stop_check, call_back)\n if call_back is not None:\n call_back('Processing discourses...')\n call_back(0,len(dialogs))\n cur = 0\n\n for d, v in dialogs.items():\n if stop_check is not None and stop_check():\n return\n if call_back is not None:\n cur += 1\n call_back(cur)\n discourse_info = {'name':d}\n if dialect == 'textgrid':\n if 'textgrid' not in v:\n continue\n data = textgrids_to_data(v['textgrid'], word_tier_name,\n phone_tier_name,\n v['speaker'], delimiter)\n else:\n if 'words' not in v:\n continue\n if 'phones' not in v:\n continue\n data = files_to_data(v['words'], v['phones'], dialect)\n discourse_info['speaker'] = Speaker(v['speaker'])\n\n if 'wav' in v:\n discourse_info['wav_path'] = v['wav']\n corpus.add_discourse(data, discourse_info,delimiter=delimiter)\n return corpus", "def write_tok_to_file(self):\n dir_path = os.path.join(self.output_path, 'tokens')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n if not amr_strings:\n continue\n tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])\n f.write(tok + '\\n')\n f.close()", "def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")", "def semcor2token(args):\r\n input_files = 
list_files(*args.input_files)\r\n output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'typetoken'\r\n if not output_dir.is_dir():\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write('\\t'.join([word.string, word.string, 'punc\\n']))\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')\r\n else:\r\n token = Token.from_tag(word)\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')", "def read_file(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n pairs = [L.split(\"\\t\") for L in lines] #Funniest O\n tokens = [tok for tok,tag in pairs]\n tags = [tag for tok,tag in pairs]\n ret.append( (tokens,tags) )\n return ret", "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def write_textfiles(train, val, anno):\n \"\"\" used only for side effect \"\"\"\n # fn creates text file line in form '<filename> <int mapping of grip type>'\n # for each train/val file and writes each line in corresponding\n # train.txt/val.txt for training\n # to_line = lambda fname:'{} {}'.format(fname,labelmap[anno[fname]['grip']])\n to_line = lambda fname:'{} {}'.format(fname,labelmap[anno[fname]['grip']])\n train_str = '\\n'.join(map(to_line, train))\n val_str = '\\n'.join(map(to_line, val))\n\n with open('train.txt', 'w') as trainfile:\n trainfile.write(train_str)\n with open('val.txt', 'w') as valfile:\n valfile.write(val_str)", "def train(filename, supervised=False):\n p = re.compile(r'[a-zA-Z0-9_:;,.\"?\\' ]+')\n data = []\n all_text = {}\n infile = csv.DictReader(open(filename), delimiter=',', quotechar='\"')\n for row in infile:\n text_id = row['id']\n text = row['text']\n author = row['author'] if supervised else None\n\n # remove special characters\n new_text = ''\n for word in text:\n for letter in word:\n reg = p.match(letter)\n if reg is not None:\n new_text += reg.group()\n\n data.append((text_id, new_text, author))\n if supervised:\n if author not in all_text.keys():\n all_text[author] = ''\n else:\n sentences = sentence_tokenizer.tokenize(new_text)\n all_text[author] += ' '.join(sentences) + ' '\n # print(\"{} {} {}\".format(text_id, text, author))\n if supervised:\n return data, all_text\n else:\n return data", "def semcor2run(args):\r\n input_files = list_files(*args.input_files)\r\n 
output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'running_text'\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for paragraph in corpus_file.text.find_all('p'):\r\n for word in paragraph.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write(word.string)\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n else:\r\n token = Token.from_tag(word)\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n output_file.write('\\n')", "def transcribe_file_with_word_time_offsets(speech_file):\n from google.cloud import speech\n from google.cloud.speech import enums\n from google.cloud.speech import types\n client = speech.SpeechClient()\n\n with io.open(speech_file, 'rb') as audio_file:\n content = audio_file.read()\n\n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US',\n enable_word_time_offsets=True)\n\n response = client.recognize(config, audio)\n\n word_with_ts = []\n for result in response.results:\n #print result\n alternative = result.alternatives[0]\n print('Transcript: {}'.format(alternative.transcript))\n\n for word_info in alternative.words:\n word = word_info.word\n start_time = word_info.start_time\n end_time = word_info.end_time\n word_with_ts.append((word ,start_time.seconds + start_time.nanos * 1e-9, end_time.seconds + end_time.nanos * 1e-9))\n #print('Word: {}, start_time: {}, end_time: {}'.format(\n # word,\n # start_time.seconds + start_time.nanos * 1e-9,\n # end_time.seconds + end_time.nanos * 1e-9))\n return word_with_ts", "def combine_documents(path=os.path.join(os.curdir, \"data/processed\"), name='corpus.txt'):\n outname=os.path.join(path, name)\n if os.path.exists(outname):\n os.remove(outname)\n filenames = [f for f in os.listdir(path) if fnmatch.fnmatch(f, '*.txt')]\n with open(outname, 'w') as outfile:\n print \"Combining documents...\"\n for fname in filenames:\n print fname\n with open(os.path.join(path, fname)) as infile:\n outfile.write(infile.read())", "def _read_data_taskA(data_path: str=\"path\", tokenizer=None, \n bert: bool=False, \n mode: str=\"raw\", \n tagger=None, \n test: bool=False, \n test_samples=None\n ):\n print(f\"\\n[dataset]: Loading data from '{data_path}'...\")\n sentences = []\n labels = []\n tok_list = []\n words_list = []\n targets_list = []\n target_final = []\n\n data_dict = read_json_data(data_path) if not test else test_samples\n #print(\"data_dict:\", len(data_dict))\n\n for entry in data_dict:\n # tokenize data sentences\n if bert:\n tokens = tokenizer.tokenize(entry[\"text\"])\n tokens.insert(0, \"[CLS]\") # RoBERTa \"<s>\" <-> BERT \"[CLS]\" \n tokens.append(\"[SEP]\") # RoBERTa \"</s>\" <-> BERT \"[SEP]\"\n else:\n tokens = tokenizer(entry[\"text\"])\n \n words_list.extend(tokens)\n tok_list.append(tokens)\n\n if mode == \"tokenize\":\n sentences.append(tokens)\n elif mode == 
\"raw\":\n sentences.append(entry[\"text\"])\n\n # count target words\n t_list = []\n if not test:\n targets = entry[\"targets\"]\n tgt_list = []\n if len(targets) > 0:\n t_list.append(targets)\n for tgt in targets:\n targets_list.append(tgt[1])\n tgt_list.append(tgt[1])\n else:\n t_list.append([])\n\n # tag input tokens\n b_tok = tokenizer if bert else None\n tags = tagger(targets, tokens, bert_tokenizer=b_tok)\n #print(tags)\n\n labels.append(tags)\n target_final.append(tgt_list)\n\n else:\n labels.append(\"dummy\")\n target_final.append(0)\n \n if not test: \n assert len(sentences) == len(labels)\n print(\"sentences:\",len(sentences))\n print(\"labels:\",len(labels))\n\n # count words occurency and frequency \n word_counter = collections.Counter(words_list)\n distinct_words = len(word_counter)\n print(f\"Number of distinct words: {distinct_words}\")\n \n # count target words occurency and frequency\n tgts_counter = collections.Counter(targets_list)\n distinct_tgts = len(tgts_counter)\n print(f\"Number of distinct targets: {distinct_tgts}\")\n\n return sentences, labels, targets_list, word_counter\n else:\n return list(zip(sentences, labels, target_final, tok_list))", "def get_word_list(file_name):\n\n\tstoryEdit = []\n\n\t#Reads the file starting after the beginning\t\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\tlines = lines[curr_line+1:]\n\n\n\t#Loops through each row, making everything lowercase and replacing all punctuation\n\tfor row in lines:\n\t \trow = row.lower()\n\t \trow = row.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t \tstoryEdit += row.split()\n\n\n\t#Returns the final list as \n\treturn storyEdit" ]
[ "0.65899956", "0.6246819", "0.6209257", "0.6177756", "0.60997456", "0.60955375", "0.6093305", "0.6085026", "0.5978077", "0.5953307", "0.59093267", "0.58949584", "0.5873323", "0.58294004", "0.58261067", "0.5818217", "0.57632166", "0.5745265", "0.57408845", "0.5730289", "0.57262254", "0.5720753", "0.5719614", "0.57046735", "0.5696908", "0.56671494", "0.563904", "0.56326175", "0.5628801", "0.5623748" ]
0.6972013
0
This function takes the CSV file with the extracted pause features and the CSV with the extracted syllable features
def combine_pause_syllable(): pause_csv = pd.read_csv(sys.argv[1]) syllable_csv = pd.read_csv(sys.argv[2]) merged = pause_csv.merge(syllable_csv, on=TRANSCRIPT_ID) # adding pause-syllable columns # speech rate merged[COOKIE_SPEECH_RATE] = merged[COOKIE_SYLLABLE_COUNT] / merged[COOKIE_DURATION] merged[READING_SPEECH_RATE] = merged[READING_SYLLABLE_COUNT] / merged[READING_DURATION] merged[MEMORY_SPEECH_RATE] = merged[MEMORY_SYLLABLE_COUNT] / merged[MEMORY_DURATION] # average syllable duration merged[COOKIE_AVERAGE_SYLLABLE_DURATION] = merged[COOKIE_PHONATION_TIME] / merged[COOKIE_SYLLABLE_COUNT] merged[READING_AVERAGE_SYLLABLE_DURATION] = merged[READING_PHONATION_TIME] / merged[READING_SYLLABLE_COUNT] merged[MEMORY_AVERAGE_SYLLABLE_DURATION] = merged[MEMORY_PHONATION_TIME] / merged[MEMORY_SYLLABLE_COUNT] # pause per syllable merged[COOKIE_PAUSE_PER_SYLLABLE] = merged[COOKIE_NUMBER_OF_PAUSES] / merged[COOKIE_SYLLABLE_COUNT] merged[READING_PAUSE_PER_SYLLABLE] = merged[READING_NUMBER_OF_PAUSES] / merged[READING_SYLLABLE_COUNT] merged[MEMORY_PAUSE_PER_SYLLABLE] = merged[MEMORY_NUMBER_OF_PAUSES] / merged[MEMORY_SYLLABLE_COUNT] # merged[SPEECH_RATE] = combined_syllable_count / merged[DURATION] # merged[AVERAGE_SYLLABLE_DURATION] = merged[PHONATION_TIME] / combined_syllable_count merged[HAS_DEMENTIA] = merged[TRANSCRIPT_ID].apply(lambda x: x[0] == 'E') merged.to_csv('jan27_language_features.csv', sep=',', header=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_syllable_features_from_txt():\n input_files = sys.argv[1]\n csv_name = sys.argv[2]\n syllable_stats = pd.DataFrame(columns=SYLLABLE_COLUMNS)\n re_word = re.compile(r'[\\w-]+')\n i = 0\n for filename in os.listdir(input_files):\n if filename != '.DS_Store':\n print(filename, i)\n syllable_count = 0\n for line in open(input_files+filename):\n for word in re_word.findall(line):\n syllable_count += estimate(word)\n syllable_stats = syllable_stats.append({\n TRANSCRIPT_ID: filename[:-4],\n MEMORY_SYLLABLE_COUNT: syllable_count,\n }, ignore_index=True)\n i += 1\n syllable_stats = syllable_stats.set_index(TRANSCRIPT_ID)\n syllable_stats.to_csv(csv_name+'.csv')", "def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' % len(self.texts))", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def preprocess_feature(df):", "def load_csv_data(filepath, textcol=\"text\"):\n df = pd.read_csv(filepath)\n samples = [ str(text) for text in df[textcol] ]\n labels = [ str(intent) for intent in df[\"label\"] ]\n\n return samples, labels", "def parse_feature(self, feature_key, lines):\n ...", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. 
DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def parse_features(self, skip=...):\n ...", "def parse_features(self, skip=...):\n ...", "def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def prepareSemanticDifferential():\r\n\t\r\n filename = (\"OsgoodOriginal.csv\") \r\n fileIn = open(filename, 'r')\r\n allData = []\r\n line = fileIn.readline()\r\n while line != \"\":\r\n line = fileIn.readline().strip()\r\n if line != \"\":\r\n values = line.split(',')\r\n wordData = {}\r\n wordData['word'] = str(values[0])\r\n wordData['evaluation'] = float(values[1])\r\n wordData['activity'] = float(values[2])\r\n wordData['potency'] = float(values[3])\r\n allData.append(wordData)\r\n fileIn.close()\r\n return allData", "def out_featuretxt(self):\n return self.outputfrominput(inputformat='csv', stripextension='.csv', addextension='.features.csv')", "def preprocess_csv():\n filename = DATA_DIR + 'text_classification/codi/intents.csv'\n df = pd.read_csv(filename, header=None)\n df = df.dropna()\n classes = df[1].unique()\n class_list = classes.tolist()\n df[0] = df[0].apply(clean_text)\n df[1] = df[1].apply(lambda x: class_list.index(x))\n counts = df[1].value_counts()\n\n # omit classes with too few examples\n omit = counts[counts < 2].index.values\n omitted = df[df[1].isin(omit)]\n included = df[~df[1].isin(omit)]\n y = included.pop(1)\n\n x_train, x_test, y_train, y_test = train_test_split(included, y, test_size=0.1, stratify=y, random_state=42)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, stratify=y_train,\n random_state=42)\n train_df: pd.DataFrame = pd.concat([x_train, y_train], axis=1)\n val_df: pd.DataFrame = pd.concat([y_val, x_val], axis=1)\n test_df: pd.DataFrame = pd.concat([y_test, x_test], axis=1)\n\n # add omitted examples back to training sets\n train_df: pd.DataFrame = pd.concat([train_df, omitted], axis=0)\n train_df = train_df.reindex(columns=[1, 0])\n x_train: pd.DataFrame = pd.concat([x_train, omitted[0]], axis=0)\n y_train: pd.DataFrame = pd.concat([y_train, omitted[1]], axis=0)\n\n # save to file\n train_df.to_csv('train.csv', header=False, index=False)\n val_df.to_csv('val.csv', header=False, index=False)\n test_df.to_csv('test.csv', header=False, index=False)\n np.savetxt('classes.txt', classes, fmt='%s')\n\n return (train_df, val_df, test_df,\n x_train.values, y_train.values, x_val.values, y_val.values, x_test.values, y_test.values, classes)", "def extract_pause_features():\n input_files = sys.argv[1]\n pause_statistics = pd.DataFrame(columns=PAUSE_COLUMNS)\n for filename in os.listdir(input_files):\n if filename != '.DS_Store':\n file_pauses = pd.read_csv(input_files+filename)\n print(filename)\n\n # task duration\n cookie_duration = file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK][AUDIO_FILE_LENGTH].iloc[0]\n reading_duration = file_pauses[file_pauses[TASK] == 
READING_TASK][AUDIO_FILE_LENGTH].iloc[0]\n memory_duration = file_pauses[file_pauses[TASK] == MEMORY_TASK][AUDIO_FILE_LENGTH].iloc[0]\n\n # length of pauses\n cookie_pause_lengths = file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK][PAUSE_LENGTH]\n reading_pause_lengths = file_pauses[file_pauses[TASK] == READING_TASK][PAUSE_LENGTH]\n memory_pause_lengths = file_pauses[file_pauses[TASK] == MEMORY_TASK][PAUSE_LENGTH]\n\n # number of pauses\n cookie_pause_number = len(file_pauses[file_pauses[TASK] == COOKIE_THEFT_TASK].index)\n reading_pause_number = len(file_pauses[file_pauses[TASK] == READING_TASK].index)\n memory_pause_number = len(file_pauses[file_pauses[TASK] == MEMORY_TASK].index)\n\n if cookie_duration - cookie_pause_lengths.sum() < 0:\n print(\"NEGATIVE COOKIE TIME \", filename)\n print(cookie_duration, cookie_pause_lengths.sum())\n\n if reading_duration - reading_pause_lengths.sum() < 0:\n print(\"NEGATIVE READING TIME \", filename)\n print(reading_duration, reading_pause_lengths.sum())\n if memory_duration - memory_pause_lengths.sum() < 0:\n print(\"NEGATIVE MEMORY TIME \", filename)\n print(memory_duration, memory_pause_lengths.sum())\n\n pause_statistics = pause_statistics.append({\n TRANSCRIPT_ID: filename[:-4],\n COOKIE_NUMBER_OF_PAUSES: cookie_pause_number,\n COOKIE_MAXIMUM_PAUSE_DURATION: cookie_pause_lengths.max(),\n COOKIE_DURATION: cookie_duration,\n COOKIE_PHONATION_TIME: cookie_duration - cookie_pause_lengths.sum(),\n COOKIE_PROPORTION_OF_TIME_SPENT_SPEAKING: (cookie_duration -\n cookie_pause_lengths.sum()) / cookie_duration,\n COOKIE_PAUSE_RATE: cookie_pause_number/cookie_duration,\n COOKIE_MEAN_PAUSE_LENGTH: cookie_pause_lengths.mean(),\n COOKIE_STD_PAUSE_LENGTH: cookie_pause_lengths.std(),\n\n READING_NUMBER_OF_PAUSES: reading_pause_number,\n READING_MAXIMUM_PAUSE_DURATION: reading_pause_lengths.max(),\n READING_DURATION: reading_duration,\n READING_PHONATION_TIME: reading_duration - reading_pause_lengths.sum(),\n READING_PROPORTION_OF_TIME_SPENT_SPEAKING: (reading_duration -\n reading_pause_lengths.sum()) / reading_duration,\n READING_PAUSE_RATE: reading_pause_number / reading_duration,\n READING_MEAN_PAUSE_LENGTH: reading_pause_lengths.mean(),\n READING_STD_PAUSE_LENGTH: reading_pause_lengths.std(),\n\n MEMORY_NUMBER_OF_PAUSES: memory_pause_number,\n MEMORY_MAXIMUM_PAUSE_DURATION: memory_pause_lengths.max(),\n MEMORY_DURATION: memory_duration,\n MEMORY_PHONATION_TIME: memory_duration - memory_pause_lengths.sum(),\n MEMORY_PROPORTION_OF_TIME_SPENT_SPEAKING: (memory_duration -\n memory_pause_lengths.sum()) / memory_duration,\n MEMORY_PAUSE_RATE: memory_pause_number / memory_duration,\n MEMORY_MEAN_PAUSE_LENGTH: memory_pause_lengths.mean(),\n MEMORY_STD_PAUSE_LENGTH: memory_pause_lengths.std()\n }, ignore_index=True)\n pause_statistics = pause_statistics.set_index(TRANSCRIPT_ID)\n pause_statistics.to_csv('jan27_extracted_pauses.csv', sep=',', header=True)", "def read_data(filename,label=None,preprocessor=space_tokenizer):\n df = pd.read_csv(filename)\n return [preprocessor(string) for string in df['sentences'].values]", "def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = 
[x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) # padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n\n test_number=int((number_examples-training_number)/2)\n\n\n test = (X[training_number+ 1:training_number+test_number], Y[training_number + 1:training_number+test_number])\n valid = (X[training_number + test_number + 1:],\n Y[training_number + test_number + 1:])\n\n return train,test,valid", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n target_indices = find_target_indices(tokens_a, tokens)\n if target_indices is None:\n target_indices = (1, 1 + len(tokens_a))\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n target_indices=target_indices))\n return features", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except 
KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def preprocess(self):\n df = pd.read_csv(self.input, index_col = 0)\n diseaseCodes = pd.read_csv(self.metadata, sep = self.separator, index_col = 0, quotechar = '\"')\n\n diseaseColumn = []\n\n if self.transposeMetadataMatrix:\n diseaseCodes = diseaseCodes.T\n\n #iterate through all sample IDs and select the corresponding disease/annotation from the metadata for it\n for sample in df.index:\n try:\n diseaseCode = diseaseCodes[sample][self.diseaseColumn]\n except:\n diseaseCode = \"NotAvailable\"\n benchutils.logWarning(\"WARNING: No classLabel code found for sample \" + str(sample) + \". Assign class NotAvailable.\")\n diseaseColumn.append(diseaseCode)\n\n df.insert(0, column=\"classLabel\", value=diseaseColumn)\n\n df_without_missings = df.dropna(subset=['classLabel'])\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_withClassLabels.csv\"\n df_without_missings.to_csv(filename)\n return filename", "def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = data.append(df1)\r\n \r\n return data", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = 
label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def load_simple_features(self, simple_features):\n self.simple_features = pd.DataFrame(simple_features).T\n self.simple_features.fillna(False, inplace=True)\n self.simple_features = self.simple_features.astype(bool)\n\n # Aggregate features descriptions\n self.simple_features_description = {}\n for binary in simple_features:\n for token in simple_features[binary]:\n if token not in self.simple_features_description:\n self.simple_features_description[token] = \\\n simple_features[binary][token]" ]
[ "0.6551174", "0.5803363", "0.58026767", "0.5801477", "0.5794281", "0.57716185", "0.5760765", "0.5737755", "0.5737755", "0.5737755", "0.5680732", "0.56744295", "0.56744295", "0.5643753", "0.5616709", "0.5581041", "0.55776185", "0.55607", "0.5560613", "0.55591387", "0.5516825", "0.55157727", "0.5515528", "0.5498705", "0.54862714", "0.5470271", "0.5466337", "0.54643947", "0.5458838", "0.5453753" ]
0.6147171
1
The goal of this logistic regression is to classify whether an individual is a healthy control or a dementia patient. The data is stratified and undergoes 10-fold cross-validation.
def create_logistic_regression(): pause_data = shuffle(pd.read_csv(sys.argv[1])) pause_data = pause_data.replace([np.inf, -np.inf], np.nan).dropna() # X = pause_data.drop([HAS_DEMENTIA, TRANSCRIPT_ID], axis=1) X = pause_data[MEMORY_FEATURES] y = pause_data[HAS_DEMENTIA] split_tracker = [] rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=36851234) # n_repeats 10 too for train_index, test_index in rskf.split(X, y): X_train, X_test = X.iloc[list(train_index)], X.iloc[list(test_index)] y_train, y_test = y.iloc[list(train_index)], y.iloc[list(test_index)] logmodel = LogisticRegression() logmodel.fit(X_train, y_train) predictions = logmodel.predict(X_test) split_tracker.append({ TRAIN: train_index, TEST: test_index, PREDICTIONS: predictions, Y_TEST: y_test }) accuracy = [] f1 = [] auc = [] print("Predictions", split_tracker[0]) for predictions in split_tracker: # print(classification_report(predictions[Y_TEST], predictions[PREDICTIONS])) accuracy.append(accuracy_score(predictions[Y_TEST], predictions[PREDICTIONS])) f1.append(f1_score(predictions[Y_TEST], predictions[PREDICTIONS])) auc.append(roc_auc_score(predictions[Y_TEST], predictions[PREDICTIONS])) print(accuracy) accuracy = np.array(accuracy) f1 = np.array(f1) auc = np.array(auc) print(len(accuracy)) print('mean accuracy: ', accuracy.mean()) print('mean f1 score: ', f1.mean()) print('mean auc: ', auc.mean())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logistic_train_metrics(df):\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n model_reg = dill.load(open('maa_conflict_model.dill', 'rb'))\n\n return model_reg", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town 
Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def test_logistic_regression(x, y, tune):\n # Perform classification without tuning\n lrc = LogisticRegression()\n pipeline = create_pipeline(lrc)\n return accuracy(pipeline, x, y)", "def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = matrix.confusion_matrix(y_test, 
yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def log_reg(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n cls_log = LogisticRegression()\n cls_log.fit(x_train, y_train)\n y_predict = cls_log.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = cls_log.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc", "def test_class_logistic(\n y_train_dep,\n y_test_dep,\n y_train_preds_lr,\n y_test_preds_lr):\n # Ensure the function works\n try:\n cls.classification_logistic_results(\n y_train_dep, y_test_dep, y_train_preds_lr, y_test_preds_lr\n )\n logging.info(\n \"Successfully Plotting Classification Results using logistic regression\"\n )\n except Exception as err:\n logging.error(\"Errors in plotting logistic classification results\")\n raise err\n # Ensure the output exists\n try:\n assert os.path.isfile(\"images/results/assessment_logistic.png\")\n except AssertionError as err:\n logging.error(\"Errors in plotting logistic classification file\")\n raise err", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def run_logistic(X_train, X_test, y_train, y_test, C=1, penalty = 'l2', solver = 'lbfgs'):\n \n logreg = LogisticRegression(fit_intercept=True, C=C, penalty = penalty, solver = solver)\n logreg.fit(X_train, y_train)\n get_scores(logreg, X_train, X_test, y_train, y_test)", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in 
range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def cross_val_logistic_regression(variables, targeted):\n\n\tlogistic = linear_model.LogisticRegression()\n\tpca = decomposition.PCA()\n\tpipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])\n\n\tn_components = [4,5]\n\tCs = np.logspace(-10,10,10)\n\n\testimator = GridSearchCV(\n\t\tpipe,\n\t\tdict(pca__n_components = n_components,\n\t\t\tlogistic__C = Cs)\n\t\t)\n\t\n\tpredicted = cross_val_predict(\n\testimator, variables, targeted, cv = 5)\n\n\treturn {\n\t\t'accuracy':metrics.accuracy_score(targeted, predicted),\n\t\t'report':metrics.classification_report(targeted, predicted)\n\t\t}", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = 
pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def logistic_regression(X, y, fold_number=10, iteration=1000):\n \n # add additional dimension and set y=-1 if y==0\n X['x0'] = 1\n y[y==0] = -1\n \n # data preparation\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n eta = 0.01 / 4600\n \n # record 10 output\n loss_function_list = []\n w_list = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n w = np.zeros(D) # initialize w\n loss_function = []\n for ite in range(iteration+1): \n gradient = sum((1-expit(y_train.values[i]*X_train.values[i].dot(w)))*y_train.values[i]*X_train.values[i] for i in range(length))\n loss_function.append(sum(np.log(expit(y_train.values[i]*X_train.values[i].dot(w))) for i in range(length)))\n w += eta * gradient\n w_list.append(w)\n loss_function_list.append(loss_function)\n \n return w_list, loss_function_list", "def logistic_predict(weights, data):\n\n # TODO: Finish this function\n\n return y", "def predict_evidences(self, X):", "def run_logistic_regression(file_path):\n\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n features_list = ['Direction', 'Speed']\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n logistic_model = LogisticRegression()\n\n # multi_model = MultiOutputRegressor(LogisticRegression())\n #\n # multi_model.fit(X_train, X_train)\n # multi_predict = multi_model.predict(X_train)\n\n logistic_model.fit(X_train, X_train)\n predict = logistic_model.predict(X_train)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in 
range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def stability_logistic(x, y, **kwargs):\n rlr = RandomizedLogisticRegression(n_jobs=kwargs.get('n_jobs', 4))\n if 'param' in kwargs:\n rlr.set_params(**kwargs['param'])\n rlr.fit(x, y)\n return rlr.get_support()", "def preprocessor(df):\r\n df.replace(\"?\", np.nan, inplace=True)\r\n\r\n categorical_cols = [col for col in df.columns if df[col].dtype == np.dtype(np.object)]\r\n df = drop_rows(df, categorical_cols)\r\n\r\n ## one record per patient (where they had max of time_in_hospital)\r\n df = df.loc[df.groupby(\"patient_nbr\", sort=False)['time_in_hospital'].idxmax()]\r\n df.drop(columns = ['patient_nbr'], inplace=True)\r\n\r\n #convert categorical \r\n df = categorical_to_numerical(df)\r\n df = map_diagnosis(df)\r\n df = map_admissions(df)\r\n\r\n #one-hot encoding \r\n categorical_columns = [col for col in df.columns if df[col].dtype == np.dtype(object)]\r\n df = one_hot_encoder(df, categorical_columns)\r\n df.columns = map(str.lower, df.columns)\r\n\r\n\r\n #train-test-split\r\n target_variable = 'readmitted'\r\n Y_feature = df[target_variable]\r\n X_features = df.drop(columns=[target_variable])\r\n X_train, X_test, y_train, y_test = train_test_split(X_features,Y_feature, test_size=0.2, random_state = 42)\r\n\r\n\r\n # normalize of numerical columns\r\n mm_scaler = MinMaxScaler()\r\n X_train = pd.DataFrame(mm_scaler.fit_transform(X_train), columns = X_train.columns) \r\n X_test = pd.DataFrame(mm_scaler.fit_transform(X_test), columns = X_test.columns)\r\n\r\n return (X_train, X_test, y_train, y_test)", "def 
main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def train_logistic_regression(train_exs: List[SentimentExample],\n feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n indexer = feat_extractor.get_indexer()\n weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n learning_rate = 0.1\n for i in range(15):\n for ex in train_exs:\n features_of_str = feat_extractor.extract_features(ex.words, False)\n expo = math.exp(np.dot(weights, features_of_str))\n possibility = expo / (1 + expo)\n gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n return LogisticRegressionClassifier(weights, feat_extractor)\n\n # Methods for plotting average training loss\n\n # x = np.arange(0, 14)\n # # learning_rate = 1\n # indexer = feat_extractor.get_indexer()\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, gradient_of_w)\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.01\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.01\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.1\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.1\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo 
/ (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n # plt.xlabel('Epochs')\n # plt.ylabel('Average Training Loss')\n # plt.legend(['step size 1', 'step size 0.01', 'step size 0.1'], loc='upper left')\n # plt.show()\n # return LogisticRegressionClassifier(weights, feat_extractor)" ]
[ "0.6831762", "0.6677965", "0.6661084", "0.66070837", "0.65637374", "0.65123886", "0.6491418", "0.64245576", "0.6359924", "0.6351835", "0.633985", "0.6330328", "0.6271656", "0.62617517", "0.6238637", "0.62014663", "0.6134061", "0.610931", "0.608315", "0.606158", "0.6054889", "0.59467244", "0.59444207", "0.592961", "0.5928683", "0.5927828", "0.59052515", "0.5895243", "0.588843", "0.5875967" ]
0.68424803
0
Updates the state value using update_value_factory if the state is in the storage.
def test_add_or_update_state_for_state_in_storage(self): def test_update_value(name, value): return f'{name}-{value}' state_manager = ActorStateManager(self._fake_actor) state_change_tracker = state_manager._get_contextual_state_tracker() val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value)) self.assertEqual('state1-value1', val) state = state_change_tracker['state1'] self.assertEqual(StateChangeKind.update, state.change_kind)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateValue(self, state):\n return self.getQValue(state, self.policy[state[0], state[1]])", "def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)", "def update(self, key, val):\n state_dict = self.todict()\n assert key in state_dict\n state_dict[key] = val\n return self.state_factory.build(state_dict)", "def update(self):\r\n if self._block.info_values is not None:\r\n self._state = self._block.info_values.get(self._sensor_name, None)", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def update_state(self):\n if self._coordinator.data:\n # get consumption value\n value_list = self._coordinator.data['values']\n values = [v['value'] for v in value_list]\n self._state = f\"{sum(values):.2f}\"", "def update_store(self, value, index):\n if index == 1:\n self.state[self.M] = value\n else:\n self.state[-1] = value", "def update_state(self, context):\n pass", "def update(self):\n self.data.update()\n for sensor in self.data.daikinskyport.get_sensors(self._index):\n if sensor[\"type\"] == self._type and self._sensor_name == sensor[\"name\"]:\n self._state = sensor[\"value\"]", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def update(self):\n _LOGGER.debug(\"Requesting update from sensor...\")\n self._cuby.refresh_devices()\n\n state = \\\n float(self._cuby.devices[self._device_id][self._sensor_name])\n\n if self._sensor_name in FORMAT_NUMBERS:\n self._state = '{0:.1f}'.format(state)\n else:\n self._state = state", "def update_state(self, dstate):\n pass", "def update_action_value(self, state, action, value):\n self.value_function[to_table_index(state, action)] = value", "def update_state(state_id):\n update_state = request.get_json()\n if type(update_state) is not dict:\n abort(400, {'Not a JSON'})\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n else:\n for key, value in update_state.items():\n setattr(state, key, value)\n storage.save()\n return jsonify(state.to_dict())", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]", "def update_to_state(self, game_state):\n pass", "def update(self, **kwargs) -> \"FullPseudoSpectralState\":\n new_values = {}\n if \"state\" in kwargs:\n raise ValueError(\n \"do not update attribute 'state' directly, update individual fields\"\n )\n for name, new_val in kwargs.items():\n # Check that shapes and dtypes match\n if getattr(getattr(self, name), \"shape\", None) != getattr(\n new_val, \"shape\", None\n ):\n raise ValueError(f\"found mismatched shapes for {name}\")\n if getattr(getattr(self, name), \"dtype\", None) != getattr(\n new_val, \"dtype\", None\n ):\n raise TypeError(f\"found mismatched dtypes for {name}\")\n if name in {\"q\", \"qh\"}:\n # Special handling for q and qh, make spectral and assign to state\n new_val = self.state.update(**{name: new_val})\n name = \"state\"\n elif name in {\"uh\", \"vh\", \"uqh\", \"vqh\"}:\n # Handle other spectral names, store as non-spectral\n new_val = _generic_irfftn(new_val)\n name = name[:-1]\n elif name == \"p\":\n new_val = _generic_rfftn(new_val)\n name = \"ph\"\n elif name == \"dqdt\":\n new_val = _generic_rfftn(new_val)\n name = \"dqhdt\"\n # Check that we don't have duplicate destinations\n if name in new_values:\n raise ValueError(f\"duplicate updates for {name}\")\n # Set up the actual replacement\n new_values[name] = new_val\n # Produce new object with processed values\n return dataclasses.replace(self, 
**new_values)", "def _update_state_value(self, old_state, new_state):\n if not self._test_mode:\n if isinstance(new_state, int):\n self._state_values[old_state] += self._alpha * \\\n (new_state - self._state_values[old_state])\n else:\n self._state_values[old_state] += self._alpha * \\\n (self._state_values[new_state] - self._state_values[old_state])", "def update(self):\n self._state = self._state", "def test_update_state(self):\n pass", "def update_state(self, progress, policy_state=None):\n raise NotImplementedError", "async def update(self):\n resp = await self._request('get', 'state')\n if resp:\n for line in resp.splitlines():\n key, val = line.strip().split(None, 1)\n if val == 'on' or val == 'off':\n val = (val == 'on')\n self.state_data[key] = val\n else:\n self.state_data[key] = val", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def update(self):\n self._bloomskystorm.refresh_devices()\n\n state = \\\n self._bloomskystorm.devices[self._device_id]['Storm'][self._sensor_name]\n\n if self._sensor_name in FORMAT_NUMBERS:\n self._state = '{0:.2f}'.format(state)\n else:\n self._state = state", "def value(self, state):\n raise NotImplementedError", "def value(self, state):\n raise NotImplementedError", "def update_entity_state(self, state: dict) -> None:\n self._attr_is_on = self.entity_description.value(\n state[self.entity_description.key]\n )\n self.async_write_ha_state()", "def value(self, state):\n\t\traise NotImplementedError", "async def updated(self, value):\n pass", "def update_q_values(self, state, value):\n if self.prev_state is not None and self.learning:\n reward = self.reward(Game.game_state(state))\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward + self.gamma * value - self.prev_q_val)" ]
[ "0.7151833", "0.665996", "0.6558452", "0.6502737", "0.64651436", "0.63845944", "0.63560015", "0.6289542", "0.6248779", "0.62400794", "0.6227977", "0.61939865", "0.6159757", "0.61569464", "0.61077875", "0.61051774", "0.6100336", "0.6094352", "0.60911036", "0.60332465", "0.5980989", "0.5968544", "0.5966778", "0.5957", "0.59519386", "0.59519386", "0.5929403", "0.59287316", "0.59260464", "0.59220713" ]
0.6914396
1
Run black and isort
def format(ctx): for cmd in ("black .", "isort ."): ctx.run(cmd, echo=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_black(self):\n chdir(REPO_ROOT)\n cmd = [\"black\", \"-v\", \"--check\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"black issues:\\n{proc.stderr.decode('utf-8')}\"", "def main():\n utl.calibrate(False)\n undistort(False)\n edge_detect(False)\n transform(False)\n identify_line(False)\n lane_line(True)", "def black(context):\n exec_cmd = \"black --check --diff .\"\n run_cmd(context, exec_cmd)", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def isort(command, checkonly=False):\n print(\n \"\"\"\nRunning isort the Python code import sorter\n===========================================\n\"\"\"\n )\n cmd = \"isort --check-only --diff .\" if checkonly else \"isort .\"\n command.run(cmd, echo=True, pty=POSIX)", "def main():\r\n algos = 
[merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)", "def hxlsort():\n run_script(hxlsort_main)", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def main():\n\n parser = argparse.ArgumentParser(description='codec_compare')\n parser.add_argument('path', metavar='DIR',\n help='path to images folder')\n args = parser.parse_args()\n classpath = args.path\n classname = classpath.split('/')[1]\n\n images = set(listdir_full_path(classpath))\n if len(images) <= 0:\n print \"\\033[91m[ERROR]\\033[0m\" + \" no source files in ./images.\"\n sys.exit(1)\n\n codeclist_full = set(['aom', 'deepcoder', 'deepcoder-lite', 'fuif', 'fvdo', 'hevc', 'kakadu', 'jpeg',\n 'pik', 'tat', 'xavs', 'xavs-fast', 'xavs-median', 'webp'])\n\n bpp_targets = set([0.06, 0.12, 0.25, 0.50, 0.75, 1.00, 1.50, 2.00])\n for image in images:\n width, height, depth = get_dimensions(image, classname)\n name, imgfmt = os.path.splitext(image)\n imgfmt = os.path.basename(image).split(\".\")[-1]\n derivative_images = []\n if classname[:6] == 'classB':\n derivative_images = create_derivatives(image, classname)\n else:\n derivative_images.append((image, imgfmt))\n\n for derivative_image, pix_fmt in derivative_images:\n json_dir = 'metrics'\n mkdir_p(json_dir)\n json_file = os.path.join(json_dir,\n os.path.splitext(os.path.basename(derivative_image))[0] + \".\" + pix_fmt + \".json\")\n # if os.path.isfile(json_file):\n # print \"\\033[92m[JSON OK]\\033[0m \" + json_file\n # continue\n main_dict = dict()\n derivative_image_metrics = dict()\n for codecname in codeclist_full:\n convertflag = 1\n caseflag = pix_fmt\n if (codecname == 'webp' or codecname == 'tat' or 'deepcoder' in codecname) and depth != '8':\n continue\n if 'xavs' in codecname and depth != '8' and depth != '10':\n continue\n if 'classE' in classname and ('tat' in codecname or 'xavs' in codecname or 'deepcoder' in codecname):\n continue\n if codecname == 'kakadu' and classname[:6] == 'classB':\n convertflag = 0\n caseflag = imgfmt\n bpp_target_metrics = dict()\n for bpp_target in bpp_targets:\n print(codecname)\n if codecname == 'aom' and classname[:6] == 'classB':\n # ('AERIAL2' in image or 'CATS' in image or 'XRAY' in image or 'GOLD' in image or 'TEXTURE1' in image):\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + 'av1'\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'kakadu' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif 'xavs' in codecname and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' 
+ codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'fvdo' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_pgm' + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.pgm')\n original_image = image\n else:\n if codecname == 'fuif' and 'tif' in imgfmt:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '.tif_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n elif codecname == 'webp' or codecname == 'tat':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_yuv420p.' + codecname\n else:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image_path = os.path.join('outputs', codecname, 'decoded')\n decoded_image = ''\n for decodedfile in os.listdir(decoded_image_path):\n encoderoot = '_'.join(os.path.splitext(os.path.basename(encoded_image_name))[0].split('_')[:-1])\n if encoderoot in decodedfile:\n if ('tat' in codecname or 'webp' in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] == '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n print(decoded_image)\n if ('tat' not in codecname or 'webp' not in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] != '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n if 'classE' not in classname and 'classB' not in classname and os.path.isfile(decoded_image):\n decoded_image = convert_decoded(decoded_image, width, height, depth, codecname)\n original_image = convert_decoded(derivative_image, width, height, depth, 'reference')\n else:\n original_image = derivative_image\n\n print('Reference:' + original_image)\n print('Encoded:' + encoded_image)\n print('Decoded:' + decoded_image)\n if (os.path.isfile(original_image) and os.path.isfile(decoded_image) and os.path.isfile(encoded_image)):\n if 'classE' in classname:\n metrics = compute_metrics_HDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width, height, pix_fmt, depth)\n\n elif 'classB' in classname:\n metrics = compute_metrics(original_image, decoded_image, encoded_image, bpp_target, codecname,\n width, height, pix_fmt)\n else:\n metrics = compute_metrics_SDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width,\n height, imgfmt, depth)\n measured_bpp = (os.path.getsize(encoded_image) * 1.024 * 8) / (float((int(width) * int(height))))\n bpp_target_metrics[measured_bpp] = metrics\n else:\n continue\n \n derivative_image_metrics[codecname] = bpp_target_metrics\n main_dict[derivative_image] = derivative_image_metrics\n\n mkdir_p(json_dir)\n with open(json_file, 'w') as f:\n f.write(json.dumps(main_dict, indent=2))", "def run_algo(self, th):\n p = self.run_proc(['threshold', str(th), 'input_0.png',\n 'output.png'])\n self.wait_proc(p, timeout=self.timeout)\n return", "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for 
part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\"", "def bench(count=10):\n from sys import platform\n from time import time, asctime\n from numpy import average, zeros\n\n filename = 'csample.jpg'\n f = readgray(filename)\n fbin=threshad(f,150)\n se = img2se(binary([[0,1,0],[1,1,1],[0,1,0]]),'NON-FLAT',to_int32([[0,1,0],[1,2,1],[0,1,0]]))\n m=thin(fbin)\n tasks=[\n [' 1. Union bin ','union(fbin,fbin)'],\n [' 2. Union gray-scale ','union(f,f)'],\n [' 3. Dilation bin, secross ','dilate(fbin)'],\n [' 4. Dilation gray, secross ','dilate(f)'],\n [' 5. Dilation gray, non-flat 3x3 SE ','dilate(f,se)'],\n [' 6. Open bin, secross ','open(fbin)'],\n [' 7. Open gray-scale, secross ','open(f)'],\n [' 8. Open gray, non-flat 3x3 SE ','open(f,se)'],\n [' 9. Distance secross ','dist(fbin)'],\n ['10. Distance Euclidean ','dist(fbin,sebox(),\"euclidean\")'],\n ['11. Geodesic distance secross ','gdist(fbin,m)'],\n ['12. Geodesic distance Euclidean ','gdist(fbin,m,sebox(),\"euclidean\")'],\n ['13. Area open bin ','areaopen(fbin,100)'],\n ['14. Area open gray-scale ','areaopen(f,100)'],\n ['15. Label secross ','label(fbin)'],\n ['16. Regional maximum, secross ','regmax(f)'],\n ['17. Open by rec, gray, secross ','openrec(f)'],\n ['18. ASF by rec, oc, secross, 1 ','asfrec(f)'],\n ['19. Gradient, gray-scale, secross ','gradm(f)'],\n ['20. Thinning ','thin(fbin)'],\n ['21. Watershed ','cwatershed(f,fbin)']]\n result = zeros((21),'d')\n for t in xrange(len(tasks)):\n print tasks[t][0],tasks[t][1]\n t1=time()\n for k in xrange(count):\n a=eval(tasks[t][1])\n t2=time()\n result[t]= (t2-t1)/(count+0.0)\n print version() +' Benchmark'\n print 'Made on ',asctime(),' computer=',platform\n print 'image filename=',filename,' width=', f.shape[1],', height=',f.shape[0]\n print ' Function time (sec.)'\n for j in xrange(21):\n print tasks[j][0], result[j]\n print ' Average ', average(result)\n out=[]", "def black(command, checkonly=False):\n print(\n \"\"\"\nRunning Black the Python code formatter\n=======================================\n\"\"\"\n )\n cmd = \"black --check --diff .\" if checkonly else \"black .\"\n command.run(cmd, echo=True, pty=POSIX)", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n 
texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def task_format():\n print(HERE)\n return {\"actions\": [\"isort -y \", [\"black\", HERE]], \"verbosity\": 1}", "def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. 
Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()", "def white_2_comp(fns, lowerp, upperp, utaper, ltaper, npow, bindir, sacbin):\n whitefilter = bindir + '/white_2cmp' + ' 1>/dev/null'\n # whitefilter = bindir+'/white_2cmp'\n saccmd = sacbin + ' 1>/dev/null'\n srcE, tarE, eqtarE = fns[0]\n srcN, tarN, eqtarN = fns[1]\n p1 = sp.Popen(saccmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child1 = p1.stdin\n print >> child1, \"r %s %s\" % (eqtarE + '_tmp', eqtarN + '_tmp')\n print >> child1, \"abs\"\n print >> child1, \"smooth mean h 128\"\n print >> child1, \"w aaa bbb\"\n print >> child1, \"r aaa\"\n print >> child1, \"subf bbb\"\n print >> child1, \"abs\"\n print >> child1, \"addf aaa\"\n print >> child1, \"addf bbb\"\n print >> child1, \"div 2\"\n print >> child1, \"w a1.avg\"\n print >> child1, \"r %s %s\" % (tarE + '_tmp', tarN + '_tmp')\n print >> child1, \"divf a1.avg\"\n print >> child1, \"w %s %s\" % (tarE, tarN)\n print >> child1, \"q\"\n err1 = child1.close()\n ret1 = p1.wait()\n if err1 or ret1 != 0:\n raise RuntimeError, '%r failed with exit code %d' % (saccmd, err1)\n if os.path.isfile('./aaa'):\n os.remove('./aaa')\n if os.path.isfile('./bbb'):\n os.remove('./bbb')\n if os.path.isfile('./a1.avg'):\n os.remove('./a1.avg')\n os.remove(eqtarN + '_tmp')\n os.remove(eqtarE + '_tmp')\n os.remove(tarN + '_tmp')\n os.remove(tarE + '_tmp')\n p2 = sp.Popen(whitefilter, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child2 = p2.stdin\n print >> child2, ltaper, lowerp, upperp, utaper, npow, tarE, tarN\n err2 = child2.close()\n ret2 = p2.wait()\n if err2 or ret2 != 0:\n raise RuntimeError, '%r failed with exit code %d' % (whitefilter, err2)\n os.remove(tarE)\n os.remove(tarN)\n return 1", "def blacken(session):\n session.install(DEPS[\"black\"])\n check_black = get_path(\"scripts\", \"blacken_all_files.py\")\n session.run(\"python\", check_black)", "def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n 
bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)", "def fix():\n _run_in_venv([\"black\"] + TEST_FILES)\n _run_in_venv([\"isort\"] + TEST_FILES)", "def brut_mind(code = Code(), display=False):\n\tfrom itertools import permutations\n\tprint(f\"code is {code}\")\n\n\tturn = 0\n\tcompleted = False\n\tused_colors = []\n\twhile not completed:\n\t\t# 1st phase\n\t\tfor color in range(9):\n\t\t\tif display:\n\t\t\t\tprint(f'trying {color}')\n\t\t\tscore = code.score([color,color,color,color,color])\n\t\t\tused_colors+= [color for i in range(score[0])]\n\t\t\tif len(used_colors) == 5:\n\t\t\t\tbreak\n\t\tif display:\n\t\t\tprint(f\"colors found are {used_colors}\")\n\n\t\t# 2nd phase\n\t\tfor attempt in list(permutations(used_colors)):\n\t\t\tif display:\n\t\t\t\tprint(f\"attempt {attempt}\")\n\t\t\tscore = code.score(list(attempt))\n\t\t\tcompleted = score == (5,0)\n\t\t\tif completed:\n\t\t\t\tbreak\n\t\t\tturn +=1\n\tprint(f\"the code was {attempt}\")\n\n\tprint(f\"completed in {turn} turn(s)\")", "def main():\n\n tests = [500, 1000, 10000]\n results = {\n 'Insertion Sort': 0.0,\n 'Shell Sort': 0.0,\n 'Python Sort': 0.0\n }\n\n for test in tests:\n i = 0\n\n while i < 100:\n test_list = gen_random_list(test)\n results['Insertion Sort'] += insertion_sort(test_list)[0]\n results['Shell Sort'] += shell_sort(test_list)[0]\n results['Python Sort'] += python_sort(test_list)[0]\n i += 1\n\n print(\"Sort results for list of size %s items:\" % test)\n for key, value in results.items():\n print(\"%s took %10.7f seconds to run, on average.\" % (key, (value/100)))\n print(\"\\n\")", "def imageProcessing():\n\n # Parser initialization\n parser = argparse.ArgumentParser(description=colourers.toCyan('Image processor for reading/writing images into BMP/PNG formats and applying transformations on it.'))\n \n # Formats Parser\n group = parser.add_argument_group(colourers.toGreen('formats'))\n formatParser = group.add_mutually_exclusive_group(required=True)\n formatParser.add_argument('--bmp',\n type=str,\n metavar=colourers.toRed('<bmp file name>'), \n help=colourers.toMagenta('bmp file to parse'))\n formatParser.add_argument('--png',\n type=str,\n metavar=colourers.toRed('<png file name>'),\n help=colourers.toMagenta('png file to parse'))\n\n # Printers Parser\n group = parser.add_argument_group(colourers.toYellow('printers'))\n printers = group.add_mutually_exclusive_group()\n printers.add_argument('--header',\n help=colourers.toMagenta('print the file format header'),\n action='store_true')\n printers.add_argument('--print-color',\n '-pc',\n type=int,\n nargs=2,\n metavar=(colourers.toRed('<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('pixel to print'))\n printers.add_argument('--histogram',\n action='store_true',\n help=colourers.toMagenta('print histogram associated'))\n printers.add_argument('--output',\n '-o',\n type=str,\n metavar=colourers.toRed('<output file>'),\n help=colourers.toMagenta('image output file'))\n\n # Transformers Parser\n transformers = parser.add_argument_group(colourers.toBlue('transformers'))\n transformers.add_argument('--half',\n action='store_true',\n help='applying the filter on one half of the image')\n transformers.add_argument('--rotate',\n '-r',\n type=int,\n choices=[90, 180, 270],\n metavar=colourers.toRed('<degree of rotation>'),\n help=colourers.toMagenta('rotate the image'))\n 
transformers.add_argument('--scale',\n '-s',\n type=int,\n nargs='+',\n action=required_length(1, 2),\n metavar=(colourers.toRed('<scaleRatio> | [<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('scale/shrink the image'))\n transformers.add_argument('--contrast',\n '-c',\n type=float,\n metavar=colourers.toRed('<contrast factor>'),\n help=colourers.toMagenta('apply a factor contrast'))\n transformers.add_argument('--grayscale',\n '-gs',\n action='store_true',\n help=colourers.toMagenta('to grayscale image'))\n transformers.add_argument('--binary',\n '-b',\n action='store_true',\n help=colourers.toMagenta('to binary image'))\n transformers.add_argument('--invert',\n '-i',\n action='store_true',\n help=colourers.toMagenta('to inverted image, equivalent to --contrast -1'))\n transformers.add_argument('--channel',\n type=str,\n choices=['blue', 'green', 'red'],\n metavar=colourers.toRed('<channel>'),\n nargs='+',\n action=required_length(1, 2),\n help=colourers.toMagenta('to the specified channel'))\n \n # Filters Parser\n filters = parser.add_argument_group(colourers.toCyan('filters'))\n filters.add_argument('--edge-detection',\n '-ed',\n type=str,\n choices=['canny', 'sobel', 'prewitt', 'roberts', 'kirsch'],\n metavar=colourers.toRed('<filter name>'),\n help=colourers.toMagenta('perform an edge detection'))\n filters.add_argument('--retrieve-color',\n '-rv',\n action='store_true',\n help=colourers.toMagenta('retrieve the colors of a grayscale image'))\n filters.add_argument('--edge-enhancement',\n '-ee',\n action='store_true', \n help=colourers.toMagenta('applying increased edge enhancement filter'))\n filters.add_argument('--sharpen',\n action='store_true',\n help=colourers.toMagenta('sharpening the image'))\n filters.add_argument('--unsharp',\n action='store_true',\n help=colourers.toMagenta('unsharp the image')) \n filters.add_argument('--denoise',\n action='store_true',\n help=colourers.toMagenta('denoise the image'))\n filters.add_argument('--texture-detection',\n '-td',\n action='store_true',\n help=colourers.toMagenta('applying texture detection (Gabor Filter)'))\n filters.add_argument('--blur',\n type=str,\n choices=['simple', 'more', 'average', 'gaussian', 'motion'],\n metavar=colourers.toRed('<type of blur>'),\n help=colourers.toMagenta('perform the selected blur'))\n filters.add_argument('--blur-iteration',\n '-bi',\n type=int,\n default=1,\n metavar=colourers.toRed('<number of iteration>'),\n help=colourers.toMagenta('apply N times the blur function'))\n filters.add_argument('--emboss',\n action='store_true',\n help=colourers.toMagenta('perform an embossing filter'))\n filters.add_argument('--overlap',\n type=str,\n nargs='+',\n metavar=colourers.toRed('<image to overlap>'),\n help=colourers.toMagenta('overlap an image given on the selected image'))\n\n # Args parsing\n args = parser.parse_args()\n\n filename = \"\"\n # BMP Block\n if args.bmp:\n filename = args.bmp\n\n if not os.path.isfile(filename):\n colourers.error('\"{}\" does not exist !'.format(filename))\n sys.exit(-1)\n colourers.success('Success Opening {}...'.format(filename))\n\n bmp = BMP(filename)\n half = args.half\n\n if args.print_color:\n width, height = args.print_color\n colourers.info(f'Printing pixel color of ({width}, {height})')\n Printers.printPixel(bmp, width, height)\n sys.exit(0)\n \n elif args.header:\n colourers.info(f'Printing BMP header of {bmp.filename}')\n Printers.printHeader(bmp)\n sys.exit(0)\n \n elif args.histogram:\n colourers.info(f'Printing color histogram of 
{bmp.filename}')\n Printers.printHistogram(bmp)\n sys.exit(0)\n \n if (args.rotate or args.scale or args.contrast or args.grayscale or \n args.binary or args.channel or args.edge_detection or args.retrieve_color or\n args.edge_enhancement or args.blur or args.emboss or args.overlap or args.texture_detection or\n args.denoise or args.sharpen or args.unsharp):\n if not hp.atLeastOne(args.output, (\n args.rotate,\n args.scale,\n args.contrast,\n args.grayscale,\n args.binary,\n args.channel,\n args.edge_detection,\n args.retrieve_color,\n args.edge_enhancement,\n args.blur,\n args.emboss,\n args.overlap,\n args.texture_detection,\n args.denoise,\n args.sharpen,\n args.unsharp\n )):\n parser.error('--rotate/--scale/--contrast/--grayscale/--binary/--channel/--edge-detection/--retrieve-color/--edge-enhancement/--blur/--emboss/--overlap/--texture-detection/--denoise/--sharpen/--unsharp and --output must be given together')\n \n if args.rotate:\n degree = args.rotate\n colourers.info(f'Rotating image to {degree} degree')\n bmp.imageData = Transformers.rotate(bmp, degree)\n\n if args.scale:\n if len(args.scale) == 2:\n width, height = args.scale\n colourers.info(f'Scaling image to {width}x{height} pixels')\n bmp.imageData = Transformers.scale(bmp, height, width)\n else:\n scaleRatio = args.scale[0]\n\n colourers.info(f'Scaling image to {scaleRatio} scale ratio')\n\n height = int(hp.readLittleEndian(bmp.height))\n width = int(hp.readLittleEndian(bmp.width))\n\n bmp.imageData = Transformers.scale(bmp, height * scaleRatio, width * scaleRatio)\n \n if args.contrast:\n factor = args.contrast\n colourers.info(f'Applying a factor contrast of {factor}')\n bmp.imageData = Transformers.contrast(bmp, factor)\n \n if args.grayscale:\n colourers.info(f'Applying grayscale mask to the image')\n bmp.imageData = Transformers.grayscale(bmp, half)\n \n if args.binary:\n colourers.info(f'Applying binary mask to the image')\n bmp.imageData = Transformers.binary(bmp, half)\n \n if args.invert:\n colourers.info(f'Inverting image colours')\n bmp.imageData = Transformers.invert(bmp, half)\n \n if args.channel:\n if len(args.channel) == 2:\n c1, c2 = args.channel\n colourers.info(f'Keeping only {c1} and {c2} channels of the image')\n bmp.imageData = Transformers.toChannel(bmp, [c1, c2], half)\n else:\n channel = args.channel[0]\n colourers.info(f'Keeping only {channel} channel of the image')\n bmp.imageData = Transformers.toChannel(bmp, channel, half)\n \n if args.denoise:\n colourers.info(f'Denoising the image')\n bmp.imageData = Filters.wienerFilter(bmp.imageData, gaussianKernel(9, sigma=0.33), K=10)\n \n if args.texture_detection:\n colourers.info(f'Applying texture detection (Gabor Filter)')\n bmp.imageData = Filters.gaborFilter(bmp.imageData, gaborKernel(0))\n \n if args.edge_enhancement:\n colourers.info(f'Applying increased edge enhancement filter')\n bmp.imageData = Filters.iee(bmp.imageData)\n\n if args.edge_detection:\n filterName = args.edge_detection\n if filterName == 'canny':\n colourers.info(f'Performing Canny filter for edge detection')\n bmp.imageData = Filters.ced(bmp.imageData, sigma=0.33, kernelSize=9, weakPix=50)\n if filterName == 'sobel':\n colourers.info(f'Performing Sobel filter for edge detection')\n bmp.imageData = Filters.sed(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'prewitt':\n colourers.info(f'Performing Prewitt filter for edge detection')\n bmp.imageData = Filters.ped(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'roberts':\n colourers.info(f'Performing Roberts 
filter for edge detection')\n bmp.imageData = Filters.red(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'kirsch':\n colourers.info(f'Performing Kirsch filter for edge detection')\n bmp.imageData = Filters.ked(bmp.imageData, sigma=0.33, kernelSize=9)\n\n if args.sharpen:\n colourers.info(f'Sharpening the image')\n bmp.imageData = Filters.sharpen(bmp.imageData)\n \n if args.unsharp:\n colourers.info(f'Unsharpening the image')\n bmp.imageData = Filters.unsharp(bmp.imageData)\n\n if args.retrieve_color:\n colourers.info(f'Retrieving color')\n bmp.imageData = Filters.retrieveColor(bmp.imageData)\n \n if args.blur:\n blurType = args.blur\n colourers.info(f'Performing a {blurType} blur')\n for _ in range(args.blur_iteration):\n blurFunc = Filters.blur.switcher.get(blurType)\n bmp.imageData = blurFunc(bmp.imageData)\n \n if args.emboss:\n colourers.info(f'Performing emboss filter')\n bmp.imageData = Filters.emboss(bmp.imageData)\n \n if args.overlap:\n overlappers = []\n for ov in args.overlap:\n overlappers.append(BMP(ov).imageData)\n colourers.info(f'Performing an overlapping between {bmp.filename} and {args.overlap}')\n bmp.imageData = Filters.overlap(bmp.imageData, overlappers)\n \n if args.output:\n outputFile = args.output\n hp.saveBMP(bmp, bmp.imageData, outputFile)\n colourers.success(f'Succesfully saved into {outputFile}')\n sys.exit(0)\n \n parser.error('Give at least one more argument')\n \n # PNG Block\n else:\n filename = args.png\n\n if not os.path.isfile(filename):\n print('\"{}\" does not exist'.format(filename), file=sys.stderr)\n sys.exit(-1)\n print('Success Opening {}...'.format(filename))\n \n png = PNG(filename)", "def black_check(ctx):\n ctx.run(f\"{VENV_PREFIX} black --check {COMMON_MODULES_AS_PARAM}\")", "def testBlackAndWhite(name = \"smokey.gif\"):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n blackAndWhite(image)\n image.draw()", "def main(folder, outputfile):\n parser = argument_parser()\n args = parser.parse_args()\n\n show_all = args.show_all\n verbose = args.verbose\n\n random.seed(args.rng_seed)\n\n args.files = folder\n print args.files\n\n try:\n image = Image.open(args.files[0])\n except IOError, msg:\n print >> sys.stderr, msg\n return 1\n if image.mode == 'P':\n image = image.convert('RGB')\n \n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n\n if not show_all:\n def nothing(a, b):\n pass\n do_something = nothing\n elif args.saving:\n do_something = Imsave(\"saved/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]) + \"/\").save\n else:\n import im_debug\n do_something = im_debug.show\n\n if verbose:\n import time\n class Logger:\n def __init__(self):\n self.t = 0\n\n def __call__(self, m):\n t_n = time.time()\n if self.t > 0:\n print >> sys.stderr, \"\\t\" + str(t_n - self.t)\n print >> sys.stderr, m\n self.t = t_n\n logger = Logger()\n\n else:\n def logger(m):\n pass\n \n if args.manual_mode:\n import manual\n try:\n lines = manual.find_lines(image)\n except manual.UserQuitError:\n #TODO ask user to try again\n return 1\n else:\n if args.l_cache:\n filename = (\"saved/cache/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]))\n cache_dir = \"/\".join(filename.split('/')[:-1])\n if os.path.exists(filename):\n lines, l1, l2, bounds, hough = pickle.load(open(filename))\n print >> sys.stderr, \"using cached results\"\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n 
if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n d_file = open(filename, 'wb')\n pickle.dump((lines, l1, l2, bounds, hough), d_file)\n d_file.close()\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n\n grid, lines = gridf.find(lines, image.size, l1, l2, bounds, hough,\n show_all, do_something, logger)\n if show_all:\n im_g = image.copy()\n draw = ImageDraw.Draw(im_g)\n for l in grid[0] + grid[1]:\n draw.line(l, fill=(64, 255, 64), width=1)\n do_something(im_g, \"grid\", name=\"grid\")\n\n intersections = intrsc.b_intersects(image, lines, show_all, do_something, logger)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n\n logger(\"finished\")\n\n # TODO! refactor this mess:\n if len(args.files) == 1:\n\n if args.sgf_output:\n print board.asSGFsetPos()\n else:\n print board\n \n else:\n game = output.Game(19, board) #TODO size parameter\n #for f in args.files[1:]:\n for i, f in enumerate(args.files):\n try:\n image = Image.open(f)\n except IOError, msg:\n print >> sys.stderr, msg\n continue\n if verbose:\n print >> sys.stderr, \"Opening\", f\n if image.mode == 'P':\n image = image.convert('RGB')\n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n if args.sgf_output:\n game.addMove(board)\n else:\n with open(outputfile + str(i) + \".txt\", \"w\") as f:\n f.write(str(board))\n\n if args.sgf_output:\n print game.asSGF()\n\n return 0", "def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red 
/= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def main():\n run_test_draw_upside_down_wall()" ]
[ "0.59754866", "0.59357375", "0.59338224", "0.5830252", "0.5777101", "0.5715761", "0.57027775", "0.5696344", "0.5681235", "0.5680232", "0.5654622", "0.5618479", "0.5586576", "0.55820316", "0.5538225", "0.5526683", "0.5522204", "0.55172414", "0.5488912", "0.546539", "0.54405814", "0.535953", "0.53540486", "0.53423834", "0.5341752", "0.53331226", "0.5283204", "0.5251864", "0.52431643", "0.5242008" ]
0.6124772
0
Wait until toy is detected or timeout is reached.
async def toy_detected(self, toy_wait_time): logging.info("Waiting for toy") try: await asyncio.wait_for(self._wait_for_toy(), timeout=toy_wait_time) logging.info("Toy detected") return True except asyncio.TimeoutError: logging.info("No toy detected") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for(self, timeout):\n ready = False\n # Dividing sleep time by 300 instead of 30 double CPU load but cuts\n # IMU timestamp variation from about 20% to less than 1%\n sleep_time = (timeout / 1000.0) / 30\n stop_time = time.monotonic_ns() + (timeout * 1000000.0)\n while not ready and time.monotonic_ns() < stop_time:\n ready = GPIO.input(self.gpio_pin)\n time.sleep(sleep_time)\n return ready", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def wait(self, timeout):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "def wait_until_responsive(self, check, timeout, pause,\n clock=timeit.default_timer):\n\n ref = clock()\n now = ref\n while (now - ref) < timeout:\n if check():\n return\n time.sleep(pause)\n now = clock()\n\n raise Exception(\n 'Timeout reached while waiting on service!'\n )", "def _wait_on_condition(self, timeout):\n self.__condition.wait(timeout)", "def wait(self, axis, timeout=10):\n if not self.enabled:\n return\n\n # Wait for the motor to stop moving\n moving = True\n seconds = int(round(time.time() * 1000))\n \n # check moving flag\n while moving:\n time.sleep(0.01)\n flags = self.status(axis)\n if (flags[0] and flags[1])==True:\n moving = False\n return False\n else: # Timeout\n moving = True\n if timeout == -1:\n pass\n elif (int(round(time.time() * 1000))-seconds)/1000 > timeout:\n return True", "def wait(self, timeout=None):\n assert False, \"Deriving class must implement\"", "def wait(self, timeout=None):\n if timeout is None:\n timeout = self.timeout\n started = time.time()\n while True:\n if self.get_ip():\n self.state = State.RUNNING\n return True\n else:\n time.sleep(1)\n if timeout != 0:\n if time.time() - started > timeout:\n return False", "def timeout(self):\n self._status_update(\"Pyloton: Timeout\")\n time.sleep(3)", "def timeout_wait(self):\n if self._dtr_enabled:\n while (self.__micros() - self._resume_time) 
< 0:\n if False:\n break # TODO: Check for printer status here\n else:\n while (self.__micros() - self._resume_time) < 0:\n pass", "def wait(self, timeout=None):\n with self.condition:\n if not self.ready:\n self.condition.wait(timeout)", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def _busy_wait(self, timeout=40.0):\n # If the busy_pin is *high* (pulled up by host)\n # then assume we're not getting a signal from inky\n # and wait the timeout period to be safe.\n if self._gpio.input(self.busy_pin):\n warnings.warn(\"Busy Wait: Held high. Waiting for {:0.2f}s\".format(timeout))\n time.sleep(timeout)\n return\n\n # If the busy_pin is *low* (pulled down by inky)\n # then wait for it to high.\n t_start = time.time()\n while not self._gpio.input(self.busy_pin):\n time.sleep(0.01)\n if time.time() - t_start >= timeout:\n warnings.warn(\"Busy Wait: Timed out after {:0.2f}s\".format(time.time() - t_start))\n return\n\n # print(\"Busy_waited\", time.time()-t_start, \"out of\", timeout, \"seconds\")", "def wait(self, timeoout=None, state=\"C-completed\"):" ]
[ "0.68167084", "0.6798189", "0.6798189", "0.6798189", "0.6736745", "0.6614005", "0.65733945", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.64865094", "0.6464942", "0.639782", "0.63566405", "0.6349288", "0.6327558", "0.6315262", "0.631517", "0.63028723", "0.6300748", "0.6290689", "0.62645996", "0.6245855", "0.62182593" ]
0.7551
0
Takes an srm value as an int such as 1 or string such as '1' and returns a hex color code for it.
def srm_to_hex(srm: int | str) -> str: mapping = { 0: '#FFF4D4', 1: '#FFE699', 2: '#FFD878', 3: '#FFCA5A', 4: '#FFBF42', 5: '#FBB123', 6: '#F8A600', 7: '#F39C00', 8: '#EA8F00', 9: '#E58500', 10: '#DE7C00', 11: '#D77200', 12: '#CF6900', 13: '#CB6200', 14: '#C35900', 15: '#BB5100', 16: '#B54C00', 17: '#B04500', 18: '#A63E00', 19: '#A13700', 20: '#9B3200', 21: '#952D00', 22: '#8E2900', 23: '#882300', 24: '#821E00', 25: '#7B1A00', 26: '#771900', 27: '#701400', 28: '#6A0E00', 29: '#660D00', 30: '#5E0B00', 31: '#5A0A02', 32: '#600903', 33: '#520907', 34: '#4C0505', 35: '#470606', 36: '#420607', 37: '#3D0708', 38: '#370607', 39: '#2D0607', 40: '#1F0506', } try: return mapping.get(int(srm), mapping[40]) except Exception as e: logger.exception(e) return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def getColor(self,number):\n if number >= 0:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n hexcolor = '#%02x%02x%02x' % (ret[0]*255,ret[1]*255,ret[2]*255)\n return hexcolor", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def sanitizte_color(value):\n if len(value) == 7 and value[0] == '#':\n return \"#%06x\" % int(value[1:], 16)\n raise ValueError('invalid color')", "def color_negative_red(value):\n\n if value == 1:\n color = 'red'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def color(value):\r\n return 'RGB({}, {}, {})'.format(value.red(), value.blue(), value.green())", "def get_rgb(self, r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color(self):\n return 0x2f3136", "def get_xterm_color(r, g, b):\n rr = round_tint(r)\n rg = round_tint(g)\n rb = round_tint(b)\n\n return 16 + rr * 36 + rg * 6 + rb", "def rgbString(red,green,blue):\n return chr(red)+chr(green)+chr(blue)", "def toColor(n):\n color = ('%X'%(n+ID_OFFSET)).rjust(6,'0')\n if not len(color) == 6:\n raise ColorError(n)\n else:\n r = int(color[0:2], 16)\n g = int(color[2:4], 16)\n b = int(color[4:6], 16)\n return '%.3d %.3d %.3d'%(r,g,b)", "def Amber_to_Green(val):\n\tif val == 1 :\n\t\treturn \"GREEN\"\n\telif val == -1:\n\t\treturn \"RED\"", "def HexColor(val):\n if isinstance(val, str):\n val = int(val, 16)\n factor = 1.0 / 255\n return Color(factor * ((val >> 16) & 0xFF), factor * ((val >> 8) & 0xFF), factor * (val & 0xFF))", "def color_negative_red_positive_green(val):\n if val < 0:\n color = 'red'\n elif val > 0:\n color = 'green'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def _cc(self, args):\n if isinstance(args, str):\n return args\n try:\n r, g, b = args\n except (TypeError, ValueError):\n raise TurtleGraphicsError(\"bad color arguments: %s\" % str(args))\n if self.screen._colormode == 1.0:\n r, g, b = [round(255.0*x) for x in (r, g, b)]\n if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):\n raise TurtleGraphicsError(\"bad color sequence: %s\" % str(args))\n return \"#%02x%02x%02x\" % (r, g, b)", "def IntToColor(number):\n color = COLORS_INDEX.get(number)\n return color if color else 'default'", "def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]", "def getColorFlag(color):\n if color == 0: # MONO\n return 0\n elif color == 1: # BAYER\n return -1\n elif color == 2: # AS IS RBG\n return 1", "def hex_color(s):\n\n if s.startswith(\"#\"):\n s = s[1:]\n valid = len(s) in [1, 2, 3, 4, 6, 12] and set(s) <= set(string.hexdigits)\n if not valid:\n raise ValueError(\"colour must be 1,2,3,4,6, or 12 hex-digits\")\n\n # For the 4-bit RGB, expand to 8-bit, by repeating digits.\n if len(s) == 3:\n s = \"\".join(c + c for c in s)\n\n if len(s) in [1, 2, 4]:\n # Single grey value.\n return (int(s, 16),)\n\n if len(s) in [6, 12]:\n w = len(s) // 3\n return 
tuple(int(s[i : i + w], 16) for i in range(0, len(s), w))", "def get_colorstring(color) -> str:\n return f\"#{int(color[0]*255):02x}{int(color[1]*255):02x}{int(color[2]*255):02x}\"", "def Color(red, green, blue, white = 0):\n\treturn (white << 24) | (red << 16)| (green << 8) | blue", "def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c", "def hexcode(self):\n hexc = \"#%.02X%.02X%.02X\" % (int(self.rgb_255[0]), int(self.rgb_255[1]), int(self.rgb_255[2]))\n return hexc", "def get_hexcode(rgb):\n return \"#\" + \"\".join(f\"{hex(int(x))[2:]:0>2}\" for x in rgb)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)" ]
[ "0.72110575", "0.69716215", "0.66990834", "0.66990834", "0.66990834", "0.66604406", "0.6657392", "0.6654255", "0.6536384", "0.6468973", "0.64152944", "0.6414161", "0.63517797", "0.633173", "0.63313353", "0.62861365", "0.6267756", "0.625612", "0.62495655", "0.62468195", "0.6235229", "0.6227488", "0.62274337", "0.6206583", "0.62065285", "0.6184884", "0.61641586", "0.61641586", "0.61641586", "0.61641586" ]
0.73023427
0
Parse line of text and return lineData where lineData is data, which shall be saved and used for parsing next line This is quicker version of highlighBlock, which doesn't return results, but only parsers the block and produces data, which is necessary for parsing next line. Use it for invisible lines
def parseBlock(self, text, prevLineData): return self.parser.parseBlock(text, prevLineData)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_line(self, line, data):\n return data", "def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))", "def parse(self, text: str) -> yaslha.slha.SLHA:\n self.processing = None\n slha = yaslha.slha.SLHA()\n comment_lines = [] # type: List[str]\n\n for line in text.splitlines():\n try:\n obj = self._parse_line(line)\n if obj is None:\n continue\n except ValueError:\n logger.warning(\"Unrecognized line: %s\", line)\n continue\n\n # comment handling\n if isinstance(obj, yaslha.line.CommentLine):\n comment_lines.append(obj.comment)\n continue\n elif isinstance(obj, yaslha.line.AbsLine):\n obj.pre_comment = comment_lines\n comment_lines = []\n else:\n raise NotImplementedError(obj)\n\n # line handling\n if isinstance(obj, yaslha.line.BlockHeadLine):\n self.processing = AbsBlock.new(obj)\n assert self.processing is not None\n slha.add_block(self.processing)\n elif isinstance(obj, yaslha.line.DecayHeadLine):\n self.processing = Decay(obj)\n assert self.processing is not None\n slha.add_block(self.processing)\n elif isinstance(obj, yaslha.line.InfoLine):\n if not isinstance(self.processing, InfoBlock):\n logger.critical(\"InfoLine found outside of INFO block: %s\", line)\n raise ValueError(self.processing)\n self.processing.append_line(obj)\n elif isinstance(obj, yaslha.line.ValueLine):\n if self.processing is None:\n logger.critical(\"ValueLine found outside of block: %s\", line)\n raise ValueError(self.processing)\n self.processing.update_line(obj)\n else:\n raise TypeError(obj)\n\n # tail comments\n slha.tail_comment = comment_lines\n self.processing = None\n return slha", "def _ProcessLine(\n self,\n first_line,\n input_line,\n line,\n stripped_line,\n output_stream):\n # Check for the start of a code block.\n if constants.START_CODEBLOCK_RE.match(stripped_line):\n if self._code_block_depth == 0:\n # Start a new collection of lines.\n self._code_block_lines = []\n else:\n # Just an embedded code block.\n self._code_block_lines.append(line)\n self._code_block_depth += 1\n return\n\n # Check for the end of a code block.\n if constants.END_CODEBLOCK_RE.match(stripped_line):\n self._code_block_depth -= 1\n if self._code_block_depth == 0:\n # Closed the highest-level code block, handle it.\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n \"\\n\")\n self._formatting_handler.HandleCodeBlockOpen(\n input_line,\n output_stream,\n None)\n code = \"\".join(self._code_block_lines)\n self._formatting_handler.HandleText(input_line, output_stream, code)\n self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)\n else:\n # Just closed an embedded clode block.\n self._code_block_lines.append(line)\n return\n\n # Check if we're in a code block.\n # If we are, just put the raw text into code_block_lines.\n if self._code_block_depth != 0:\n self._code_block_lines.append(line)\n return\n\n # For empty lines, close all formatting.\n if not stripped_line:\n if not self._ConsumeTextForPlugin():\n self._SetCurrentList(input_line, 0, \" \", output_stream)\n self._CloseTags(input_line, output_stream)\n\n if self._table_columns:\n self._formatting_handler.HandleTableClose(input_line, output_stream)\n self._table_columns = []\n self._table_column = 0\n\n self._formatting_handler.HandleParagraphBreak(input_line, output_stream)\n return\n\n # Non-empty line, finish the previous line's newline.\n if not first_line:\n 
self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n \"\\n\")\n\n # Now check if we're processing within a list.\n indent_pos = constants.INDENT_RE.match(line).end()\n if (indent_pos and indent_pos < len(line) and\n not self._ConsumeTextForPlugin()):\n list_type = constants.LIST_TYPES.get(line[indent_pos], \"blockquote\")\n\n if self._SetCurrentList(input_line, indent_pos, list_type, output_stream):\n # Blockquotes take the entire remainder of the line,\n # but everything else skips the list symbol plus the space after.\n # (In case there is no space after, the first character is skipped;\n # we will warn if this is detected, as it was probably unintended.)\n if list_type == \"blockquote\":\n line = line[indent_pos:]\n else:\n if line[indent_pos + 1] != \" \":\n self._warning_method(\n input_line,\n u\"Missing space after list symbol: {0}, \"\n \"'{1}' was removed instead.\"\n .format(line[indent_pos], line[indent_pos + 1]))\n line = line[indent_pos + 2:]\n\n stripped_line = line.strip()\n else:\n # Reset to no indent.\n self._SetCurrentList(input_line, 0, \" \", output_stream)\n\n # Finally, split the line into formatting primitives.\n # We do so without whitespace so we can catch line breaks across tags.\n if constants.LINE_FORMAT_RE.match(stripped_line):\n self._ProcessMatch(\n input_line,\n constants.LINE_FORMAT_RE,\n stripped_line,\n output_stream)\n else:\n self._ProcessMatch(\n input_line,\n constants.TEXT_FORMAT_RE,\n stripped_line,\n output_stream)\n\n self._CloseTableRow(input_line, output_stream)", "def parse_line(self, line):\n raise NotImplementedError", "def parseLine(self,incoming):\n\t\tself.addContentLine(incoming)\n\t\tif incoming.mustProcess == 0: return \n\t\tself.iLineObject = incoming\n\t\titems = self.iLineObject.getItems()\n\t\tkeyword = string.upper(items[0])\n\t\tif len(items) < 2: \n\t\t\tif not keyword in self.sQuotedKeywords: \n\t\t\t\tself.addErrorMessage('2. 
Unrecognized keyword SIMULATOR_BLOCK:', self.iLineObject)\n\t\t\treturn\n\t\tself.parseKeyword(keyword,items[1:])", "def __handle_blank_line(\n parser_state,\n input_line,\n from_main_transform,\n position_marker=None,\n ):\n\n if not from_main_transform:\n close_only_these_blocks = [ParagraphStackToken]\n do_include_block_quotes = False\n else:\n close_only_these_blocks = None\n do_include_block_quotes = True\n POGGER.debug(\"hbl>>from_main_transform>>$\", from_main_transform)\n POGGER.debug(\"hbl>>close_only_these_blocks>>$\", close_only_these_blocks)\n POGGER.debug(\"hbl>>do_include_block_quotes>>$\", do_include_block_quotes)\n\n non_whitespace_index, extracted_whitespace = ParserHelper.extract_whitespace(\n input_line, 0\n )\n\n is_processing_list, in_index = LeafBlockProcessor.check_for_list_in_process(\n parser_state\n )\n POGGER.debug(\n \"hbl>>is_processing_list>>$>>in_index>>$>>last_stack>>$\",\n is_processing_list,\n in_index,\n parser_state.token_stack[-1],\n )\n\n requeue_line_info = None\n new_tokens = None\n force_default_handling = False\n if parser_state.token_stack[-1].was_link_definition_started:\n POGGER.debug(\n \"hbl>>process_link_reference_definition>>stopping link definition\"\n )\n empty_position_marker = PositionMarker(-1, 0, \"\")\n (\n _,\n _,\n did_pause_lrd,\n requeue_line_info,\n new_tokens,\n ) = LinkReferenceDefinitionHelper.process_link_reference_definition(\n parser_state, empty_position_marker, \"\", \"\", \"\", 0, 0\n )\n assert not did_pause_lrd\n force_default_handling = True\n elif parser_state.token_stack[-1].is_code_block:\n stack_bq_count = parser_state.count_of_block_quotes_on_stack()\n if stack_bq_count:\n POGGER.debug(\"hbl>>code block within block quote\")\n else:\n POGGER.debug(\"hbl>>code block\")\n new_tokens = []\n elif parser_state.token_stack[-1].is_html_block:\n POGGER.debug(\"hbl>>check_blank_html_block_end\")\n new_tokens = HtmlHelper.check_blank_html_block_end(parser_state)\n elif (\n is_processing_list\n and parser_state.token_document[-1].is_blank_line\n and parser_state.token_document[-2].is_list_start\n ):\n POGGER.debug(\"hbl>>double blank in list\")\n new_tokens, _ = TokenizedMarkdown.__close_open_blocks(\n parser_state, until_this_index=in_index, include_lists=True\n )\n\n if from_main_transform:\n POGGER.debug(\"hbl>>__handle_blank_line_in_block_quote\")\n TokenizedMarkdown.__handle_blank_line_in_block_quote(parser_state)\n\n if force_default_handling or new_tokens is None:\n POGGER.debug(\"hbl>>default blank handling-->cob\")\n n_tokens, _ = TokenizedMarkdown.__close_open_blocks(\n parser_state,\n only_these_blocks=close_only_these_blocks,\n include_block_quotes=do_include_block_quotes,\n was_forced=True,\n )\n if new_tokens:\n new_tokens.extend(n_tokens)\n else:\n new_tokens = n_tokens\n\n POGGER.debug(\"hbl>>new_tokens>>$\", new_tokens)\n assert non_whitespace_index == len(input_line)\n if not (requeue_line_info and requeue_line_info.force_ignore_first_as_lrd):\n new_tokens.append(\n BlankLineMarkdownToken(extracted_whitespace, position_marker)\n )\n POGGER.debug(\"hbl>>new_tokens>>$\", new_tokens)\n\n return new_tokens, requeue_line_info", "def parseLine(self, line):\n\n # Bail out on lines with a malformed timestamp\n try:\n timestamp = time.mktime(time.strptime(line[1:25], \"%a %b %d %H:%M:%S %Y\"))\n except:\n return\n \n text = line[27:]\n \n if self.myname: \n self.attendance.mark(timestamp, self.myname)\n text = self.re_myname.sub(self.myname + ' ', text) \n \n damage = self.re_damage.search(text)\n #damage = False\n 
death = self.re_death.search(text)\n #death = False\n miss = self.re_miss.search(text)\n #miss = False\n #defensive = self.re_defensive.search(text)\n defensive = False\n loot = self.re_loot.search(text)\n attendance = self.re_attendance.search(text)\n if damage:\n (attacker, atktype, defender, amount, nonmelee) = damage.groups()\n if nonmelee:\n atktype = 'non-melee'\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, int(amount))\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif miss:\n (attacker, atktype, defender) = miss.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, 'miss')\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif defensive:\n (attacker, atktype, defender, defensetype) = defensive.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, defensetype)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif death:\n (defender, junk, attacker) = death.groups()\n if junk.count('have slain'):\n (defender, attacker) = (attacker, defender)\n # Use PC deaths to track their attendance\n if defender.count(' ') == 0:\n self.attendance.mark(timestamp, defender)\n elif attacker.count(' ') == 0:\n self.kills.addKill(timestamp, defender)\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.addDeath(timestamp, attacker, defender)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n elif loot:\n (looter, item) = loot.groups()\n self.loot.addLoot(timestamp, looter, item)\n self.attendance.mark(timestamp, looter)\n elif attendance:\n attendee = attendance.group(1)\n self.attendance.mark(timestamp, attendee)", "def _parse_line(self):\n #if self.debug: print '\\t ' + str(self._current_node)\n\n # PyParser setParseAction's actually execute during parsing,\n # So we need closures in order to change the current scope\n\n \n def depth_from_indentation(function):\n \"\"\" Set the depth as the start of the match \"\"\"\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap\n \n def depth_from_match(function):\n \"\"\" Set the depth as the start of the match \"\"\"\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap \n\n def depth_from_nemo_tag(function):\n \"\"\" Start of the match is where the nemo tag is. 
Pass the other values to the wrapped function \"\"\"\n def wrap(start, values):\n # print 'Depth %d | %d %s' %(self._depth, start, values)\n self._depth = start\n tokens = values[1]\n self._current_node = function(tokens)\n #print self._current_node\n return ''\n\n return wrap\n\n\n\n # Match HTML\n html = restOfLine\n html.setParseAction(depth_from_indentation(self._add_html_node))\n\n # Match Mako control tags\n\n # All nemo statements are like mako statements, and must begin with %\n nemo_tag = Literal('%')\n\n # Mako allows for for/if/while statements\n begin = Keyword('for') | Keyword('if') | Keyword('while')\n middle = Keyword('else') | Keyword('elif')\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\n control = nemo_tag + (begin | middle | end)\n\n # When we match a control statement (for/if/while), the body will be indented and we'll have to account for it\n # We're using _add_nesting_mako_control_node / _add_mako_middle_node / _add_mako_control_leaf do the same thing\n # They're only separate functions due to testing\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\n\n # Match HTML tags\n argument_name = Word(alphas,alphanums+\"_-:\")\n argument_value = quotedString\n regular_argument = argument_name + Literal('=') + argument_value\n\n # Match Nemo tags\n # These are . and #\n class_name = Literal('.').setParseAction(lambda x: 'class=') # Transform . into class= within the text\n id_name = Literal('#').setParseAction(lambda x: 'id=') # Transform # into id= within the text\n special_argument = (class_name | id_name) + argument_value\n\n # Match argument (HTML tags + Nemo Tags)\n argument = Combine(special_argument) | Combine(regular_argument)\n\n # Match single Nemo statement (Part of a multi-line)\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\n\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\n\n # Setup to match a multi-statement expression. \n # These, nemo statements are separated by |. 
Anything after || is treated as html \n separator = Literal('|').suppress()\n \n # Match a list of nemo statements\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\n \n # Match final HTML\n inline_html = html.copy()\n html_separator = Literal('||')\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\n \n # A nemo multi-line statement can be any number of nemo statements separated by | then optionally terminated with an HTML statement\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\n\n # Match empty Nemo statement\n empty = nemo_tag + Empty()\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\n\n # Match unused Mako tags\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\n\n # Matches General\n nemo = (control | nemo_multi | empty)\n line = mako_tags | nemo | html\n\n # Depth Calculation (deprecated?)\n self._depth = len(self._c) - len(self._c.strip())\n\n line.parseString(self._c)", "def FilterLine(self, a_line):\n return a_line", "def highlightBlock(self, text, prevLineData):\n #self.parser.parseAndPrintBlockTextualResults(text, prevLineData)\n return self.parser.highlightBlock(text, prevLineData)", "def GetLine(line):\r\n pass", "def ProcessLine(line, rules, processing, previous_line_data):\n line_data = {'line':line, 'line_offset':processing['offset_processed']}\n \n # Update with always-included data, like glob keys, and the component\n line_data.update(processing['data'])\n \n # Test if this line is multi-line (positive test)\n is_multi_line = False\n for rule in rules:\n if rule.get('multi line regex test', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n # Negative regex test\n for rule in rules:\n if rule.get('multi line regex not', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n \n # If this is multi_line and we have a real previous line to embed this data in\n if is_multi_line and previous_line_data != None:\n #print 'Multiline: %s' % line\n if 'multiline' not in previous_line_data:\n previous_line_data['multiline'] = []\n \n previous_line_data['multiline'].append(line)\n\n\n # Only process rules on first lines (not multi lines), and return the line_data to be the next line's previous_line_data\n if not is_multi_line:\n #print line\n \n # Start with: We havent found a match yet\n match_found = False\n \n for item in rules:\n # Skip the multi-line regext test/not rules\n if item.get('multi line regex test', False) or item.get('multi line regex not', False):\n continue\n \n # Break out our terms for this rule item\n terms = re.findall('%\\((.*?)\\)s', item['regex'])\n #print item['regex']\n #print terms\n \n regex = item['regex']\n \n # Pre-processing step, to remove any conflicting characters with the rest of the regex which need to be escaped/sanitized\n for term in terms:\n regex = regex.replace('%%(%s)s' % term, 'MATCHMATCHMATCH')\n \n regex = SanitizeRegex(regex)\n regex = regex.replace('MATCHMATCHMATCH', '(.*?)')\n \n #print '--- %s' % item['id']\n #print regex\n #print line\n \n regex_result = re.findall(regex, line)\n #print regex_result\n if regex_result:\n \n # Python does something stupid with multiple variables, so pull them out of the embedded tuple it adds to the list\n if type(regex_result[0]) == tuple:\n regex_result = regex_result[0]\n \n for count in range(0, len(terms)):\n #print '%s: %s: %s' % (count, terms[count], 
regex_result[count])\n line_data[terms[count]] = regex_result[count]\n \n #print regex\n #print 'MATCHED! %s' % regex\n #print regex_result\n \n match_found = True\n \n # Save the line match ID, so we can reference it for markup/state information\n line_data['__rule_id__'] = item['id']\n \n break\n \n return line_data\n \n # Else, this is multi-line, so return it to continue to be the next line's previous_line_data\n else:\n #TODO(g): Save this multi-line data every time? Otherwise when does it get saved out?\n pass\n \n return previous_line_data", "def GetLinePostProcess(self):\r\n retline = None\r\n outline = None\r\n try:\r\n retline= str(self.file.readline())\r\n except IOError:\r\n self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, \"cannot read a line from\" )\r\n finally: \r\n #outline1 = retline.replace(\"/\",\"\")\r\n if( (retline !=\"\") and (retline !=\"\\n\")) :\r\n outline = str(\"\")\r\n az_range=range(97,123)\r\n AZ_range = range (65, 91)\r\n val_range = range (48,58)\r\n space_range = range (32, 33)\r\n for i in range(len(retline)):\r\n value = ord(retline[i] )\r\n if ( (value in az_range) or (value in AZ_range) or (value in val_range) or (value in space_range) ):\r\n outline = \"\".join([outline,retline[i]])\r\n else:\r\n outline = \"\".join([outline,\"_\"])\r\n '''\r\n if( (retline[i] != \"/\") and (retline[i] != \"&\") and (retline[i] != \"\\\\\") and (retline[i] != \"%\") and (retline[i] != \"#\") and (retline[i] != \"_\") and (retline[i] != '\"') and (retline[i] != \"@\") and (retline[i] != \":\") and (retline[i] != \"\\n\")):\r\n #charac = str(retline[i].encode('ascii','ignore'))\r\n if(ord(retline[i]) < 128):\r\n outline = \"\".join([outline,retline[i]])\r\n ''' \r\n return outline\r\n #return unicodedata.normalize('NFKD', outline).encode('ascii','ignore')\r", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def decodeline(self, line):\n result = ApacheLogLine()\n result.full_line = line\n linepatternmatch = self._linepattern.match(line)\n if linepatternmatch:\n result.hostname = linepatternmatch.group(1)\n result.user = linepatternmatch.group(2)\n if result.user == 
'-':\n result.user = ''\n (result.accesstime_seconds, result.serveroffset) = self.parsedate(linepatternmatch.group(3))\n result.accesstime_string = stringdate(result.accesstime_seconds, offset=result.serveroffset)\n result.file = linepatternmatch.group(4)\n result.code = linepatternmatch.group(5)\n result.code_description = self._codetranslator.get_description(result.code)\n result.size = linepatternmatch.group(6)\n if result.size == '-':\n result.size = 0\n result.referer = linepatternmatch.group(7)\n if result.referer == '-':\n result.referer = ''\n result.browser = linepatternmatch.group(8)\n else:\n self._notparsable += 1\n warn(\"The line '%s' could not be parsed\" % line)\n return None\n if self._line_fits_pattern(result):\n self._acceptedlines += 1\n return result\n else:\n self._rejectedlines += 1\n return None", "def interpret_line(self, line, source=None, lineno=None):\n\n pline = self.parser.parse_line(line, source=source, lineno=lineno)\n return self.execute(pline)", "def getLineInformation(line):\n \n pass", "def next_line(self, context, line):", "def parse_display_lines(self):\n is_on = None\n source_name = None\n volume = None\n mute_on = None\n party_mode_on = None\n info = None\n rec_source = None\n zone2_source = None\n zone2_volume = None\n zone3_source = None\n zone3_volume = None\n zone4_source = None\n zone4_volume = None\n\n line0 = self.lines[0]\n if len(line0) != 21:\n _LOGGER.error(\"Display line 1 must be exactly 21 bytes\")\n if (\n line0\n == \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n ):\n is_on = False\n else:\n is_on = True\n source_name = line0[:8].rstrip()\n party_mode_on = line0[10:13] == \"pty\"\n vol_str = line0[14:]\n if (vol_str == \"MUTE ON\") or (vol_str == \" \"):\n mute_on = True\n volume = None\n elif vol_str[0:3] != \"VOL\":\n _LOGGER.error(\"Could not verify VOL string: %s\", vol_str)\n else:\n mute_on = False\n volume = int(vol_str[3:])\n\n line1 = self.lines[1]\n if len(line1) != 21:\n _LOGGER.error(\"Display line 2 must be exactly 21 bytes\")\n if (\n line1\n == \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n ):\n pass\n else:\n info = line1.strip().replace(\"\\x19\", \"II\")\n if line1[:9] == \" REC \":\n rec_source = line1[9:].rstrip()\n elif line1[:9] == \" ZONE2 \":\n zone2_source = line1[9:].rstrip()\n elif line1[:14] == \" ZONE2 VOL \":\n zone2_volume = int(line1[14:16])\n elif line1[:9] == \" ZONE3 \":\n zone3_source = line1[9:].rstrip()\n elif line1[:14] == \" ZONE3 VOL \":\n zone3_volume = int(line1[14:16])\n elif line1[:9] == \" ZONE4 \":\n zone4_source = line1[9:].rstrip()\n elif line1[:14] == \" ZONE4 VOL \":\n zone4_volume = int(line1[14:16])\n\n return {\n \"is_on\": is_on,\n \"source_name\": source_name,\n \"volume\": volume,\n \"mute_on\": mute_on,\n \"party_mode_on\": party_mode_on,\n \"info\": info,\n \"rec_source\": rec_source,\n \"zone2_source\": zone2_source,\n \"zone2_volume\": zone2_volume,\n \"zone3_source\": zone3_source,\n \"zone3_volume\": zone3_volume,\n \"zone4_source\": zone4_source,\n \"zone4_volume\": zone4_volume,\n }", "def parse_lines(self, start_line=0, end_line=False):\n if end_line is False: end_line = len(self.file_ltxt)\n\n lines = self.file_ltxt\n self.E_str = \"parse_lines\"\n self.line_num = start_line\n\n # Loop over lines and parse\n while self.line_num < end_line:\n line = lines[self.line_num].strip()\n\n if line == \"echo\": print(\"\")\n\n # Parse any variables\n 
elif self.line_declarations['variable'](line):\n self.parse_variable_line(line)\n\n # Parse any file loading commands\n elif self.line_declarations['load'](line):\n self.parse_load_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['plot'](line):\n self.parse_plot_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['write'](line):\n self.parse_write_cmd(line)\n\n # Parse any math commands\n elif self.line_declarations['math'](line):\n self.parse_math_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['echo'](line):\n self.parse_echo_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['calc'](line):\n self.parse_calc_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['set'](line):\n self.parse_set_cmd(line)\n\n # Parse any shell commands\n elif self.line_declarations['shell'](line):\n self.parse_shell_cmd()\n\n # Parse any for loop commands\n elif self.line_declarations['for'](line):\n self.parse_for_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['script'](line):\n self.parse_script_cmd(line)\n\n elif self.line_declarations['inline_code'](line):\n getattr(self, f\"parse_{line.split()[0]}_cmd\")(line)\n\n elif self.line_declarations['if'](line):\n self.parse_if_cmd(line)\n\n # elif self.line_declarations['splice'](line):\n # self.parse_splice_cmd(line)\n\n elif self.line_declarations['glue'](line):\n self.parse_glue_cmd(line)\n\n elif self.line_declarations['exit'](line):\n print(\"\\n\\nStopped Code -exit was called.\")\n raise SystemExit\n\n # The end of control statements\n elif '}' in line:\n pass\n\n # Print a warning about unknown line\n else:\n self.print_warning(f\"I don't understand a line: '{line}'\")\n\n self.line_num += 1", "def lineparse(inline, options=None, **keywargs):\n p = LineParser(options, **keywargs)\n return p.feed(inline)", "def _parse_line(self, line):\n\n line_nr = self._current_line_nr.__next__()\n\n self.logger.debug('processing line_nr:{}'.format(line_nr))\n\n try:\n\n element_id = re.compile(\"^<([a-zA-Z0-9]+) *\").search(line).group(1)\n attributes_start_pos = len(element_id)+1\n attributes_end_pos = line.find(\">\")\n attributes = line[attributes_start_pos:attributes_end_pos]\n\n args = {\n 'name': element_id,\n 'id': self._current_item_id.__next__(),\n 'line_nr': line_nr,\n 'parent_id': self._get_last_unclosed_element_id()\n }\n\n self._current_element = Element(**args)\n self._parse_attributes(attributes)\n self._elements.append(self._current_element)\n\n line = line[attributes_end_pos+1:]\n\n except AttributeError:\n element_id = None\n\n try:\n\n end_tag = re.compile(\"</(.+)>$\").search(line).group(1)\n last_element_id = self._get_last_unclosed_element_id()\n\n self.get_element_by_id(last_element_id).set_line_end(line_nr)\n\n self.logger.debug('last_element_id:{} line_nr:{}'.format(\n last_element_id,\n line_nr\n )\n )\n\n len_end_tag = len(end_tag)+3\n line = line[:-len_end_tag]\n\n except AttributeError:\n end_tag = None\n\n if element_id and end_tag and len(line) > 0:\n self._current_element.add_content(line)", "def get_line(self, cluster=None):\n# print self._line\n# index = self._line.index(\":\")\n# half_left = self._line[:index]\n# r_index = self._line[index:].index(\" \")\n# half_right = self._line[index+r_index:]\n# if cluster == None:\n# cluster = self._speaker \n# self._line = half_left+\":\"+cluster+\" \"+half_right\n return self._line", "def fetch_data(self, document, line):\n index = self.resolve_ref(document)\n 
return line[index]", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def parse(cls, line):\r\n raise NotImplementedError", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def _get_line(self):\n if len(self.lines) > 0:\n # Get the next line\n return self.lines.pop(0)\n # packets are 8192 bytes in size\n # for packet in self.s3File :\n while self.packet_counter * CsvAbstractReader.BUFFER_SIZE <= self._get_file_size():\n\n success, packet = self._get_next_packet()\n if not success:\n break\n self.packet_counter += 1\n\n # Get the current lines\n current_bytes = self.unprocessed + packet\n self.lines = _split_lines(current_bytes)\n\n # edge case if the packet was filled with newlines only try again\n if len(self.lines) == 0:\n continue\n\n # last line still needs processing save and reuse\n self.unprocessed = self.lines.pop()\n if len(self.lines) > 0:\n # Get the next line\n return self.lines.pop(0)\n self.is_finished = True\n\n if len(self.unprocessed) < 5:\n # Got an extra line from a line break on the last line\n self.extra_line = True\n return self.unprocessed" ]
[ "0.65719175", "0.6142653", "0.6092511", "0.60456854", "0.6037642", "0.6006456", "0.58069974", "0.57957816", "0.5785322", "0.5602688", "0.56008947", "0.55998373", "0.5596856", "0.5575851", "0.5551373", "0.5543945", "0.5474812", "0.5460095", "0.5457196", "0.54539376", "0.5431522", "0.54156655", "0.5397605", "0.53955096", "0.53933537", "0.5377784", "0.5364032", "0.5363571", "0.5356153", "0.5344422" ]
0.6324044
1
Check if text at given position is code
def isCode(self, lineData, column):
    return self._getTextType(lineData, column) == ' '
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsNonCode(self, pos):\n return self.IsComment(pos) or self.IsString(pos)", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx == -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def check_message_for_code(in_lines):\n global _is_good_code\n # Loop through every line, and track its index\n for index, line in enumerate(in_lines):\n # Remove all tabs and newlines and bad stuff\n line = re.sub('\\s+', '', line)\n # Check if this is formatted code.\n if line.find('```') >= 0:\n print(line.find('```'))\n print(\"This code is fine, probably\")\n _is_good_code = True\n return\n # Check for code-like stuffs :D\n else:\n _check_last_character(index, line, ';')\n _check_last_character(index, line, '{')\n _check_last_character(index, line, '}')\n _check_last_character(index, line, ')')", "def is_position(position):\n return isinstance(position, str) and len(position) == 2 and POS_PATTERN.match(position)", "def isAMANDATrig(string, pos):\n return string == 0 and pos == 92", "def is_code_cell(cell):\n return cell[\"cell_type\"] == \"code\"", "def isIceAct(string, pos):\n return string == 0 and pos == 1", "def is_code(self) -> bool:\n return any(seg.is_code for seg in self.segments)", "def is_code(self, address):\n return self.is_address_of_type(address, MemoryType.Code)", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def isAMANDASync(string, pos):\n 
return string == 0 and pos == 91", "def iscode(object):\r\n return isinstance(object, types.CodeType)", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def verify_text(self, text):\n pass", "def match(self, context, line):\n\t\t\n\t\treturn line.kind == 'code' and line.partitioned[0] in self._both", "def is_code_section(addr):\n # The image file is considered all instructions, it is just a raw binary\n # image starting at 0.\n for section_addr, section_name in section_addrs.items(): \n if (section_name == '.text') or (\n section_name == 'image') or (\n section_name == 'reset') or (\n section_name == '.init'):\n code_start = section_addr\n code_bytes = int(section_bytes[section_name])\n code_end = code_start + code_bytes\n if code_start <= addr < code_end:\n return True\n\n return False", "def __set_has_codeblock(html_data=str):\n try:\n find = \"code\"\n bsoup = BeautifulSoup(html_data, \"html.parser\")\n for child in bsoup.find_all(find):\n child.string = constants.QUESTION_HAS_CODEBLOCK_KEY\n return bsoup.prettify()\n except TypeError as error:\n print(\"TypeError in text_processor.__set_has_codeblock\", error)\n return None", "def is_candidate(line):\n line = line.lower()\n line = prepare_text_line(line)\n return (has_content(line) and any(s in line for s in copyrights_hint.statement_markers))", "def is_marked(self):\n\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n for c in self.case_text:\n if c['pos0'] <= pos0 <= c['pos1']:\n return True\n if c['pos0'] <= pos1 <= c['pos1']:\n return True\n return False", "def IsString(self, pos):\n style = self.GetStyleAt(pos)\n return self.FindTagById(style) in ('string_style', 'char_style')", "def test_code_comment_success(self):\n found = False\n pyint = Interpreter()\n try:\n pyint.run(code=BF_CODE_COMMENT)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def is_encoded(self,text):\n \n try:\n str(text)\n except:\n return False\n else:\n return True", "def isValid(text):\n return bool(re.search(r'\\blight\\b', text, re.IGNORECASE))", "def matches(self, text):\n return text == self.command", "def is_valid_python(code: str) -> bool:\n try:\n ast.parse(code)\n except SyntaxError:\n return False\n return True", "def is_code_ended(self) -> bool:" ]
[ "0.65709645", "0.63682026", "0.63105905", "0.62974036", "0.6263238", "0.62077844", "0.6141499", "0.6112447", "0.6061554", "0.599065", "0.5977286", "0.59511393", "0.5900852", "0.58948094", "0.5852603", "0.5815239", "0.57420677", "0.5670188", "0.5653685", "0.56366485", "0.5626324", "0.55965143", "0.55898166", "0.55649614", "0.5557158", "0.5463306", "0.545898", "0.54488224", "0.5443379", "0.54238015" ]
0.71405375
0
Check if text at given position is a block comment
def isBlockComment(self, lineData, column):
    return self._getTextType(lineData, column) == 'b'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ingest_c_block_comments(self, line, position):\n\n pos = position\n while self._in_block_comment and pos < len(line):\n if pos + 1 < len(line) and line[pos] == '*' and line[pos + 1] == '/':\n self._in_block_comment = False\n pos += 2\n pos += 1\n return pos - position", "def IsComment(self, pos):\n pos = max(0, pos-1)\n return 'comment' in self.FindTagById(self.GetStyleAt(pos))", "def test_does_not_match_block_comments(self):\n\n comment = dedent(\"\"\"\\\n --[[\n Hello, World!\n --]]\"\"\")\n\n script = rbxmx.ScriptElement(source=comment)\n first_comment = script.get_first_comment()\n\n assert first_comment is None", "def __ingest_c_comment_start(self, line, pos):\n\n if line[pos] == '/' and len(line) > pos + 1:\n if line[pos + 1] == '/':\n return -1\n elif line[pos + 1] == '*':\n self._in_block_comment = True\n return 2\n return 0", "def block_comment(self):\n while (\n not (self.peek() == \"*\" and self.peek_next() == \"/\")\n and not self.is_at_end()\n ):\n if self.peek() == \"\\n\":\n self.line += 1\n self.advance()\n\n if self.peek() == \"*\" and self.peek_next() == \"/\":\n self.advance(spaces=2)\n\n return None", "def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block", "def isComment(self, lineData, column):\n return self._getTextType(lineData, column) in 'cbh'", "def visit_BlockComment(self, node):\n\n self.statement(node, '# ', node.text)", "def is_comment(self) -> bool: # pragma: no cover TODO?\n return all(seg.is_comment for seg in self.segments)", "def is_cmt(line,cmt):\n\n if len(line)==1:\n return False\n else:\n for i in range(len(line)):\n if line[i]!=' ' and line[i]!='\\t':\n if len(line[i:])>len(cmt):\n if line[i:i+len(cmt)]==cmt:\n return True\n else:\n break\n return False", "def _is_comment_line(self):\n pattern = re.compile(r\"^(\\s)*(//)+\")\n return pattern.search(self._line)", "def is_comment(self):\n return (self.__type & NODE_COMMENT) == NODE_COMMENT", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx == -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)", "def start_comments(self, a, text):\n logging.debug(\"in comments\" + text)\n logging.debug(self.bracket_nesting_level)\n\n if self.bracket_nesting_level 
== 0:\n self.bracket_nesting_level += 1\n logging.debug(\"start state comments\")\n if Str(text) == languages_comment_start1:\n self.begin(\"comments\")\n else:\n self.begin(\"comments2\")\n elif self.bracket_nesting_level == 1:\n self.bracket_nesting_level -= 1\n self.begin('')", "def check_comments (character: str, is_comment: bool, counter_lines: str) -> list:\n line_comment = \"\"\n if (is_comment == False) and (character == '{'):\n line_comment = counter_lines\n is_comment = True\n elif (is_comment == True) and (character == '}'):\n is_comment = False\n\n return [is_comment, line_comment]", "def line_is_comment(line: str) -> bool:\n # TODO use existing tokens\n try:\n first_token = next(tokenize.generate_tokens(io.StringIO(line).readline))\n except tokenize.TokenError:\n # Assume that a token error happens because this is *not* a comment\n return False\n return first_token.type == tokenize.COMMENT", "def has_inside(block):\n return comma(block[0]) if block else '#N/A'", "def _is_comment_or_blank(line):\n return re.sub(\"#.*\", \"\", line).rstrip() == \"\"", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def identify_comment(code_line):\n discard_between = ['\\'', '\\\"']\n counter = 0\n while counter + 1 <= len(code_line): # Studies each position in the line\n if code_line[counter] in discard_between: # If fortran character is being written jumps to end of char\n jump = code_line[counter+1:].find(code_line[counter])\n if jump == -1:\n raise Exception('Fortran character did not finish being declared from position {}: \\n {}'.format(counter, code_line))\n counter += jump + 1\n if code_line[counter] == '!': # If it finds comment declaration it stores it \n return code_line[counter:]\n break\n counter += 1 # Advances counter\n else: # If it reaches the end of the code without finding comment it returns none\n return None", "def blockcomment(self):\r\n editor = self.get_current_editor()\r\n if editor is not None:\r\n editor.blockcomment()", "def test_no_spelling_error_comments_blocked_region(self, style):\n result = self._spellcheck_lint(\"{s}{e}\\n{s} ```splelling```\",\n style)\n self.assertTrue(result)", "def detecteComments(liste, j, i):\n\n\treturn liste[j][i] == '#' or (i < len(liste[j])-2 and liste[j][i]==\"\\\"\" and liste[j][i+1]==\"\\\"\" and liste[j][i+2]==\"\\\"\")", "def isCommentStyle(self, style):\n return style in [QsciLexerCSS.Comment]", "def comment():", "def isCommentStyle(self, style):\n return style in [QsciLexerJava.Comment,\n QsciLexerJava.CommentDoc,\n QsciLexerJava.CommentLine,\n QsciLexerJava.CommentLineDoc]", "def is_comment_hook(self) -> bool:\n query = (where('message_cid') == self.channel_id) & \\\n (where('message_mid') == self.message_id) & \\\n (where('altype') == int(AlternateType.COMMENT))\n\n return self.handle.table(ALTERNATES_TABLE_NAME).get(query) is not None", "def print_comment_v(text):\n print_comment(text, True)", "def test_like_a_comment(self):\n self.base_test()" ]
[ "0.730411", "0.71887034", "0.68748695", "0.6691965", "0.66720694", "0.6622821", "0.6596969", "0.6570587", "0.64614373", "0.64526194", "0.64248955", "0.63126636", "0.62933534", "0.6258408", "0.61901736", "0.6140033", "0.60635126", "0.60262686", "0.59996486", "0.597242", "0.59587514", "0.5929147", "0.5909215", "0.5903703", "0.587839", "0.5857735", "0.57960975", "0.5785482", "0.5783819", "0.5754232" ]
0.7626524
0
Check if text at given position is a here document
def isHereDoc(self, lineData, column):
    return self._getTextType(lineData, column) == 'h'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _has_page_jump(text):\n # Determines matches with format strings.\n for format_tuple in _FORMAT_STRINGS:\n jump = _get_jump_with_pattern(text, format_tuple)\n if jump:\n return jump\n\n # Recognizes common OCR for \"From page 1\".\n match = _match_pattern(text, r\"(^Frompagel$){e<=3}\")\n if match and text[-1] == 'l':\n return -1", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def is_position(position):\n return isinstance(position, str) and len(position) == 2 and POS_PATTERN.match(position)", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def check_doc(document):\n try:\n assert document[0].startswith(\"#doc \")\n except AssertionError:\n print(\"The document does not start with '#doc' but instead\", document[0])\n doc_id = document[0].split()[1]\n for i in range(1, len(document)):\n line = document[i]\n try:\n assert int(line.split()[0]) == i - 1\n except (ValueError, AssertionError):\n print(\"Document\", doc_id, \"line\", i, \":\", line, \"expect line index\",\n i - 1, \", found\", line.split()[0])", "def _hasMore(self): #$NON-NLS-1$\r\n found = False\r\n self._setIeWord(None)\r\n self._setIeSentence(None)\r\n moveCount = 1 # number of words moved. If this is 0, then there is no 'next word'\r\n if self.currRange and self.endRange and self.startRange:\r\n # end of doc\r\n eod = True\r\n try:\r\n eod = self.currRange.isEqual(self.endRange)\r\n except:\r\n pass\r\n while not found and not eod and self._getWordCount() < 10000 and moveCount > 0:\r\n # move to next word location. (no need to do this for the first word since the range is already positioned at the 1st range)\r\n if not self.currRange.isEqual(self.startRange):\r\n moveCount = self.currRange.moveStart(u\"word\")#$NON-NLS-1$\r\n\r\n # capture the full word.\r\n self.currRange.expand(u\"word\") #$NON-NLS-1$\r\n # get current word\r\n # The word found by IE may include the space (if any) following the word.\r\n w = self.currRange.text\r\n if moveCount > 0 and len(w.strip()) > 0 and not self._isAlphaNum(w.strip()):\r\n self._incWordCount()\r\n self._setIeWord(w)\r\n found = True\r\n eod = self.currRange.isEqual(self.endRange)\r\n return found", "def is_position_availible(self, position):\n return self.positions[position] == ' '", "def has_template(page_text: str) -> bool:\n\n\tpattern = '<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False", "def hasContents():", "def is_marked(self):\n\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n for c in self.case_text:\n if c['pos0'] <= pos0 <= c['pos1']:\n return True\n if c['pos0'] <= pos1 <= c['pos1']:\n return True\n return False", "def has_doc() -> None:", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx 
== -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def text_exists(self, text: str)-> bool:\n result = self.__content.find(text)\n if result == -1:\n return False\n else:\n return True", "def contains(self, mention):\n return self.start <= mention.start and mention.end <= self.end", "def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))", "def home(self):\n while self.document.characters[self.position-1].character != '\\n':\n self.position -= 1\n if self.position == 0:\n # Got to beginning of file before newline\n break", "def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False", "def _can_add_text(self):\n return self.textCursor().selectionStart() >= self._prev_input_indexes[-1]", "def containsCursor(self, textCursor):\n return self.cursor.selectionStart() <= textCursor.position() < \\\n self.cursor.selectionEnd()", "def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def have_doc_extension(l):\r\n if \".doc\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def isAMANDATrig(string, pos):\n return string == 0 and pos == 92", "def is_inside(pos):\r\n\t\trow, col = pos\r\n\t\treturn 0 <= row and row < num_rows and \\\r\n\t\t\t0 <= col and col < num_cols", "def isIceAct(string, pos):\n return string == 0 and pos == 1", "def has_word(self, word)->bool:\n if len(word) == 1:\n chars = word + GNode.CHAR_EOW\n else:\n chars = word[0] + GNode.CHAR_REV + word[1:] + GNode.CHAR_EOW\n cursor = self.root\n for c in chars.lower():\n if c not in cursor.children:\n return False\n else:\n cursor = cursor.children[c]\n return True", "def is_pos(self, term):\n return term in self.pos", "def if_sentence_contains_past_participle(sentence):\n tags = [word_tag[1] for word_tag in sentence.pos_tags]\n result = True if POS.POS_tags.past_participle_tag in tags else False\n return result", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def here(self, type):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n # Check if the token resides on the HIDDEN channel 
and if it is of the\n # provided type.\n return (ahead.channel == Lexer.HIDDEN) and (ahead.type == type)" ]
[ "0.6312338", "0.6036941", "0.60077053", "0.5858111", "0.57945687", "0.5726796", "0.5709392", "0.56847984", "0.5684184", "0.5651844", "0.56465435", "0.55971897", "0.55845344", "0.5449864", "0.5441029", "0.5401152", "0.53863245", "0.5381123", "0.5379395", "0.53566754", "0.5337815", "0.5331231", "0.5330852", "0.53283703", "0.53217566", "0.52911025", "0.52804554", "0.5251062", "0.52094114", "0.5208588" ]
0.707042
0
Get syntax by its xml file name
def _getSyntaxByXmlFileName(self, xmlFileName, formatConverterFunction):
    import qutepart.syntax.loader # delayed import for avoid cross-imports problem

    with self._loadedSyntaxesLock:
        if not xmlFileName in self._loadedSyntaxes:
            xmlFilePath = os.path.join(os.path.dirname(__file__), "data", "xml", xmlFileName)
            syntax = Syntax(self)
            self._loadedSyntaxes[xmlFileName] = syntax
            qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath, formatConverterFunction)

        return self._loadedSyntaxes[xmlFileName]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getSyntaxBySourceFileName(self, name, formatConverterFunction):\n for regExp, xmlFileName in self._extensionToXmlFileName.items():\n if regExp.match(name):\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)\n else:\n raise KeyError(\"No syntax for \" + name)", "def _getSyntaxByLanguageName(self, syntaxName, formatConverterFunction):\n xmlFileName = self._syntaxNameToXmlFileName[syntaxName]\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)", "def find_syntax_name(self, syntaxFile):\n\n content = sublime.load_resource(syntaxFile).strip()\n\n # .tmLanguage (XML)\n if content.startswith('<'):\n matches = self.nameXmlRegex.search(content)\n # .sublime-syntax (YAML)\n else:\n matches = self.nameYamlRegex.search(content)\n\n if matches is None:\n return None\n\n return matches.group(1).strip()", "def _infer_source(self):\n mzid_xml = ET.parse(self.filename)\n root = mzid_xml.getroot()\n name_space = self._get_xml_namespace(root.tag)\n try:\n return root.find(f\".//{name_space}AnalysisSoftware\").attrib[\"name\"]\n except KeyError:\n return None", "def _getSyntaxByMimeType(self, mimeType, formatConverterFunction):\n xmlFileName = self._mimeTypeToXmlFileName[mimeType]\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)", "def get_current_syntax(self, view):\n\n syntaxFile = view.settings().get('syntax')\n\n if syntaxFile not in syntaxInfos:\n syntaxInfos[syntaxFile] = {\n 'fileName' : os.path.splitext(os.path.basename(syntaxFile))[0],\n 'syntaxName' : self.find_syntax_name(syntaxFile),\n }\n\n return [\n v\n for v in syntaxInfos[syntaxFile].values()\n if isinstance(v, str)\n ]", "def _getSyntaxByFirstLine(self, firstLine, formatConverterFunction):\n for pattern, xmlFileName in self._firstLineToXmlFileName.items():\n if fnmatch.fnmatch(firstLine, pattern):\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)\n else:\n raise KeyError(\"No syntax for \" + firstLine)", "def syntax_text():", "def get_xml(xml_file_path: str):\n root = et.parse(xml_file_path).getroot()\n\n return root", "def get_template(self, name):\n with open(name, 'r+') as open_f:\n template_content = open_f.read()\n return template_content", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)", "def sniff( self, filename ):\n # TODO - Use a context manager on Python 2.5+ to close handle\n handle = open(filename)\n line = handle.readline()\n handle.close()\n\n # TODO - Is there a more robust way to do this?\n return line.startswith('<?xml ')", "def read_syntax_file(syntax_file):\n # Added encoding='utf8' to make it run on windows pycharm and ubuntu system as well as mac\n with open(syntax_file, encoding='utf8') as syntax:\n syntax_definition = syntax.read()\n\n # Use regex to divide pattern from keyword in a more strict manner\n pattern_syntax = re.compile(r'\"(.+)\": (.+)')\n matches_syntax = pattern_syntax.findall(syntax_definition)\n\n # Loop through lines in the syntax matches and create a dict with key and corresponding pattern\n syntax_dictionary = {}\n for item in matches_syntax:\n syntax_dictionary[item[1]] = item[0]\n return syntax_dictionary", "def sample_xml(opts,file):\r\n with open(file, opts) as xml:\r\n return xml.read()", "def get(name):\n\n filename 
= find(name)\n if filename == None:\n return name\n return open(filename).read()", "def load_asterix_category_format(k):\n global filenames\n try:\n __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..'))\n\n # Look for file in current executing directory\n path_filename1 = filenames[k]\n\n # On default directory (absolute)\n path_filename2 = __basePath__ + \"/\" +filenames[k]\n\n # On default directory (relative)\n path_filename3 = os.path.dirname(os.path.realpath(__file__)) + \"/xml/\" + filenames[k]\n\n if os.path.isfile(path_filename1):\n # print \"Loading file '%s'\" % path_filename1\n return minidom.parse(path_filename1)\n\n if os.path.isfile(path_filename2):\n # print \"Loading file '%s'\" % path_filename2\n return minidom.parse(path_filename2)\n\n if os.path.isfile(path_filename3):\n # print \"Loading file '%s'\" % path_filename3\n return minidom.parse(path_filename3)\n\n return None\n\n except:\n traceback.print_exc()\n\n return None", "def parse(self, fileName):\n from lxml import etree\n \n schemadoc = etree.parse(StringIO(\"\"\"\\\n<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <!-- the runscript -->\n <xs:complexType name=\"runscriptType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"machine\" type=\"machineType\"/>\n <xs:element name=\"system\" type=\"systemType\">\n <!-- setting keys have to be unique per system/version-->\n <!-- unfortunately i have found no way to create a link between settings and systems -->\n <!-- schematron should be able to do this but the lxml implementation seems to be incomplete-->\n <xs:unique name=\"settingKey\">\n <xs:selector xpath=\"setting\"/>\n <xs:field xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n <xs:element name=\"config\" type=\"configType\"/>\n <xs:element name=\"benchmark\" type=\"benchmarkType\"/>\n <xs:element name=\"pbsjob\" type=\"pbsjobType\"/>\n <xs:element name=\"condorjob\" type=\"condorjobType\"/>\n <xs:element name=\"seqjob\" type=\"seqjobType\"/>\n <xs:element name=\"project\" type=\"projectType\"/>\n </xs:choice>\n <xs:attribute name=\"output\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a project -->\n <xs:complexType name=\"projectType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"runspec\" type=\"runspecType\"/>\n <xs:element name=\"runtag\" type=\"runtagType\"/>\n </xs:choice>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"job\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a machine -->\n <xs:complexType name=\"machineType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"cpu\" type=\"xs:token\" use=\"required\"/>\n <xs:attribute name=\"memory\" type=\"xs:token\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a system -->\n <xs:complexType name=\"systemType\">\n <xs:choice minOccurs=\"1\" maxOccurs=\"unbounded\">\n <xs:element name=\"setting\">\n <xs:complexType>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"tag\">\n <xs:simpleType>\n <xs:list itemType=\"nameType\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"ppn\" type=\"xs:positiveInteger\"/>\n <xs:attribute name=\"procs\">\n <xs:simpleType>\n <xs:list itemType=\"xs:integer\"/>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"pbstemplate\" type=\"xs:string\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute 
name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"measures\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"config\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- generic attributes for jobs-->\n <xs:attributeGroup name=\"jobAttr\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"timeout\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"runs\" type=\"xs:positiveInteger\" use=\"required\"/>\n <xs:anyAttribute processContents=\"lax\"/>\n </xs:attributeGroup>\n \n <!-- a seqjob -->\n <xs:complexType name=\"seqjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"parallel\" type=\"xs:positiveInteger\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a pbsjob -->\n <xs:complexType name=\"pbsjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n </xs:complexType>\n\n <!-- a condorjob -->\n <xs:complexType name=\"condorjobType\">\n <xs:attributeGroup ref=\"jobAttr\"/>\n <xs:attribute name=\"script_mode\" use=\"required\">\n <xs:simpleType>\n <xs:restriction base=\"xs:string\">\n <xs:enumeration value=\"single\"/>\n <xs:enumeration value=\"timeout\"/>\n <xs:enumeration value=\"memout\"/>\n </xs:restriction>\n </xs:simpleType>\n </xs:attribute>\n <xs:attribute name=\"walltime\" type=\"timeType\" use=\"required\"/>\n <xs:attribute name=\"condortemplate\" type=\"xs:string\" use=\"required\"/>\n <xs:attribute name=\"basedir\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n\n\n <!-- a config -->\n <xs:complexType name=\"configType\">\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"template\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n \n <!-- a benchmark -->\n <xs:complexType name=\"benchmarkType\">\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:choice>\n <xs:element name=\"files\">\n <xs:complexType>\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"add\">\n <xs:complexType>\n <xs:attribute name=\"file\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"folder\">\n <xs:complexType>\n <xs:sequence minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"ignore\">\n <xs:complexType>\n <xs:attribute name=\"prefix\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n <xs:attribute name=\"path\" type=\"xs:string\" use=\"required\"/>\n </xs:complexType>\n </xs:element>\n </xs:choice>\n </xs:sequence>\n <xs:attribute name=\"name\" type=\"nameType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- common attributes for runspec/runtag -->\n <xs:attributeGroup name=\"runAttr\">\n <xs:attribute name=\"machine\" type=\"nameType\" use=\"required\"/>\n <xs:attribute name=\"benchmark\" type=\"nameType\" use=\"required\"/>\n </xs:attributeGroup>\n \n <!-- a runspec -->\n <xs:complexType name=\"runspecType\">\n <xs:attribute name=\"system\" type=\"nameType\" 
use=\"required\"/>\n <xs:attribute name=\"version\" type=\"versionType\" use=\"required\"/>\n <xs:attribute name=\"setting\" type=\"nameType\" use=\"required\"/>\n <xs:attributeGroup ref=\"runAttr\"/>\n </xs:complexType>\n \n <!-- a runtag -->\n <xs:complexType name=\"runtagType\">\n <xs:attributeGroup ref=\"runAttr\"/>\n <xs:attribute name=\"tag\" type=\"tagrefType\" use=\"required\"/>\n </xs:complexType>\n \n <!-- simple types used througout the above definitions -->\n <xs:simpleType name=\"versionType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9a-zA-Z._-]+\"/>\n </xs:restriction>\n </xs:simpleType>\n\n <xs:simpleType name=\"timeType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[0-9]+(:[0-9]+(:[0-9]+)?)?\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"tagrefType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"(\\*all\\*)|([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*)([ ]*\\|[ ]*([A-Za-z_\\-0-9]+([ ]*[A-Za-z_\\-0-9]+)*))*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <xs:simpleType name=\"nameType\">\n <xs:restriction base=\"xs:string\">\n <xs:pattern value=\"[A-Za-z_\\-0-9]*\"/>\n </xs:restriction>\n </xs:simpleType>\n \n <!-- the root element -->\n <xs:element name=\"runscript\" type=\"runscriptType\">\n <!-- machine keys -->\n <xs:keyref name=\"machineRef\" refer=\"machineKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@machine\"/>\n </xs:keyref>\n <xs:key name=\"machineKey\">\n <xs:selector xpath=\"machine\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- benchmark keys -->\n <xs:keyref name=\"benchmarkRef\" refer=\"benchmarkKey\">\n <xs:selector xpath=\"project/runspec|project/runall\"/>\n <xs:field xpath=\"@benchmark\"/>\n </xs:keyref>\n <xs:key name=\"benchmarkKey\">\n <xs:selector xpath=\"benchmark\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- system keys -->\n <xs:keyref name=\"systemRef\" refer=\"systemKey\">\n <xs:selector xpath=\"project/runspec\"/>\n <xs:field xpath=\"@system\"/>\n <xs:field xpath=\"@version\"/>\n </xs:keyref>\n <xs:key name=\"systemKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@name\"/>\n <xs:field xpath=\"@version\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"configRef\" refer=\"configKey\">\n <xs:selector xpath=\"system\"/>\n <xs:field xpath=\"@config\"/>\n </xs:keyref>\n <xs:key name=\"configKey\">\n <xs:selector xpath=\"config\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- config keys -->\n <xs:keyref name=\"jobRef\" refer=\"jobKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field xpath=\"@job\"/>\n </xs:keyref>\n <xs:key name=\"jobKey\">\n <xs:selector xpath=\"seqjob|pbsjob|condorjob\"/>\n <xs:field xpath=\"@name\"/>\n </xs:key>\n <!-- project keys -->\n <xs:unique name=\"projectKey\">\n <xs:selector xpath=\"project\"/>\n <xs:field xpath=\"@name\"/>\n </xs:unique>\n </xs:element>\n</xs:schema>\n\"\"\"))\n schema = etree.XMLSchema(schemadoc)\n\n doc = etree.parse(open(fileName))\n schema.assertValid(doc)\n \n root = doc.getroot()\n run = Runscript(root.get(\"output\"))\n\n for node in root.xpath(\"./pbsjob\"):\n attr = self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = PbsJob(node.get(\"name\"), node.get(\"memout\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./condorjob\"):\n attr = 
self._filterAttr(node, [\"name\", \"memout\", \"timeout\", \"runs\", \"ppn\", \"procs\", \"script_mode\", \"walltime\"])\n job = CondorJob(node.get(\"name\"), tools.xmlTime(node.get(\"memout\")), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), node.get(\"script_mode\"), tools.xmlTime(node.get(\"walltime\")), node.get(\"condortemplate\"),node.get(\"basedir\"), attr)\n run.addJob(job)\n\n for node in root.xpath(\"./seqjob\"):\n attr = self._filterAttr(node, [\"name\", \"timeout\", \"runs\", \"parallel\"])\n job = SeqJob(node.get(\"name\"), tools.xmlTime(node.get(\"timeout\")), int(node.get(\"runs\")), int(node.get(\"parallel\")), attr)\n run.addJob(job)\n \n for node in root.xpath(\"./machine\"):\n machine = Machine(node.get(\"name\"), node.get(\"cpu\"), node.get(\"memory\"))\n run.addMachine(machine)\n\n for node in root.xpath(\"./config\"):\n config = Config(node.get(\"name\"), node.get(\"template\"))\n run.addConfig(config)\n \n compoundSettings = {}\n sytemOrder = 0 \n for node in root.xpath(\"./system\"):\n system = System(node.get(\"name\"), node.get(\"version\"), node.get(\"measures\"), sytemOrder)\n settingOrder = 0\n for child in node.xpath(\"setting\"):\n attr = self._filterAttr(child, [\"name\", \"cmdline\", \"tag\"])\n compoundSettings[child.get(\"name\")] = []\n if \"procs\" in attr:\n procs = [int(proc) for proc in attr[\"procs\"].split(None)]\n del attr[\"procs\"]\n else: procs = [None]\n if \"ppn\" in attr: \n ppn = int(attr[\"ppn\"])\n del attr[\"ppn\"]\n else: ppn = None\n if \"pbstemplate\" in attr:\n pbstemplate = attr[\"pbstemplate\"]\n del attr[\"pbstemplate\"]\n else: pbstemplate = None\n if child.get(\"tag\") == None: tag = set()\n else: tag = set(child.get(\"tag\").split(None))\n for num in procs:\n name = child.get(\"name\")\n if num != None: \n name += \"-n{0}\".format(num)\n compoundSettings[child.get(\"name\")].append(name)\n setting = Setting(name, child.get(\"cmdline\"), tag, settingOrder, num, ppn, pbstemplate, attr)\n system.addSetting(setting)\n settingOrder += 1\n\n run.addSystem(system, node.get(\"config\"))\n sytemOrder += 1\n \n for node in root.xpath(\"./benchmark\"):\n benchmark = Benchmark(node.get(\"name\"))\n for child in node.xpath(\"./folder\"):\n element = Benchmark.Folder(child.get(\"path\"))\n for grandchild in child.xpath(\"./ignore\"):\n element.addIgnore(grandchild.get(\"prefix\"))\n benchmark.addElement(element)\n for child in node.xpath(\"./files\"):\n element = Benchmark.Files(child.get(\"path\"))\n for grandchild in child.xpath(\"./add\"):\n element.addFile(grandchild.get(\"file\"))\n benchmark.addElement(element)\n run.addBenchmark(benchmark)\n \n for node in root.xpath(\"./project\"):\n project = Project(node.get(\"name\"))\n run.addProject(project, node.get(\"job\"))\n for child in node.xpath(\"./runspec\"):\n for setting in compoundSettings[child.get(\"setting\")]: \n project.addRunspec(child.get(\"machine\"),\n child.get(\"system\"),\n child.get(\"version\"),\n setting,\n child.get(\"benchmark\"))\n \n for child in node.xpath(\"./runtag\"):\n project.addRuntag(child.get(\"machine\"), \n child.get(\"benchmark\"),\n child.get(\"tag\"))\n \n return run", "def read(self, path):\r\n return TranslationUnit.from_ast(path, self)", "def getrawxml(fp,fn):\n print(\"starting to get the NRE XML Data from historical file\")\n infile = open(fp+fn,\"r\",encoding=\"utf-8\")\n xml_file = infile.read()\n return xml_file", "def getFileContent(self, filename):\n return xmlee.parse(filename).getroot()", "def example_xml_file42():\n return 
load_xml('datacite-v4.2-full-example.xml')", "def load(path, name=None):\n path, name = get_full_path(path, name)\n doc = nineml.read(path)\n if name is not None:\n elem = doc[name]\n else:\n elem = doc\n return elem", "def example_xml_file():\n return load_xml('datacite-v3.1-full-example.xml')", "def getbyname(self, name, doctype='experiment'):\n\n if doctype not in self.documents:\n self.documents[doctype] = esd.search(self.source, doctype)\n return self.documents[doctype].load_document(name)", "def get_filename(self) -> str:\n\t\treturn self.xml_name", "def name(self):\r\n return conf.lib.clang_getCString(conf.lib.clang_getFileName(self))", "def read_meth(filename, name, window, smoothen=5):\n file_type = file_sniffer(filename)\n logging.info(\"File is of type {}\".format(file_type))\n try:\n if file_type.startswith(\"nanopolish\"):\n return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)\n elif file_type == \"nanocompore\":\n return parse_nanocompore(filename, name, window)\n elif file_type == \"ont-cram\":\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write(\"\\n\\n\\nInput file {} not recognized!\\n\".format(filename))\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise", "def find_xontrib(name):\n if name.startswith(\".\"):\n spec = importlib.util.find_spec(name, package=\"xontrib2\")\n else:\n spec = importlib.util.find_spec(\".\" + name, package=\"xontrib2\")\n return spec or importlib.util.find_spec(name)", "def example_xml_file43():\n return load_xml('datacite-v4.3-full-example.xml')", "def parse(path):\n try:\n return parseString(open(path, \"r\", encoding=\"utf-8\", errors=\"ignore\").read())\n except ExpatError as e:\n # Some neutrino configuration files may contain text data with invalid character ['&'].\n # https://www.w3.org/TR/xml/#syntax\n # Apparently there is an error in Neutrino itself and the document is not initially formed correctly.\n log(XmlHandler.ERROR_MESSAGE.format(path, e))\n\n return XmlHandler.preprocess(path)" ]
[ "0.73123056", "0.7202096", "0.67029333", "0.6100018", "0.5830935", "0.5712048", "0.5494571", "0.54078346", "0.53664255", "0.52780116", "0.5271664", "0.52548873", "0.52026504", "0.51961845", "0.5129509", "0.51132685", "0.5104656", "0.5078642", "0.5061981", "0.50239456", "0.50188565", "0.5005142", "0.49945244", "0.4993861", "0.49828464", "0.49770933", "0.4971592", "0.49638927", "0.49528378", "0.49452406" ]
0.7554375
0
Get syntax by its name. Name is defined in the xml file
def _getSyntaxByLanguageName(self, syntaxName, formatConverterFunction):
    xmlFileName = self._syntaxNameToXmlFileName[syntaxName]
    return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getSyntaxBySourceFileName(self, name, formatConverterFunction):\n for regExp, xmlFileName in self._extensionToXmlFileName.items():\n if regExp.match(name):\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)\n else:\n raise KeyError(\"No syntax for \" + name)", "def _getSyntaxByXmlFileName(self, xmlFileName, formatConverterFunction):\n import qutepart.syntax.loader # delayed import for avoid cross-imports problem\n \n with self._loadedSyntaxesLock:\n if not xmlFileName in self._loadedSyntaxes:\n xmlFilePath = os.path.join(os.path.dirname(__file__), \"data\", \"xml\", xmlFileName)\n syntax = Syntax(self)\n self._loadedSyntaxes[xmlFileName] = syntax\n qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath, formatConverterFunction)\n \n return self._loadedSyntaxes[xmlFileName]", "def find_syntax_name(self, syntaxFile):\n\n content = sublime.load_resource(syntaxFile).strip()\n\n # .tmLanguage (XML)\n if content.startswith('<'):\n matches = self.nameXmlRegex.search(content)\n # .sublime-syntax (YAML)\n else:\n matches = self.nameYamlRegex.search(content)\n\n if matches is None:\n return None\n\n return matches.group(1).strip()", "def syntax_text():", "def get_sup_code_by_name(self, name):\n raise NotImplementedError()", "def get_current_syntax(self, view):\n\n syntaxFile = view.settings().get('syntax')\n\n if syntaxFile not in syntaxInfos:\n syntaxInfos[syntaxFile] = {\n 'fileName' : os.path.splitext(os.path.basename(syntaxFile))[0],\n 'syntaxName' : self.find_syntax_name(syntaxFile),\n }\n\n return [\n v\n for v in syntaxInfos[syntaxFile].values()\n if isinstance(v, str)\n ]", "def select_syninfo(self, cellname, srctype, syntype):\n idx = np.char.startswith(self.syntab['dest'], cellname+'/') & \\\n np.char.startswith(self.syntab['source'], srctype) & \\\n np.char.startswith(self.syntab['type'], syntype)\n return self.syntab[idx]", "def get_node_with_name(self, name):\n\t return self.variables[name]", "def _getSyntaxByMimeType(self, mimeType, formatConverterFunction):\n xmlFileName = self._mimeTypeToXmlFileName[mimeType]\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)", "def _getSyntaxByFirstLine(self, firstLine, formatConverterFunction):\n for pattern, xmlFileName in self._firstLineToXmlFileName.items():\n if fnmatch.fnmatch(firstLine, pattern):\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)\n else:\n raise KeyError(\"No syntax for \" + firstLine)", "def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)", "def get_code_by_name(self, name):\n raise NotImplementedError()", "def get(self, name):", "def _infer_source(self):\n mzid_xml = ET.parse(self.filename)\n root = mzid_xml.getroot()\n name_space = self._get_xml_namespace(root.tag)\n try:\n return root.find(f\".//{name_space}AnalysisSoftware\").attrib[\"name\"]\n except KeyError:\n return None", "def parse(self, pyName=None):\n if self.root.tag == \"china\":\n parseMethod = getattr(self, \"parse_china\")\n return parseMethod(pyName)\n else:\n parseMethod = getattr(self, \"parse_province\")\n return parseMethod(pyName)", "def read_syntax_file(syntax_file):\n # Added encoding='utf8' to make it run on windows pycharm and ubuntu system as well as mac\n with open(syntax_file, encoding='utf8') as syntax:\n syntax_definition = syntax.read()\n\n # Use regex to divide pattern from keyword in a more strict manner\n pattern_syntax = re.compile(r'\"(.+)\": (.+)')\n matches_syntax = 
pattern_syntax.findall(syntax_definition)\n\n # Loop through lines in the syntax matches and create a dict with key and corresponding pattern\n syntax_dictionary = {}\n for item in matches_syntax:\n syntax_dictionary[item[1]] = item[0]\n return syntax_dictionary", "def get_token(self, name: str) -> Optional[BuiltinTypeSymbol]:\n\n symbol = self._symbols.get(name)\n return symbol", "def get_node(self, name):\n return self._node_reg[name]", "def lookup(name):", "def lookup(name):", "def _get(self,name):\n for node in self._members:\n if node.alias == name:\n return node", "def get_node_by_name(self, name):\r\n root = self.get_xml_root()\r\n return root.find(name)", "def getSymbol(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...", "def syntax(self):\n\n # Detects syntax in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n tokens = self.client.analyze_syntax(self.document).tokens\n\n # part-of-speech tags from enums.PartOfSpeech.Tag\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',\n 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n\n result = []\n for token in tokens:\n result.append((u'{}: {}'.format(pos_tag[token.part_of_speech.tag],\n token.text.content)))\n \n return result", "def fetch(self, name, implicit_extrn=False):\n if name not in self.symbols:\n if implicit_extrn:\n self.extrn(name)\n else:\n self.label(name, None)\n return self.symbols[name]", "def _ast(self, s: str) -> t.Optional[parse.AST]:\n rule = self.rules.get(s)\n return rule.ast if rule else None", "def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name", "def getbyname(self, name, doctype='experiment'):\n\n if doctype not in self.documents:\n self.documents[doctype] = esd.search(self.source, doctype)\n return self.documents[doctype].load_document(name)", "def get_sup_name_by_name(self, name):\n raise NotImplementedError()", "def LookupTag(self, typ, name):\n if typ not in self.collections:\n raise KeyError('Unrecognized lookup type: %s(%s)' % (typ, name))\n collection = self.collections[typ]\n # Support both @command(Name) and @command(:Name).\n fullname = (\n typ == vimdoc.COMMAND and name.lstrip(':') or name)\n candidates = [x for x in collection if x.FullName() == fullname]\n if not candidates:\n raise KeyError('%s \"%s\" not found' % (typ, name))\n if len(candidates) > 1:\n raise KeyError('Found multiple %ss named %s' % (typ, name))\n return candidates[0].TagName()" ]
[ "0.7034269", "0.6904593", "0.680472", "0.5899903", "0.5554824", "0.5539092", "0.54612535", "0.53637296", "0.5326089", "0.5322306", "0.53003645", "0.52864903", "0.5273851", "0.52667606", "0.523834", "0.52281964", "0.522044", "0.51524365", "0.5142065", "0.5142065", "0.5132712", "0.5120045", "0.5104005", "0.5073797", "0.50669074", "0.5066296", "0.5064799", "0.50618124", "0.50485575", "0.5046322" ]
0.75593144
0
Get syntax by source name of file, which is going to be highlighted
def _getSyntaxBySourceFileName(self, name, formatConverterFunction):
    for regExp, xmlFileName in self._extensionToXmlFileName.items():
        if regExp.match(name):
            return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)
    else:
        raise KeyError("No syntax for " + name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def highlight_source(source):\n return highlight(source, PythonLexer(), HtmlFormatter())", "def get_highlighter(name):\n\n # Is it already in the path?\n try:\n return import_module('.' + name, 'pycclone.highlighters')\n except ImportError:\n pass\n\n # Import from user folder\n fpath = os.path.join(USERDIR, 'highlighters', name, name + '.py')\n return load_source('pycclone.highlighters.' + name, fpath)", "def syntax_text():", "def get_current_syntax(self, view):\n\n syntaxFile = view.settings().get('syntax')\n\n if syntaxFile not in syntaxInfos:\n syntaxInfos[syntaxFile] = {\n 'fileName' : os.path.splitext(os.path.basename(syntaxFile))[0],\n 'syntaxName' : self.find_syntax_name(syntaxFile),\n }\n\n return [\n v\n for v in syntaxInfos[syntaxFile].values()\n if isinstance(v, str)\n ]", "def syntax_highlight(lang, code):\n\n highlighted = None\n\n try:\n if lang.lower() == 'python':\n highlighted = highlight(code, PythonLexer(), HtmlFormatter())\n\n elif lang.lower() == 'shell':\n highlighted = highlight(code, BashLexer(), HtmlFormatter())\n\n elif lang.lower() == 'asp':\n highlighted = highlight(code, CSharpAspxLexer(), HtmlFormatter())\n\n elif lang.lower() == 'csharp':\n highlighted = highlight(code, CSharpLexer(), HtmlFormatter())\n\n elif lang.lower() == 'ruby':\n highlighted = highlight(code, RubyLexer(), HtmlFormatter())\n\n elif lang.lower() == 'json':\n highlighted = highlight(code, JsonLexer(), HtmlFormatter())\n\n elif lang.lower() == 'js':\n highlighted = highlight(code, JavascriptLexer(), HtmlFormatter())\n\n elif lang.lower() == 'objective-c':\n highlighted = highlight(code, ObjectiveCLexer(), HtmlFormatter())\n\n elif lang.lower() == 'java':\n highlighted = highlight(code, JavaLexer(), HtmlFormatter())\n\n splitted = highlighted.split('\"highlight')\n highlighted = splitted[0] + '\"highlight '+lang + splitted[1]\n\n highlighted = highlighted.replace(\"<pre>\", \"\")\n highlighted = highlighted.replace(\"</pre>\", \"\")\n highlighted = highlighted.replace(\"div\", \"pre\")\n\n return highlighted\n except Exception as e:\n raise e", "def _highlight(self, source):\n if not self.hasmarkup:\n return source\n try:\n from pygments.formatters.terminal import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n from pygments import highlight\n except ImportError:\n return source\n else:\n return highlight(source, PythonLexer(), TerminalFormatter(bg=\"dark\"))", "def get_filename_and_formatted_source():\n sal = gdb.selected_frame().find_sal() # gdb.Symtab_and_line\n\n # Check if source code is available\n if sal.symtab is None:\n return \"\", []\n\n # Get the full source code\n closest_line = sal.line\n filename = sal.symtab.fullname()\n\n try:\n source = get_highlight_source(filename)\n except OSError:\n return \"\", []\n\n if not source:\n return \"\", []\n\n n = int(source_code_lines)\n\n # Compute the line range\n start = max(closest_line - 1 - n // 2, 0)\n end = min(closest_line - 1 + n // 2 + 1, len(source))\n num_width = len(str(end))\n\n # split the code\n source = source[start:end]\n\n # Compute the prefix_sign length\n prefix_sign = C.prefix(str(pwndbg.gdblib.config.code_prefix))\n prefix_width = len(prefix_sign)\n\n # Format the output\n formatted_source = []\n for line_number, code in enumerate(source, start=start + 1):\n if pwndbg.gdblib.config.context_source_code_tabstop > 0:\n code = code.replace(\"\\t\", \" \" * pwndbg.gdblib.config.context_source_code_tabstop)\n fmt = \" {prefix_sign:{prefix_width}} {line_number:>{num_width}} {code}\"\n if 
pwndbg.gdblib.config.highlight_source and line_number == closest_line:\n fmt = C.highlight(fmt)\n\n line = fmt.format(\n prefix_sign=prefix_sign if line_number == closest_line else \"\",\n prefix_width=prefix_width,\n line_number=line_number,\n num_width=num_width,\n code=code,\n )\n formatted_source.append(line)\n\n return filename, formatted_source", "def get_syntax_css(cls):\n\n from pygments.formatters import HtmlFormatter\n return HtmlFormatter().get_style_defs('.highlight')", "def _getSyntaxByLanguageName(self, syntaxName, formatConverterFunction):\n xmlFileName = self._syntaxNameToXmlFileName[syntaxName]\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)", "def get_highlighted_function_code(foo, remove_comments=False):\n from pygments import highlight\n from pygments.lexers import PythonLexer\n from pygments.formatters import HtmlFormatter\n import IPython\n\n txt = inspect.getsource(foo)\n if remove_comments:\n lines = txt.split('\\n')\n lines = [l for l in lines if not l.lstrip().startswith('#')]\n txt = '\\n'.join(lines)\n\n textwrap.dedent(txt)\n\n formatter = HtmlFormatter()\n ipython_display_object = \\\n IPython.display.HTML('<style type=\"text/css\">{}</style>{}'.format(\n formatter.get_style_defs('.highlight'),\n highlight(txt, PythonLexer(), formatter)))\n\n return ipython_display_object\n # print(txt)", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/filename\"])", "def highlight_code(code, lexer=None):\n# See this page for help with colouring: http://pygments.org/docs/tokens/\n#\n#from pygments.styles.default import DefaultStyle\n#from pygments.style import Style\n#from pygments.styles import get_style_by_name\n#from pygments.token import Comment, Keyword, Name, String, Operator, Number\n#from pygments import formatters\n#class SciPyStyle(Style):\n #default_style = \"\"\n #styles = {\n ##Comment: '#888',\n ##Keyword: 'bold #080',\n ##Name: '#080',\n ##Name.Function: '#00F',\n ##Name.Class: 'bold #00F',\n ##String: '#BA2121',\n #Comment: '#008000',\n #Keyword: 'bold #000080',\n #Name: '#000',\n #Name.Builtin: '#407090',\n #Name.Function: 'bold #008080',\n #Name.Class: 'bold #00F',\n #Name.Namespace: '#000000',\n #Number: '#008080',\n #String: '#800080',\n #String.Doc: '#800000',\n #Operator: '#000000',\n #Operator.Word: 'bold #AA22FF',\n #}\n\n#formatter = formatters.HtmlFormatter(style=SciPyStyle)\n#print(formatter.get_style_defs('.highlight'))\n\n if code is None:\n return None\n else:\n lexer_class = lexers.get_lexer_for_mimetype(lexer or 'text/x-python')\n return highlight(code, lexer_class,\n formatters.HtmlFormatter(linenos=True,\n linenostep=1,))", "def highlight_syntax(self) -> List[SyntaxHighlight]:\n raise NotImplementedError", "def test_source(self):\n with open(__file__, 'r') as f:\n contents = f.read()\n\n lexer = syntax_highlighting.fetch_lexer(contents)\n self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)", "def find_syntax_name(self, syntaxFile):\n\n content = sublime.load_resource(syntaxFile).strip()\n\n # .tmLanguage (XML)\n if content.startswith('<'):\n matches = self.nameXmlRegex.search(content)\n # .sublime-syntax (YAML)\n else:\n matches = self.nameYamlRegex.search(content)\n\n if matches is None:\n return None\n\n return matches.group(1).strip()", "def psource(*functions):\n source_code = '\\n\\n'.join(getsource(fn) for fn in functions)\n try:\n from pygments.formatters import HtmlFormatter\n from pygments.lexers import PythonLexer\n 
from pygments import highlight\n\n display(HTML(highlight(source_code, PythonLexer(), HtmlFormatter(full=True))))\n\n except ImportError:\n print(source_code)", "def __call__(self, source, language=None, metadata=None):\n from pygments.formatters import HtmlFormatter\n\n if not language:\n language = self.pygments_lexer\n\n return _pygments_highlight(\n source if len(source) > 0 else \" \",\n # needed to help post processors:\n HtmlFormatter(cssclass=\" highlight hl-\" + language, linenos=\"inline\"),\n language,\n metadata,\n )", "def read_syntax_file(syntax_file):\n # Added encoding='utf8' to make it run on windows pycharm and ubuntu system as well as mac\n with open(syntax_file, encoding='utf8') as syntax:\n syntax_definition = syntax.read()\n\n # Use regex to divide pattern from keyword in a more strict manner\n pattern_syntax = re.compile(r'\"(.+)\": (.+)')\n matches_syntax = pattern_syntax.findall(syntax_definition)\n\n # Loop through lines in the syntax matches and create a dict with key and corresponding pattern\n syntax_dictionary = {}\n for item in matches_syntax:\n syntax_dictionary[item[1]] = item[0]\n return syntax_dictionary", "def getSource():", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s} /path/to/file\\n{m}\\n{m} Other{e}\\n\\n\",\n style,\n whitelist=[\"headerblock/copyright\"])", "def highlight_python(scontent):\n \n try:\n highlighter = wp_highlighter(lexer_name='python', line_nums=False)\n highlighter.code = scontent\n results = highlighter.highlight()\n except Exception as ex:\n _log.error('Error in highlight_python:\\n{}'.format(ex))\n results = scontent\n return results", "def _getSyntaxByXmlFileName(self, xmlFileName, formatConverterFunction):\n import qutepart.syntax.loader # delayed import for avoid cross-imports problem\n \n with self._loadedSyntaxesLock:\n if not xmlFileName in self._loadedSyntaxes:\n xmlFilePath = os.path.join(os.path.dirname(__file__), \"data\", \"xml\", xmlFileName)\n syntax = Syntax(self)\n self._loadedSyntaxes[xmlFileName] = syntax\n qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath, formatConverterFunction)\n \n return self._loadedSyntaxes[xmlFileName]", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def highlight_code(code, language, style, output_format='html'):\n\n lexer = get_lexer_by_name(language, stripall=True)\n formatter = get_formatter_by_name(output_format, style=style,\n linenos=True, cssclass=\"source\")\n highlighted_code = highlight(code, lexer, formatter)\n\n css_code = formatter.get_style_defs('.highlight')\n\n return css_code, highlighted_code", "def syntax_highlight(code, highlight_lines=[], start_line=1, style='unavailable'):\n\n\t\tif start_line > 1:\n\t\t\tcode = ('# CodeCache - next line is %d; note lines: %r\\n' % (start_line, highlight_lines)) + code\n\n\t\tcode = ('# CodeCache - unhighlighted\\n') + code\n\n\t\thtml = \"\"\"<html>\n\t\t<body>\n\t\t<pre>%s</pre>\n\t\t</body>\n\t\t</html>\n\t\t\"\"\" % code\n\n\t\treturn html.strip()", "def get_formatted_partial_source(self, filename, linenumber, offset):\n lines = self.get_source(filename)\n if not lines:\n return \"\", \"\"\n\n begin = max(0, linenumber - self.context)\n partial_source, bad_line = highlight_source(\n linenumber,\n linenumber - begin - 1,\n lines[begin : linenumber + 1],\n offset=offset,\n )\n return partial_source, bad_line", "def syntax_highlight(code, highlight_lines=[], start_line=1, style='monokai'):\n\t\tif start_line > 
1:\n\t\t\tcode = ('# CodeCache - note lines: %r\\n' % highlight_lines) + code\n\t\t\tstart_line -= 1\n\t\t\thighlight_lines = [line_no + 1 for line_no in highlight_lines]\n\n\t\tformatter = HtmlFormatter(\n\t\t\tlinenos='table', \n\t\t\tstyle=style,\n\t\t\thl_lines=highlight_lines,\n\t\t\tlinenostart=start_line,\n\t\t\twrapcode=True,\n\t\t\tfull=True,\n\t\t\tlineseparator='<br>',\n\t\t\tnoclasses=True,\n\t\t\t)\n\t\t\t\t\t\n\t\thtml = highlight(code, PYTHON_LEXER, formatter)\n\n\t\thtml = highlight_html_strip_pattern.sub('', html)\n\t\thtml = replace_linenumber_format(html)\n\t\t\n\t\treturn html.strip()", "def highlight_trace(filepath, color_name=\"\"):\n if filepath not in covdb.coverage_files:\n log.log_error(\"[!] %s is not in the coverage DB\" % filepath)\n return\n blocks = covdb.trace_dict[filepath]\n if color_name == \"\":\n color = HighlightStandardColor.OrangeHighlightColor\n elif color_name.lower() in colors:\n color = colors[color_name]\n else:\n log.log_warn(\"[!] %s isn't a HighlightStandardColor, using my favorite color instead\" % color_name)\n color = colors[\"red\"]\n highlight_set(blocks, color)\n log.log_info(\"[*] Highlighted %d basic blocks in trace %s\" % (len(blocks), filepath))", "def highlight_source(linenumber, index, lines, offset=None):\n # The following if statements are left-over diagnostic\n # from the hack to integrate into Idle.\n # they are harmless tests which could potentially be useful.\n if lines is None:\n return \"\", \"\"\n if index is None:\n print(\"problem in highlight_source(): index is None\")\n index = 0\n\n # The weird index arithmetic below is based on the information returned\n # by Python's inspect.getinnerframes()\n\n new_lines = []\n problem_line = \"\"\n nb_digits = len(str(linenumber + index))\n no_mark = \" {:%d}: \" % nb_digits\n with_mark = \" -->{:%d}: \" % nb_digits\n if offset is not None:\n offset_mark = \" \" * (8 + nb_digits + offset) + \"^\"\n i = linenumber - index\n\n for line in lines:\n if i == linenumber:\n num = with_mark.format(i)\n problem_line = line\n new_lines.append(num + line.rstrip())\n if offset is not None:\n new_lines.append(offset_mark)\n break\n else:\n num = no_mark.format(i)\n new_lines.append(num + line.rstrip())\n i += 1\n return \"\\n\".join(new_lines), problem_line", "def showsrcstyle(self, line):\n \n name = line.strip()\n if not name:\n name = \"default\"\n self.style_name = name\n self.formatter = HtmlFormatter(style=name)\n display(HTML(\"\"\"<style type='text/css'>\n span.inspector-header {\n font-family: monospace;\n border-bottom: 1px solid #555;\n }\n table.highlighttable, .highlighttable td, .highlighttable tr {\n border: 0px;\n }\n .highlighttable td.linenos {\n border-right: 1px solid #555;\n }\n \n span.inspector-filename {\n text-decoration: italic;\n }\n span.inspector-lineno {\n font-weight: bold;\n }\n %s\n </style>\n \"\"\" % self.formatter.get_style_defs()\n ))" ]
[ "0.69240004", "0.66271734", "0.6512138", "0.6433099", "0.63243836", "0.6319666", "0.6306805", "0.6298004", "0.6259873", "0.62318337", "0.62157995", "0.615547", "0.61453384", "0.596871", "0.5953962", "0.5923246", "0.5921629", "0.5886486", "0.58687", "0.5838372", "0.57989764", "0.5791167", "0.5787815", "0.5785402", "0.5752032", "0.56974673", "0.56723624", "0.56720746", "0.56387466", "0.56348705" ]
0.7074609
0
Get syntax by first line of the file
def _getSyntaxByFirstLine(self, firstLine, formatConverterFunction):
    for pattern, xmlFileName in self._firstLineToXmlFileName.items():
        if fnmatch.fnmatch(firstLine, pattern):
            return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)
    else:
        raise KeyError("No syntax for " + firstLine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_line(file: str) -> str:\n with open(file) as f:\n return f.readline().split('\\n')[0]", "def first_line(self):\n with open(self.file_path) as file:\n return file.readline()", "def parseStart(fp):\n\n try:\n ln = fp.readline()\n p = re.compile(r'^Inicial\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.startMsg)\n\n # match and capture \"{ a }\" etc.\n ln = fp.readline()\n p = re.compile(r'^\\{\\s*(.*)\\s*\\}\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.startMsg)\n\n return m.group(1).strip()\n\n except:\n raise", "def _start_magic(line):\n return start(line)", "def get_first_line(filename):\n try:\n with open(filename, \"r\") as ff:\n first_line = ff.readlines()[0].strip(\" \\n\\r\")\n except FileNotFoundError: # pragma: no cover\n first_line = \"xxx\"\n return first_line", "def _get_first_code_line():\n return min(_code_lines)", "def get_input(line):\n tex_input_filename_re = r\"\"\"{[^}]*\"\"\"\n m = re.search(tex_input_filename_re, line)\n return m.group()[1:]", "def extract_first_line(func_code):\r\n if func_code.startswith(FIRST_LINE_TEXT):\r\n func_code = func_code.split('\\n')\r\n first_line = int(func_code[0][len(FIRST_LINE_TEXT):])\r\n func_code = '\\n'.join(func_code[1:])\r\n else:\r\n first_line = -1\r\n return func_code, first_line", "def section_name_in_first_line(): # noqa: D416", "def want_line(self, op, first = 0):\n for ii in range(first, len(self.__content)):\n match = re.match(op, self.__content[ii], re.IGNORECASE)\n if match:\n return (ii, match.group(1))\n return None", "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "def read_syntax_file(syntax_file):\n # Added encoding='utf8' to make it run on windows pycharm and ubuntu system as well as mac\n with open(syntax_file, encoding='utf8') as syntax:\n syntax_definition = syntax.read()\n\n # Use regex to divide pattern from keyword in a more strict manner\n pattern_syntax = re.compile(r'\"(.+)\": (.+)')\n matches_syntax = pattern_syntax.findall(syntax_definition)\n\n # Loop through lines in the syntax matches and create a dict with key and corresponding pattern\n syntax_dictionary = {}\n for item in matches_syntax:\n syntax_dictionary[item[1]] = item[0]\n return syntax_dictionary", "def get_lexer_name_fromcontent(content):\n \n if isinstance(content, (list, tuple)):\n # readlines() was passed.\n firstline = content[0]\n else:\n if '\\n' in content:\n # not calling split() here.\n firstline = content[:content.index('\\n')]\n else:\n # Can't determine usable first line.\n return ''\n # Got usable first line, look for shebang.\n if firstline.startswith('#!'):\n # get interpreter from shebang line.\n shebanglang = firstline.split('/')[-1]\n # check for env use ('env python')\n if ' ' in shebanglang:\n # interpreter name should be last thing\n shebanglang = shebanglang.split()[-1]\n return shebanglang.strip()\n # didn't work, no language found.\n return ''", "def first_word_of_each_line(filepath):\n with open(filepath, 'r') as my_file:\n for line in my_file:\n line = line.strip()\n words = line.split()\n word = words[0]\n yield word", "def get_syntax_test_tokens(view):\n\n line = view.line(0)\n match = None\n if line.size() < 1000: # no point checking longer lines as they are unlikely to match\n first_line = view.substr(line)\n match = syntax_test_header_regex.match(first_line)\n\n if not match:\n return None\n else:\n return SyntaxTestHeader(**match.groupdict())", "def read_file_first_line(filename):\n result = None\n 
with open(filename, 'r') as f:\n result = f.readline()\n result = result.rstrip(\"\\n\")\n f.close()\n return result", "def getFileFirstLine(filename, mode=\"r\", encoding=None):\n\n with withFileLock(\"reading file %s\" % filename):\n with openTextFile(filename, mode, encoding=encoding) as f:\n return f.readline()", "def _getSyntaxBySourceFileName(self, name, formatConverterFunction):\n for regExp, xmlFileName in self._extensionToXmlFileName.items():\n if regExp.match(name):\n return self._getSyntaxByXmlFileName(xmlFileName, formatConverterFunction)\n else:\n raise KeyError(\"No syntax for \" + name)", "def syntax_text():", "def get_current_syntax(self, view):\n\n syntaxFile = view.settings().get('syntax')\n\n if syntaxFile not in syntaxInfos:\n syntaxInfos[syntaxFile] = {\n 'fileName' : os.path.splitext(os.path.basename(syntaxFile))[0],\n 'syntaxName' : self.find_syntax_name(syntaxFile),\n }\n\n return [\n v\n for v in syntaxInfos[syntaxFile].values()\n if isinstance(v, str)\n ]", "def find_syntax_name(self, syntaxFile):\n\n content = sublime.load_resource(syntaxFile).strip()\n\n # .tmLanguage (XML)\n if content.startswith('<'):\n matches = self.nameXmlRegex.search(content)\n # .sublime-syntax (YAML)\n else:\n matches = self.nameYamlRegex.search(content)\n\n if matches is None:\n return None\n\n return matches.group(1).strip()", "def line_parser(line_starts_with: str, line: str) -> str: # pure function\n\n if line is None:\n return 'empty'\n elif line.startswith(line_starts_with):\n return 'start'\n elif line[0] in [' ', '\\t', '#', '\\n']:\n return 'empty'\n else:\n print('End works: ', line)\n return 'end'", "def _get_match_start_tick(file):\n with file.open(\"r\") as fh:\n lines = fh.read().splitlines()\n return int(lines[1])", "def retrieve_token(filename):\n with open(filename, 'r') as f:\n token = f.readline()\n\n return token", "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def check_for_header(filename):\n header = {}\n start_id = -1\n with open(filename, \"r\") as f:\n start = re.compile(r\"\\bSTART|start\\b\")\n # if the file has the keyword start, extract header\n if bool(start.search(f.read())):\n f.seek(0) # set the cursor back to the beginning\n lines = f.readlines()\n for i, line in enumerate(lines):\n if start.match(line):\n start_id = i # the line number where start is used (divides header and body)\n break\n args = line.split()\n args.insert(0, \"\") # check_for_commands only handles the second argument (first is usually res_id)\n header['DEFAULT'] = check_for_commands(args, 1, 2)\n\n return header, start_id", "def get_reference_header(file):\n\n with open(file, \"r\") as typing_report:\n lines = typing_report.readlines()\n return lines[1].split('\\\\t')[3]", "def read_codes(self, filename=\"static/codes.txt\"):\n with open(filename, \"r\") as f:\n contents = f.read().splitlines()\n code = contents[0]\n \n return code", "def parse_line(line):\n\tpattern = re.compile(\"\"\"\n\t\t[ \\t]*\n\t\t(private|package|remote|public)*[ ]*\n\t\t(any|string|array|numeric|boolean|component|struct|void)*[ ]*\n\t\t(function[ ]+|\\<cffunction[ ]+name[ ]*=[ ]*\\\"?)([_\\-a-z][a-z0-9_\\-]+)\n\t\t\"\"\", \n\t\tre.VERBOSE|re.MULTILINE|re.IGNORECASE)\n\tm = pattern.match(line)\n\t#return the 5th gouped regex, which should be the function name\n\tret_val = m.group(4) if m else ''\n\treturn ret_val", "def lang_type(filename, 
firstline=None, openok=True):\n\n basename = os.path.basename(filename)\n name, extension = os.path.splitext(basename)\n\n # first try to detect language based on file extension\n try:\n return lang_types[extension]\n except KeyError:\n pass\n\n # now try to detect language based on file prefix\n for start, lang in lang_prefixes:\n if basename.startswith(start):\n return lang\n\n # if a first line was not provided but the file is ok to open,\n # grab the first line of the file.\n if firstline is None and openok:\n handle = open(filename, \"r\")\n firstline = handle.readline()\n handle.close()\n\n # try to detect language based on #! in first line\n if firstline and firstline.startswith(\"#!\"):\n for string, lang in hash_bang:\n if firstline.find(string) > 0:\n return lang\n\n # sorry, we couldn't detect the language\n return None" ]
[ "0.6747421", "0.6670001", "0.64411604", "0.63172", "0.63021094", "0.62843513", "0.6278874", "0.61992455", "0.61644703", "0.6089005", "0.6036392", "0.60151047", "0.59718347", "0.5875066", "0.5856728", "0.58460116", "0.58287245", "0.5772079", "0.57663625", "0.57556015", "0.5725593", "0.5718873", "0.57037365", "0.5673584", "0.56588537", "0.56454635", "0.5627993", "0.5607024", "0.55983096", "0.5573879" ]
0.7575394
0
Get the sequence from 4D images
def get_subject_sequence(img_itk, img_size, img_spacing, img_origin, mask_direction):
    n_sequence = img_size[-1]
    img_array = itk.GetArrayFromImage(img_itk)
    itk_sequences = []
    for item in range(n_sequence):
        img_sequence = img_array[item]
        itk_img_sequence = itk.GetImageFromArray(img_sequence)
        itk_img_sequence.SetOrigin(img_origin[:3])
        itk_img_sequence.SetSpacing(img_spacing[:3])
        itk_img_sequence.SetDirection(mask_direction)
        itk_sequences.append(itk_img_sequence)
    return itk_sequences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_sequence(image_name, output_name):\n nim = nib.load(image_name)\n T = nim.header['dim'][4]\n affine = nim.affine\n image = nim.get_data()\n\n for t in range(T):\n image_fr = image[:, :, :, t]\n nim2 = nib.Nifti1Image(image_fr, affine)\n nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, t))", "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def load_image_sequence(image_path, offset, bounds):\n if image_path[-1] != '/': image_path += '/'\n gt = np.zeros(bounds, np.uint32)\n images = parse(image_path)\n d, w, h = bounds\n z, x, y = offset\n for i in xrange(d):\n im = imread(image_path + images[z + i], mode = 'I')\n gt[i, :, :] = im[x : x + w, y : y + h]\n return gt", "def get_seq(data_dir, dname):\n # Get list of video files\n data_dir = os.path.join(data_dir, 'softmotion30_44k', dname)\n filenames = gfile.Glob(os.path.join(data_dir, '*'))\n if not filenames:\n raise RuntimeError('No data files found.')\n # Enumerates videos (filename, index of file, list of images)\n for f in filenames:\n k = 0\n for serialized_example in tf.python_io.tf_record_iterator(f):\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n image_seq = []\n # Get all frames of the video\n for i in range(30):\n image_name = str(i) + '/image_aux1/encoded'\n byte_str = example.features.feature[image_name].bytes_list.value[0]\n img = Image.frombytes('RGB', (64, 64), byte_str)\n image_seq.append(img)\n k = k + 1\n yield f, k, image_seq", "def make_sequence(image_names, dt, output_name):\n nim = nib.load(image_names[0])\n affine = nim.affine\n X, Y, Z = nim.header['dim'][1:4]\n T = len(image_names)\n image = np.zeros((X, Y, Z, T))\n\n for t in range(T):\n image[:, :, :, t] = nib.load(image_names[t]).get_data()\n\n nim2 = nib.Nifti1Image(image, affine)\n nim2.header['pixdim'][4] = dt\n nib.save(nim2, output_name)", "def create_sequence_of_images(size, source_image, target_image, \n source_triangles_list, target_triangles_list, num_frames):\n # initiate the list of intermediate images\n images_list = []\n # run over the num_frames value to create the intermediate images\n for i in range(0,num_frames):\n # add to the list the current intermediate image create by\n # create_intermediate_image with the current alpha value\n images_list.append(create_intermediate_image(i/(num_frames-1),\n size,source_image,\n target_image,\n source_triangles_list,\n target_triangles_list))\n return images_list", "def seqIo_toImgs(fName, tDir=[], skip=1, f0=0, f1=np.inf, ext=''):\n sr = seqIo_reader(fName)\n f1 = np.minimum(f1,sr.header['numFrames']-1)\n frames = range(f0,f1,skip)\n n=len(frames)\n k=0\n #output images to array\n if tDir==[]:\n I,_=sr.getFrame(0)\n d = I.shape\n assert(len(d)==2 or len(d)==3)\n try:\n Is = np.zeros((I.shape+(n,))).astype(I.dtype)\n except:\n sr.close()\n raise\n for k in range(n):\n I,ts = sr.getFrame(k)\n if len(d)==2:\n Is[:,:,k]=I\n else:\n Is[:,:,:,k]=I\n print('saved %d' % k)\n\n sr.close()\n # output image directory\n if not os.path.exists(tDir):os.makedirs(tDir)\n if tDir.split('/')[-1]!='/':tDir+'/'\n Is = np.array([])\n for frame in frames:\n f = tDir + 'I%05.' 
% (frame)\n I, ts = sr.getFrame(frame)\n if ext!='':\n cv2.imwrite(f+ext,I)\n else:\n cv2.imwrite(f+sr.ext)\n k+=1\n print('saved %d' % frame)\n sr.close()\n return Is", "def pix2pix_results_to_frames(img_array):\n frames = []\n\n for i in range(int(len(img_array)/3)):\n\n try:\n left = cv2.resize(img_array[i * 3], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n right = cv2.resize(img_array[i * 3 + 2], dsize=(512, 512), interpolation=cv2.INTER_NEAREST)\n\n scale = 512/img_array[i * 3 + 1].shape[0]\n middle = cv2.resize(img_array[i * 3 + 1], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n\n frames.append(np.concatenate((left, middle, right), axis=1))\n\n frames.append(img_array[i * 3+1])\n except:\n print(\"Error\")\n\n return frames", "def getimgs():", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def prob_3_4(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return mirrorImg ######", "def replay_sequence(images_path):\n sequence = [(parse_time(f), parse_line(f)) for f in sorted(glob.glob(os.path.join(images_path, '????-*.jpg'))) if 'edges' in f]\n start_time = datetime.now()\n for frame_time, line in sequence:\n frame_time_str = frame_time.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n data_sample = (frame_time_str, line)\n frame_draw_time = start_time + (frame_time - sequence[0][0])\n sleep_until(frame_draw_time)\n print(repr(data_sample))\n yield frame_time, line", "def create_azi_to_rad_sequence():\n num_tot = 30\n for i in range(2*num_tot + 1):\n angle_arr = azi_to_rad_transformation(512, i, 30)\n phase_arr = create_flat_phase(512, 0)\n delta_1_arr = create_delta_1(phase_arr, angle_arr)\n delta_2_arr = create_delta_2(angle_arr)\n cv2.imwrite('frame' + str(i) +'.tiff', delta_2_arr)\n print(\"Frame \" + str(i))", "def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data", "def get_segmentation_image(segdb, config):\n num_images = len(segdb)\n assert num_images > 0, \"No images\"\n processed_ims = []\n processed_segdb = []\n processed_seg_cls_gt = []\n for i in range(num_images):\n seg_rec = segdb[i]\n print(seg_rec[\"image\"])\n assert os.path.exists(seg_rec[\"image\"]), \"{} does not exist\".format(\n seg_rec[\"image\"]\n )\n im = np.array(cv2.imread(seg_rec[\"image\"]))\n\n new_rec = seg_rec.copy()\n\n scale_ind = random.randrange(len(config.SCALES))\n target_size = config.SCALES[scale_ind][0]\n max_size = config.SCALES[scale_ind][1]\n im, im_scale = resize(im, target_size, max_size)\n im_tensor = transform(im, config.network.PIXEL_MEANS)\n im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]\n new_rec[\"im_info\"] = im_info\n\n seg_cls_gt = np.array(Image.open(seg_rec[\"seg_cls_path\"]))\n seg_cls_gt, seg_cls_gt_scale = resize(\n seg_cls_gt, target_size, max_size, interpolation=cv2.INTER_NEAREST\n )\n seg_cls_gt_tensor = transform_seg_gt(seg_cls_gt)\n\n processed_ims.append(im_tensor)\n 
processed_segdb.append(new_rec)\n processed_seg_cls_gt.append(seg_cls_gt_tensor)\n\n return processed_ims, processed_seg_cls_gt, processed_segdb", "def process( fids, ndim=2 ):\n\timg = np.empty_like( fids )\n\tax = -1*(np.array( range(ndim) )+1)\n\t\n\timg = np.fft.fftshift( np.fft.fftn( fids, axes=ax, ).astype( np.complex64), axes=ax )\n\t\n\treturn np.squeeze(img)", "def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break", "def get_sequence_indices(self) -> List[List[int]]:\n imgnames = self.ann_data['imgname']\n video_frames = defaultdict(list)\n for idx, imgname in enumerate(imgnames):\n subj, action, camera = self._parse_h36m_imgname(imgname)\n video_frames[(subj, action, camera)].append(idx)\n\n # build sample indices\n sequence_indices = []\n _len = (self.seq_len - 1) * self.seq_step + 1\n _step = self.seq_step\n for _, _indices in sorted(video_frames.items()):\n n_frame = len(_indices)\n\n if self.pad_video_seq:\n # Pad the sequence so that every frame in the sequence will be\n # predicted.\n if self.causal:\n frames_left = self.seq_len - 1\n frames_right = 0\n else:\n frames_left = (self.seq_len - 1) // 2\n frames_right = frames_left\n for i in range(n_frame):\n pad_left = max(0, frames_left - i // _step)\n pad_right = max(0,\n frames_right - (n_frame - 1 - i) // _step)\n start = max(i % _step, i - frames_left * _step)\n end = min(n_frame - (n_frame - 1 - i) % _step,\n i + frames_right * _step + 1)\n sequence_indices.append([_indices[0]] * pad_left +\n _indices[start:end:_step] +\n [_indices[-1]] * pad_right)\n else:\n seqs_from_video = [\n _indices[i:(i + _len):_step]\n for i in range(0, n_frame - _len + 1)\n ]\n sequence_indices.extend(seqs_from_video)\n\n # reduce dataset size if needed\n subset_size = int(len(sequence_indices) * self.subset_frac)\n start = np.random.randint(0, len(sequence_indices) - subset_size + 1)\n end = start + subset_size\n\n return sequence_indices[start:end]", "def build_sequences(dcm):\n dimension_organization_uid = '1.2.276.0.7230010.3.1.4.8323329.20175.1573232544.237437'\n ds0 = Dataset()\n ds0.DimensionOrganizationUID = dimension_organization_uid\n dcm.DimensionOrganizationSequence = Sequence([ds0])\n del ds0\n\n ds1 = Dataset()\n ds1.DimensionOrganizationUID = dimension_organization_uid\n ds1.DimensionIndexPointer = Tag(0x0048021E)\n ds1.FunctionalGroupPointer = Tag(0x0048021A)\n\n ds2 = Dataset()\n ds2.DimensionOrganizationUID = dimension_organization_uid\n ds2.DimensionIndexPointer = Tag(0x0048021F)\n ds2.FunctionalGroupPointer = Tag(0x0048021A)\n\n dcm.DimensionIndexSequence = Sequence([ds1, ds2])\n del ds1, ds2\n\n ds3 = Dataset()\n ds3.XOffsetInSlideCoordinateSystem = 20\n ds3.YOffsetInSlideCoordinateSystem = 40\n dcm.TotalPixelMatrixOriginSequence = Sequence([ds3])\n del ds3\n\n ds4 = Dataset()\n ds5 = Dataset()\n\n # IlluminationTypeCodeSequence\n ds4.CodingSchemeDesignator = 'DCM'\n ds4.CodeMeaning = 'Brightfield illumination'\n ds4.CodeValue = '111744'\n\n # IlluminationColorCodeSequence\n ds5.CodingSchemeDesignator = 'DCM'\n ds5.CodeMeaning = 'No filter'\n ds5.CodeValue = '111609'\n\n ds7 = Dataset()\n ds7.IlluminationTypeCodeSequence = Sequence([ds4])\n ds7.IlluminationColorCodeSequence = Sequence([ds5])\n # noinspection 
PyPep8,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection\n ds7.ICCProfile = b'\\x00\\x00\\x1b\\nlcms\\x020\\x00\\x00mntrRGB XYZ \\x07\\xd4\\x00\\x08\\x00\\r\\x00\\x0c\\x00\\x12\\x00\\x06acspMSFT\\x00\\x00\\x00\\x00lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf6\\xd6\\x00\\x01\\x00\\x00\\x00\\x00\\xd3-lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0cdmnd\\x00\\x00\\x01\\x14\\x00\\x00\\x00jdesc\\x00\\x00\\x01\\x80\\x00\\x00\\x00hdmdd\\x00\\x00\\x01\\xe8\\x00\\x00\\x00hwtpt\\x00\\x00\\x02P\\x00\\x00\\x00\\x14rXYZ\\x00\\x00\\x02d\\x00\\x00\\x00\\x14bXYZ\\x00\\x00\\x02x\\x00\\x00\\x00\\x14gXYZ\\x00\\x00\\x02\\x8c\\x00\\x00\\x00\\x14rTRC\\x00\\x00\\x02\\xa0\\x00\\x00\\x08\\x0cgTRC\\x00\\x00\\n\\xac\\x00\\x00\\x08\\x0cbTRC\\x00\\x00\\x12\\xb8\\x00\\x00\\x08\\x0cchrm\\x00\\x00\\x1a\\xc4\\x00\\x00\\x00$cprt\\x00\\x00\\x1a\\xe8\\x00\\x00\\x00!desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10lcms generated \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00XYZ \\x00\\x00\\x00\\x00\\x00\\x00\\xf3=\\x00\\x01\\x00\\x00\\x00\\x01\\x16\\x98XYZ \\x00\\x00\\x00\\x00\\x00\\x00o\\x94\\x00\\x008\\xee\\x00\\x00\\x03\\x90XYZ \\x00\\x00\\x00\\x00\\x00\\x00$\\x9d\\x00\\x00\\x0f\\x83\\x00\\x00\\xb6\\xbeXYZ 
\\x00\\x00\\x00\\x00\\x00\\x00b\\xa5\\x00\\x00\\xb7\\x90\\x00\\x00\\x18\\xdecurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 \\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 
\\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> >`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 
\\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 
\\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 \\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> 
>`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 \\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\
\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 \\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 
\\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> >`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 
\\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffchrm\\x00\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\xa3\\xd7\\x00\\x00T{\\x00\\x00L\\xcd\\x00\\x00\\x99\\x9a\\x00\\x00&f\\x00\\x00\\x0f\\\\text\\x00\\x00\\x00\\x00no copyright, use freely\\x00\\n'\n ds7.OpticalPathIdentifier = '1'\n # noinspection SpellCheckingInspection\n ds7.OpticalPathDescription = 'Brightfield'\n\n dcm.OpticalPathSequence = Sequence([ds7])\n del ds7, ds5, ds4\n\n dcm.AcquisitionContextSequence = Sequence([])\n\n ds0 = Dataset()\n ds0.LocalNamespaceEntityID = 'UNKNOWN'\n dcm.IssuerOfTheContainerIdentifierSequence = Sequence([ds0])\n del ds0\n\n ds0 = Dataset()\n\n ds0.SpecimenIdentifier = 'UNKNOWN'\n ds0.SpecimenPreparationSequence = Sequence([])\n ds0.SpecimenUID = generate_uid(prefix=None)\n ds0.IssuerOfTheSpecimenIdentifierSequence = Sequence([])\n dcm.SpecimenDescriptionSequence = Sequence([ds0])\n dcm.ContainerTypeCodeSequence = Sequence([])\n dcm.ContainerIdentifier = 'UNKNOWN'\n return dcm", "def part_1b():\n shift_0 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'Shift0.png'), 0) / 255.\n shift_r10 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR10.png'), 0) / 255.\n shift_r20 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR20.png'), 0) / 255.\n shift_r40 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR40.png'), 0) / 255.\n\n raise NotImplementedError", "def extract_4_pics(im_rgb, im):\r\n _, w = im.shape\r\n \r\n # Find indices of the longest horizontal and vertical lines\r\n hor_indices, ver_indices, _, _ = find_longest_lines(im)\r\n\r\n # Find the 4 longest horizontal lines\r\n hor_indices = np.sort(hor_indices[:8])\r\n hor_lines = np.array([hor_indices[0], 0, 0, 0])\r\n \r\n # line 2-4\r\n cur = 
0;\r\n thresholds = [0.42*w, w/50];\r\n for i in range(cur,8):\r\n if hor_indices[i] - hor_lines[cur] > thresholds[cur%2]:\r\n cur += 1;\r\n hor_lines[cur] = hor_indices[i];\r\n if cur == 3:\r\n break\r\n \r\n # vertical lines\r\n ver_indices = np.sort(ver_indices[:8]);\r\n ver_lines = np.array([ver_indices[0], 0,0,0]);\r\n \r\n # line 2-4\r\n cur = 0;\r\n for i in range(cur, 8):\r\n if ver_indices[i] - ver_lines[cur] > thresholds[cur%2]:\r\n cur += 1;\r\n ver_lines[cur] = ver_indices[i];\r\n if cur == 3:\r\n break\r\n \r\n im[:,ver_lines] = 255\r\n im[hor_lines,:] = 255\r\n \r\n # Extract images\r\n pic1 = im_rgb[hor_lines[0]:hor_lines[1], ver_lines[0]:ver_lines[1], :]\r\n pic2 = im_rgb[hor_lines[0]:hor_lines[1], ver_lines[2]:ver_lines[3], :]\r\n pic3 = im_rgb[hor_lines[2]:hor_lines[3], ver_lines[0]:ver_lines[1], :]\r\n pic4 = im_rgb[hor_lines[2]:hor_lines[3], ver_lines[2]:ver_lines[3], :]\r\n pics = [pic1, pic2, pic3, pic4]\r\n \r\n return pics", "def __call__(self, image: torch.Tensor) -> torch.Tensor:\n assert len(image.shape) == 4\n res = image.clone()\n if self.for_segmentation_input_maps:\n res = res.int()\n else:\n res = res.float()\n if res.max() > 1:\n raise ValueError(\"Image tensor should be in \"\n \"range 0-1 for conversion to PIL\")\n\n # Sample parameters defining the transformation\n transforms = self.draw_next_transform()\n for c in range(image.shape[0]):\n res[c] = self.apply_transform_on_3d_image(res[c], transforms)\n if not self.use_joint_channel_transformation:\n # Resample transformations for the next channel\n transforms = self.draw_next_transform()\n return res.to(dtype=image.dtype)", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def get_batches_fn(batch_size):\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n 
image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "def splitImages(self):\n imgs = self.img_list\n frames = self.frame_number.value()\n grps = []\n for i in range(0, len(imgs), frames):\n grps.append(imgs[i:i + frames])\n\n return grps", "def particle_images (sim,frame_id) :\n # get positions of all particles: define first the atom selection, then jump to\n # the user-requested trajectory frame, get the box dimensions (currently works\n # only for orthorhombic boxes, then calculate the image indices\n atoms = sim.u.select_atoms ('all')\n ts = sim.u.trajectory[frame_id]\n L = ts.dimensions[:3]\n pos = atoms.positions + L/2.\n return pos//L", "def from_sequence(images, orientation=\"right\", padding=0):\n if orientation == 'square':\n length = int(math.ceil(math.sqrt(len(images))))\n max_height = 0\n for index, image in enumerate(images):\n if index % length == 0:\n x = 0\n y += max_height\n max_height = 0\n else:\n x += image.width\n max_height = max(max_height, image.height)\n sequence.append((image, (x, y)))\n else:\n if orientation in ('left', 'right'):\n selector = spyral.Vec2D(1, 0)\n else:\n selector = spyral.Vec2D(0, 1)\n\n if orientation in ('left', 'above'):\n reversed(images)\n\n if type(padding) in (float, int, long):\n padding = [padding] * len(images)\n else:\n padding = list(padding)\n padding.append(0)\n base = spyral.Vec2D(0, 0)\n sequence = []\n for image, padding in zip(images, padding):\n sequence.append((image, base))\n base = base + selector * (image.size + (padding, padding))\n return from_conglomerate(sequence)", "def get_aligned_image_2frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...]).repeat(1, 4, 1, 1)]\n for i in range(n - 1, 0, -1):\n x_i = x[:, i, ...]\n flow = flows_backward[:, i - 1, ...]\n x_backward.insert(0, flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n x_forward = [torch.zeros_like(x[:, 0, ...]).repeat(1, 4, 1, 1)]\n for i in range(0, n - 1):\n x_i = x[:, i, ...]\n flow = flows_forward[:, i, ...]\n x_forward.append(flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]", "def test_nib_resample_image_4d(fake_4dimage_nib):\n img_r = resampling.resample_nib(fake_4dimage_nib, new_size=[2, 2, 1, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9, 3)\n assert img_r.get_data()[8, 8, 4, 0] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.get_data()[8, 8, 4, 1] == 0.0\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0, 1.0)" ]
[ "0.6118404", "0.6090282", "0.6046997", "0.60036993", "0.5947869", "0.5940997", "0.5880866", "0.5857647", "0.57846844", "0.573141", "0.5714751", "0.56980777", "0.56915104", "0.568075", "0.56790113", "0.5626276", "0.55826557", "0.55743223", "0.5574049", "0.55700374", "0.55695724", "0.5559543", "0.55545366", "0.5549004", "0.5540507", "0.5530803", "0.55277365", "0.5518654", "0.5511431", "0.54999757" ]
0.65374935
0
Add a canned comment
def cli(ctx, comment, metadata=""): return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_comment(self, text, displayed, username, time,\n proposal, node_id, parent_id, moderator):\n raise NotImplementedError()", "def add_comment() -> str:\n if \"markdown\" in request.form:\n if \"file\" in request.form:\n comment = Comment(\n markdown=request.form[\"markdown\"],\n submission_id=Submission.query.filter(\n Submission.filepath.contains(request.form[\"file\"])\n )\n .first()\n .id,\n cell_id=request.form[\"cell_id\"] if \"cell_id\" in request.form else None,\n user=UserModel.get_by_token(session[\"token\"]),\n )\n # If not cell_id this is a general comment\n comment.save()\n else:\n return \"Missing file or cell_id\", 400\n else:\n return \"Missing markdown\", 400\n\n comment_maker = get_template_attribute(\"_macros.html\", \"comment_block\")\n return comment_maker(comment)", "def add_comment_to_announcement():\n vars = request.vars\n logger.info(\"vars.comment_text: %r\" % (vars.comment_text))\n comment_id = db.Comments.insert(\n comment_text = vars.comment_text,\n score = 1,\n ann_id= vars.ann_id,\n )\n comment = db.Announcements(comment_id)\n\n logger.info(\"api:add_comment_to_announcement ==> comment= %r\" % (comment))\n\n return response.json(comment)", "def add_comment(self, checkin_id: str, comment: str) -> Dict:\n method = \"checkin/addcomment/\" + checkin_id\n auth = self._get_access_token()\n if len(comment) > 140:\n raise ValueError(\n f\"Check-in comment is {len(comment)} characters whereas Untappd only supports comments up to 140 characters\"\n )\n params = {\"comment\": comment}\n return self._do_post(method, auth, params)", "def comment(self, msg):\n\t\tself._client.add_comment(self, msg)", "def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()", "def add_comment(self, text):\n selected = self.GetSelection()\n if selected != wx.NOT_FOUND:\n pseudo = get_facade()._desc.document.get_pseudo()\n self.blog.add_comment(selected, text, pseudo)\n self.refresh()\n else:\n display_warning(_(\"none selected\"))", "def comment(self, content):\n pass", "def comment():", "def add_comment(self, message):\n params = {\"ffauth_device_id\": self._DiscretelyAuthenticatedObject__device_id,\n \"ffauth_secret\": self._DiscretelyAuthenticatedObject__device_token}\n data = {\"data\": str(\n {\"event\": {\"type\": \"comment\", \"message\": message, \"assessment_details_id\": self.__assessmentDetailsId},\n \"recipient\": {\"guid\": self._DiscretelyAuthenticatedObject__guid, \"type\": \"user\"}})}\n requests.post(\n self._DiscretelyAuthenticatedObject__portal + \"/_api/1.0/tasks/\" + str(self.id) + \"/responses\",\n params=params, data=data)", "def problem_comments_append(self, identifier, comment, html=None):\n params = {\"text\": comment}\n if html is not None:\n params[\"html\"] = html\n \n self._post(\"problems/%d/comments\" % identifier, json=params)", "def comment(self, commentable, comment_text, oneshot=False, commentid=None):\r\n \r\n response = utils.comment(commentable, comment_text, self, oneshot, commentid)\r\n return response", "def add_comment(self, string):\n if self.comment is None:\n self.comment = string\n else:\n self.comment = self.comment.rstrip() + \"\\n \" + string", "def addComment(self, comment):\r\n comment.topicId = self.topicId\r\n self.comments.append(comment)\r\n return len(self.comments)-1", "def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)", "def comment(self, comment):\n self.appendString('%' + comment + '\\n')", "def 
addcomment(accountable, body):\n\n r = accountable.issue_add_comment(body)\n headers = sorted(['author_name', 'body', 'updated'])\n rows = [[v for k, v in sorted(r.items()) if k in headers]]\n rows.insert(0, headers)\n print_table(SingleTable(rows))", "def add_comment(self, comment: str):\n self.add_relationship(RDFS.comment, self._graph.string_literal(comment))", "def comment(self, body, incident_id):\n payload = {\"comment\":{\"body\":body, \"is_private\":\"false\"}}\n response = self.session.post(\n \"{0}/incidents/{1}/comments.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code", "def add_comment(self):\n comment = Comment(\n title=self.title,\n comment=self.comment,\n rating=self.rating,\n user_from_id=g.user.id,\n user_to_id=self.user_to_id\n )\n db.session.add(comment)\n db.session.commit()\n return comment", "def create_comment(self, body):\n return self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )", "def __add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)", "def _add_comment():\r\n per_page = current_app.config['FLASKY_ANSWERS_PER_PAGE']\r\n id = request.args.get('answer_id')\r\n answer = Answer.query.get_or_404(id)\r\n comment =request.args.get('comment')\r\n answers = Answer.query.get_or_404(id)\r\n page = 1\r\n result= False\r\n if current_user.can(Permission.COMMENT):\r\n comment = Comment(body=comment,\r\n author=current_user._get_current_object(),\r\n answer_id=id)\r\n db.session.add(comment)\r\n db.session.commit()\r\n page = (answer.comments.count()-1)/per_page + 1\r\n result=True\r\n pagination = Comment.query.order_by(Comment.timestamp).filter_by(answer_id=id).paginate(\r\n page,per_page=per_page,error_out=False\r\n )\r\n macro_comment = get_template_attribute(\"_comments.html\", \"render_comments\")\r\n macro_page = get_template_attribute(\"_page.html\", \"render_page\")\r\n comments = pagination.items\r\n return jsonify({'result': result,\r\n 'comment_html': macro_comment(comments),\r\n 'page_html': macro_page(pagination),\r\n 'comments_timestamp': [comment.timestamp for comment in comments],\r\n 'comments_id': [comment.id for comment in comments]\r\n })", "def add_comment(self, comment, author, date=None):\n date = date or datetime.date.today()\n self.header['COMMENT'] = '[%s %s] %s' % (author, str(date), comment)", "def add_comment_to_issue(self, issue, comment, visibility=None):\r\n self.jira.add_comment(issue=issue, body=comment)", "def add_comment(self, comment):\n assert isinstance(comment, Comment)\n self._comments.append(comment)\n return None", "def post_comment(self, entry, body, **args):\n args.update(entry=entry, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def add_comment(request):\n if request.method != 'POST':\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img_id = request.POST['id']\n try:\n img = Image.objects.get(pk=img_id)\n except:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n comment_text = request.POST['comment']\n #TODO sanitize input\n comment = ImageComment()\n comment.submission_date = timezone.now()\n comment.comment_text= comment_text\n comment.image_id = img_id\n comment.submitter_id = 
int(request.POST['uid'])\n comment.save()\n return rest.rest_success(request, img_id)", "def add_comment(self, comment: str):\n self.add_relationship(\n RDFS.comment, self._graph.string_literal(comment))", "def add_comment(self, issue, comment):\n return self.get_jira().add_comment(issue, comment)" ]
[ "0.75225097", "0.74341697", "0.7288321", "0.712091", "0.71194667", "0.7118158", "0.71123606", "0.70643234", "0.7063868", "0.70613974", "0.70483786", "0.7039511", "0.7003543", "0.6991932", "0.69380873", "0.6924826", "0.6907805", "0.69008166", "0.68908685", "0.6865075", "0.684748", "0.6820581", "0.68156", "0.6813858", "0.6813721", "0.68128574", "0.681174", "0.68097514", "0.67856324", "0.67827374" ]
0.8707222
0
The main method of the class, reads the csv and returns a pandas DataFrame object.
def run(self) -> pd.DataFrame: with open(self.file_path, 'r') as in_file: headers = in_file.readline() headers = headers.replace("\n", "") if ',' in headers: headers = headers.split(',') else: headers = headers.split() if headers == self.NORMAL_HEADERS: return self.normal_csv() else: return self.read_data_columns_to_rows()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reader(self):\n df = pd.read_csv(self.path)\n return df", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def read_csv(self) -> None:\n\n self._df = pd.read_csv(self._dataset_file)", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def read_csv():", "def csv_to_df(self, path=None):\n # reads the csv file and puts it to the dataframe\n df = pd.read_csv(path)\n return df", "def read_data_from_csv(filename: str) -> pd.DataFrame:\n try:\n data = pd.read_csv(filename)\n return data\n except(FileNotFoundError):\n print('Error: Could not read the data from csv.')\n return None", "def read_data(self) -> pd.DataFrame:\n data = pd.read_csv(self.data_path)\n assert isinstance(data, pd.DataFrame)\n return data", "def read_data(self) -> pd.DataFrame:\n data = pd.read_csv(self.data_path)\n assert isinstance(data, pd.DataFrame)\n return data", "def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def test_read_csv_to_dataframe(fname):\n df = read_csv_to_dataframe(fname)\n print(df.head())", "def read_csv(self, filepath):\n try:\n self.df = pd.read_csv(filepath)\n return self\n except FileNotFoundError as e:\n raise OperationError(f\"File not found - {filepath}\") from e\n except ParserError as e:\n raise OperationError(f\"Fails to parse file - {e}\") from e", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df", "def read_data_from_csv(filename):\n df = pd.read_csv(filename)\n return df", "def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame", "def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df", "def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def _read(**kwargs) -> DataFrame:\n Engine.subscribe(_update_engine)\n\n try:\n pd_obj = 
FactoryDispatcher.read_csv_glob(**kwargs)\n except AttributeError:\n raise AttributeError(\"read_csv_glob() is only implemented for pandas on Ray.\")\n\n # This happens when `read_csv` returns a TextFileReader object for iterating through\n if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n reader = pd_obj.read\n pd_obj.read = lambda *args, **kwargs: DataFrame(\n query_compiler=reader(*args, **kwargs)\n )\n return pd_obj\n\n return DataFrame(query_compiler=pd_obj)", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df", "def test_get_df_from_csv():\n df = get_df_from_csv('politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print(df.head())", "def get_data(filename):\r\n return pd.read_csv(filename)", "def normal_csv(self):\n if len(self.delimiter) > 0:\n df = pd.read_csv(self.file_path, delimiter=self.delimiter)\n else:\n df = pd.read_csv(self.file_path)\n\n return df", "def read_csv_file(self):\n pass", "def open_csv(filename=\"NOTEEVENTS.csv\", index=['SUBJECT_ID', 'HADM_ID']):\n df = pd.read_csv(DATA_DIR / filename,\n index_col=index,\n # nrows=1000,\n infer_datetime_format=True)\n logger.info(f\"opening {filename}\")\n logger.info(f\"Dataframe columns: {df.columns}\")\n # logger.info(f\"Clinical note types: {df['CATEGORY'].unique()}\")\n return df" ]
[ "0.7475168", "0.7415073", "0.739241", "0.728174", "0.71605515", "0.7031539", "0.6998462", "0.69451076", "0.69303966", "0.69303966", "0.69205564", "0.6900309", "0.6879413", "0.68781173", "0.6876149", "0.6874884", "0.68676704", "0.6860954", "0.6829186", "0.6784555", "0.67783505", "0.6767359", "0.6746179", "0.67325413", "0.6728165", "0.67269635", "0.6712692", "0.66946316", "0.66917706", "0.66855425" ]
0.743472
1
Merge any/all output files from subsidiary bcrham processes (used when not doing smc)
def merge_all_hmm_outputs(self, n_procs, cache_naive_seqs): assert self.args.smc_particles == 1 # have to do things more complicatedly for smc if self.args.action == 'partition': # merge partitions from several files if n_procs > 1: self.merge_subprocess_files(self.hmm_cachefname, n_procs) if not cache_naive_seqs: if n_procs == 1: infnames = [self.hmm_outfname, ] else: infnames = [self.args.workdir + '/hmm-' + str(iproc) + '/' + os.path.basename(self.hmm_outfname) for iproc in range(n_procs)] previous_info = None if len(self.paths) > 1: previous_info = self.paths[-1] glomerer = Glomerator(self.reco_info) glomerer.read_cached_agglomeration(infnames, smc_particles=1, previous_info=previous_info, calc_adj_mi=self.args.debug, debug=self.args.debug) #, outfname=self.hmm_outfname) assert len(glomerer.paths) == 1 # self.check_path(glomerer.paths[0]) self.paths.append(glomerer.paths[0]) else: self.merge_subprocess_files(self.hmm_outfname, n_procs) if not self.args.no_clean: if n_procs == 1: print 'removing ', self.hmm_outfname os.remove(self.hmm_outfname) else: for iproc in range(n_procs): subworkdir = self.args.workdir + '/hmm-' + str(iproc) os.remove(subworkdir + '/' + os.path.basename(self.hmm_infname)) if os.path.exists(subworkdir + '/' + os.path.basename(self.hmm_outfname)): print 'removing ', subworkdir + '/' + os.path.basename(self.hmm_outfname) os.remove(subworkdir + '/' + os.path.basename(self.hmm_outfname)) os.rmdir(subworkdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combineAllGraphFiles(chroms, final_out):\n outfile = open(final_out,'w');\n outfile.close();\n \n for chrom in chroms:\n graph_file = chrom + \".graph\";\n try:\n if os.system('%s %s >> %s' %\n (cat, graph_file, final_out)): raise\n except: sys.stderr.write(\"cat failed at %s\\n\" % chrom)", "def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]", "def gatherfiles(self):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\t\timport tarfile\n\t\timport glob\n\t\t\n\t\tprint \"=== \",self.nameID,\": Joining all the files in one\"\n\t\t# FIXME: Only there are 1 file, not needed the hadd\n\t\tfinalfile = os.path.join(\"Results\",self.outputfile)\n\t\t# FIXED BUG: just cp when there is only one file, otherwise\n\t\t# there are problems with the TTree\n\t\tif len(self.outputfiles) == 1:\n\t\t\t# Note that when there is only 1 file, always its #task=1\n\t\t\tcommand = [ 'cp', self.outputfiles[1], finalfile ]\n\t\telse:\n\t\t\tcommand = [ 'haddPlus', finalfile ]\n\t\t\tfor f in self.outputfiles.itervalues():\n\t\t\t\tcommand.append( f )\n\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t# Checking if everything was allright\n\t\ttotalevts = self.getevents(finalfile,True)\n\t\tif totalevts != self.nevents:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m the total 
file\"\n\t\t\tmessage += \"'\"+finalfile+\"' do not contain all the events:\\n\"\n\t\t\tmessage += \"Total events to be processed:\"+str(self.nevents)+\"\\n\"\n\t\t\tmessage += \"Total events in '\"+finalfile+\"':\"+str(totalevts)+\"\\n\"\n\t\t\tprint message\n\t\t\treturn \n\t\t# If everything was fine, deleting the files \n\t\t# and cleaning the directory\n\t\tfor f in self.outputfiles.itervalues():\n\t\t\tos.remove( f )\n\t\t# Taring and compressing\n\t\tfilestotar = glob.glob(\"./*.*\")\n\t\tfilestotar.append( \".storedmanager\")\n\t\ttar = tarfile.open(os.path.basename(self.cwd)+\".tar.gz\",\"w:gz\")\n\t\tfor f in filestotar:\n\t\t\ttar.add(f)\n\t\ttar.close()\n\t\t# if everything was fine, deleting the files\n\t\tif os.path.exists(os.path.basename(self.cwd)+\".tar.gz\"):\n\t\t\tfor f in filestotar:\n\t\t\t\tos.remove(f)\n\t\telse:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m I can't manage\\n\"\n\t\t\tmessage += \"to create the backup .tar.gz file\\n\"\n\t\t\tprint message\n\n\t\tprint \"Created \"+finalfile\n\t\tprint \"========= Process Completed =========\"", "def mergebams(bamlist,outbamfn):\n args = ['samtools','merge','-f',outbamfn] + bamlist\n print \"merging, cmd: \",args\n subprocess.call(args)\n\n for bamfile in bamlist:\n os.remove(bamfile)\n os.remove(bamfile + '.bai')", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. 
Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def merge_wrapper(processdir, basedir, starglob, superstarglob, calibrootglob, njobs=2, invert=False):\n for glob in [starglob, superstarglob, calibrootglob]:\n assert path.dirname(glob), \\\n f\"Glob : {glob} should be/contain a subdirectory\"\n\n superstarGlobNew = get_glob_strings(superstarglob)\n calibrootGlob1, calibrootGlob2 = get_glob_strings(calibrootglob)\n superstardir = get_dir_from_glob(processdir, superstarglob)\n calibdir = get_dir_from_glob(basedir, calibrootglob)\n starglob = processdir + starglob\n\n # ssmcolfnames = converter(superstardir,\n # globstr1=superstarGlobNew,\n # globstr2=superstarGlobNew,\n # njobs=42,\n # mergecolsonly=True)\n # yecho(\"SuperStarfiles done.\")\n # tofiltercalibglob = converter(processdir,\n # globstr1=calibrootGlob1,\n # globstr2=calibrootGlob2,\n # njobs=42,\n # mergecolsonly=False)\n # yecho(\"Extracting done.\")\n tofiltercalibglob = \"./csv/*.csv\"\n ssmcolfnames = glob_and_check(\"./superstar/mergecols/*.csv\")\n\n yecho(\"Removing events.\")\n if njobs > 1:\n splitcalib = split_by_dates(tofiltercalibglob)\n splitstar = split_by_dates(starglob)\n splitss = split_by_dates(ssmcolfnames)\n # needs filename output\n assert len(splitcalib) == len(splitstar) == len(splitss), \"only works the first time when no calibfiles got moved, for everything else this needs a new function with more logic\"\n Parallel(n_jobs=njobs)\\\n (delayed(single_remove_events)(calibglob, starglob, ssglob, njobs, invert)\n for calibglob, starglob, ssglob in zip(splitcalib, splitstar, splitss))\n # filteredFiles = [f for arr in filteredFiles for f in arr]\n else:\n check_telescope_files(rootdir=None, globstr1=ssmcolfnames,\n globstr2=calibmcolfnames, replacer=(\"_Y_\", \"_I_\"))\n remover = EventRemover(tofiltercalibglob=tofiltercalibglob,\n starglob=starglob,\n superstarmcolglob=ssmcolfnames)\n remover.remove_events()\n filteredFiles = remover.outfilenames\n yecho(\"Removed events that get thrown out during image cleaning and superstar processing and wrote the merged runs to:\")\n yecho(f\"{path.basename(filteredFiles[0])}\")\n # return filteredFiles", "def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = 
os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. 
Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True", "def bed_merge(output_file, *inputfiles):\n working_dir = os.path.dirname(inputfiles[0]);\n temp_file1 = working_dir + os.sep + \"temp_dfj304jfd.txt\";\n\n #Concatenate input files\n cat_command = ['cat'];\n cat_command.extend(inputfiles);\n with open(temp_file1, 'w') as fout:\n sp.check_call(cat_command, stdout=fout);\n\n #Sort file to be merged\n temp_file2 = working_dir + os.sep + \"temp_fje094j3.txt\";\n with open(temp_file2, 'w') as fout:\n sp.check_call(['sortBed','-i',temp_file1], stdout=fout);\n\n #Merge file\n if(output_file.find(os.sep) == -1):\n output_file = working_dir + os.sep + output_file;\n\n with open(output_file, 'w') as fout:\n sp.check_call(['bedtools','merge','-i',temp_file2], stdout=fout);\n\n #Clean up temporary files\n os.remove(temp_file1);\n os.remove(temp_file2);\n\n return output_file;", "def mergefsl(log, file_list, outname):\n cmdargs = split('fslmerge -t {} {}'.format(outname, file_list))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def perform_combination(sonar_model, input_paths, output_path, engine):\n # TODO: there should be compression option for the combined file too...\n\n def coerce_type(ds, group):\n if group == 'Beam':\n if sonar_model == 'EK80':\n ds['transceiver_software_version'] = ds['transceiver_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n elif sonar_model == 'EK60':\n ds['gpt_software_version'] = ds['gpt_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n\n print(f\"{dt.now().strftime('%H:%M:%S')} combining files...\")\n\n # TODO: add in the documentation that the Top-level and Sonar groups are\n # combined by taking values (attributes) from the first file\n # Combine Top-level group, use values from the first file\n with xr.open_dataset(input_paths[0], engine=engine) as ds_top:\n io.save_file(ds_top, path=output_path, mode='w', engine=engine)\n\n # Combine Sonar group, use values from the first file\n with xr.open_dataset(input_paths[0], group='Sonar', engine=engine) as ds_sonar:\n io.save_file(ds_sonar, path=output_path, mode='a', engine=engine, group='Sonar')\n\n # Combine Provenance group,\n ds_prov = assemble_combined_provenance(input_paths)\n io.save_file(ds_prov, path=output_path, mode='a', engine=engine, group='Provenance')\n\n # TODO: Put the following in docs:\n # Right now we follow xr.combine_by_coords default to only combine files\n # with nicely monotonically varying ping_time/location_time/mru_time.\n # However we know there are lots of problems with pings going backward in time for EK60/EK80 files,\n # and we will need to clean up data before calling merge.\n # Combine Beam\n with 
xr.open_mfdataset(input_paths, group='Beam',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_beam:\n coerce_type(ds_beam, 'Beam')\n io.save_file(ds_beam.chunk({'range_bin': DEFAULT_CHUNK_SIZE['range_bin'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}), # these chunk sizes are ad-hoc\n path=output_path, mode='a', engine=engine, group='Beam')\n\n # Combine Environment group\n with xr.open_mfdataset(input_paths, group='Environment',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_env:\n io.save_file(ds_env.chunk({'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Environment')\n\n # Combine Platform group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Platform',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='identical', engine=engine) as ds_plat:\n io.save_file(ds_plat, path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model == 'EK60':\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'ping_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model in ['EK80', 'EA640']:\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'mru_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'mru_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n\n # Combine Platform/NMEA group\n if sonar_model in ['EK60', 'EK80', 'EA640']:\n with xr.open_mfdataset(input_paths, group='Platform/NMEA',\n concat_dim='location_time', data_vars='minimal', engine=engine) as ds_nmea:\n io.save_file(ds_nmea.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time']}).astype('str'),\n path=output_path, mode='a', engine=engine, group='Platform/NMEA')\n\n # Combine Vendor-specific group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Vendor',\n concat_dim=['ping_time', 'frequency'],\n data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n else:\n with xr.open_mfdataset(input_paths, group='Vendor',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='no_conflicts', data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n\n # TODO: print out which group combination errors out and raise appropriate error\n\n print(f\"{dt.now().strftime('%H:%M:%S')} all files combined into {output_path}\")", "def merge_test_files():\n for syscall_type in SYSCALLS:\n self_file = open(f\"{TEMP_DIR}/{syscall_type}-self-split.test\")\n nonself_file = open(f\"{TEMP_DIR}/{syscall_type}-nonself-split.test\")\n merged_file = open(f\"{TEMP_DIR}/{syscall_type}-merged-split.test\", \"w\")\n merged_lines = self_file.readlines()\n merged_lines.extend(nonself_file.readlines())\n merged_file.writelines(merged_lines)\n self_file.close()\n nonself_file.close()\n merged_file.close()", "def main(paths, minscore, outfile, is_matrix):\n \n # Get references and lengths from first BAM file.\n # We need these to print them in the output.\n # Might as well do it before 
spawning all those processes.\n firstfile = pysam.AlignmentFile(paths[0], \"rb\")\n references = firstfile.references\n lengths = firstfile.lengths\n \n if not len(references) == len(lengths):\n raise ValueError('Could not parse headers of first bam-file')\n \n # Spawn independent processed to calculate RPKM for each of the BAM files\n processresults = list()\n processes_done = 0\n \n # This is just to print to terminal when a process finishes. Not necessary.\n def callback(result, totalps=len(paths)):\n \"Generator yielding processed\"\n nonlocal processes_done\n processes_done += 1\n print('Files processed: {}/{}'.format(processes_done, totalps))\n return None\n\n # Queue all the processes\n with multiprocessing.Pool(processes=args.processors) as pool:\n for fileno, path in enumerate(paths):\n arguments = (fileno, path, args.minscore)\n processresults.append(pool.apply_async(get_contig_rpkms, arguments,\n callback=callback, error_callback=callback))\n \n # For some reason, this is needed.\n pool.close()\n pool.join()\n \n print('All processes finished. Checking outputs')\n sample_rpkms = list()\n \n for processresult in processresults:\n if processresult.successful():\n sample_rpkms.append(processresult.get())\n \n else:\n raise multiprocessing.ProcessError\n \n # sample_rpkms now contain (identifier, sample_rpkms) tuples, in the order\n # they were returned from the pool. We want to sort them by identifier,\n # so that we know which RPKMs belong to which BAM file\n sample_rpkms.sort()\n \n # Now we can discard the identifiers\n sample_rpkms = [i[1] for i in sample_rpkms]\n \n # Each BAM file MUST contain the same headers\n if not all(len(rpkms) == len(lengths) for rpkms in sample_rpkms):\n raise ValueError('Not all BAM files contain the same amount of headers.')\n \n print('Outputs alright. 
Printing table.')\n \n with open(outfile, 'w') as filehandle:\n # Print header if asked\n if not is_matrix:\n print('#contig\\tcontiglength', '\\t'.join(paths), sep='\\t', file=filehandle)\n \n # Print the actual output\n for fields in zip(references, lengths, *sample_rpkms):\n numbers = '\\t'.join([str(round(i, 3)) for i in fields[2:]])\n \n if not is_matrix:\n print(fields[0], fields[1], sep='\\t', end='\\t', file=filehandle)\n \n print(numbers, file=filehandle)", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def merge_regions(bed_files, out_bed):\n merge_all = (\"zcat {0} | \"\n \"sort -k1,1 -k2,2n | \"\n \"bedtools merge -i stdin | \"\n \"gzip -c \"\n \"> {1}\").format(' '.join(bed_files), out_bed)\n print merge_all\n os.system(merge_all)\n\n return None", "def run_process(hrc):\n#\n#--- set conditions for either hrc-i or hrc s\n#\n if hrc == 'hrc_i':\n out_list = 'hrc_i_list'\n data_dir = '/data/hrc/i/'\n inst = 'i'\n else:\n out_list = 'hrc_s_list'\n data_dir = '/data/hrc/s/'\n inst = 's'\n#\n#--- make a list of obsids\n#\n cmd = 'ls -d ' + data_dir + '* > ' + zspace\n os.system(cmd)\n data = mcf.read_data_file(zspace, remove=1)\n hlist = []\n for ent in data:\n atemp = re.split('\\/', ent)\n obsid = atemp[-1]\n if mcf.is_neumeric(obsid):\n hlist.append(obsid)\n\n# if hrc == 'hrc_i':\n# print(\"HRC I : \" + str(hlist))\n# else:\n# print(\"HRC S : \" + str(hlist))\n# \n for obsid in hlist:\n obsid = str(int(float(obsid)))\n\n with open(out_list, 'w') as fo:\n fo.write(str(obsid) + '\\n')\n cmd = 'rm -rf ' + data_dir + obsid + \"analysis/*\"\n os.system(cmd)\n#\n#--- extract fits data needed for analysis\n#\n chk = extract_hrc_data(obsid, data_dir)\n if chk == False:\n print(\"Not all data are available\")\n continue\n\n if hrc == 'hrc_i':\n cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh hrc_i_list'\n else:\n cmd = 'csh -f ' + bin_dir + 'repro_all_S_new.csh hrc_s_list'\n\n try:\n run_ciao(cmd)\n cdir = data_dir + '/' + str(obsid)\n if os.path.isdir(cdir):\n cmd = 'chgrp -R hat ' + cdir \n os.system(cmd)\n cmd = 'chmod -R 775 ' + cdir \n os.system(cmd)\n#\n#--- directory name should be 5 digit\n#\n test = int(float(obsid))\n if test < 10000:\n chk = mcf.add_leading_zero(obsid, 5)\n odir = data_dir + '/' + str(chk)\n if os.path.isdir(odir):\n cmd = 'rm -rf ' + odir\n os.system(cmd)\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n else:\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n except:\n pass\n\n mcf.rm_files(out_list)\n correct_naming(obsid, inst)\n\n #chk_proccess_status(inst, hlist)", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n 
yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def GenotypeGVCFs():\n #creates sbatch files to merge batches of batch_size genomics vcf\n cwd = os.getcwd()\n sbatch_files = []\n if not os.path.isdir(os.path.join(cwd, \"01_CombineGVCFs\")):\n sys.exit(\"Directory 01_CombineGVCFs does not exits exists, something went wrong here.\")\n if os.path.isdir(os.path.join(cwd, \"02_GenotypeGVCFs\")):\n print \"WARNING: 02_GenotypeGVCFs already present, assuming this step has been completed with success.\"\n return sbatch_files\n else:\n #create the folder structure\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"sbatch\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_err\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_out\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"VCF\"))\n #Build the sbatch files for the join calling step\n working_dir = os.path.join(cwd, \"02_GenotypeGVCFs\")\n #now retrive the VCF stored in 01_CombineGVCFs/VCF/\n combined_gvcfs_to_process = []\n if len(CONFIG[\"intervals_list\"]) == 0:\n #no intervals, I have one file for each batch\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n 
combined_gvcf_files.append(combined_gvcf_full_path)\n combined_gvcfs_to_process.append(combined_gvcf_files)\n else:\n for interval in CONFIG[\"intervals_list\"]:\n interval_name = os.path.basename(interval).split(\".\")[0]\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}_{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch, interval_name)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n #now ceate a list with interval file and all gvcf to be combines\n interval_plus_gvcfs = [interval ,combined_gvcf_files]\n combined_gvcfs_to_process.append(interval_plus_gvcfs)\n for interval_plus_gvcfs in combined_gvcfs_to_process:\n interval = interval_plus_gvcfs[0]\n combined_gvcf_files = interval_plus_gvcfs[1]\n sbatch_file = build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, CONFIG[\"scratch\"], interval)\n sbatch_files.append(sbatch_file)\n return sbatch_files", "def merge(self):\n self.decompress_files()\n teqc_path = os.path.join(ROOT_DIR, 'teqc')\n if not os.path.isfile(teqc_path):\n raise OSError('Cannot find TEQC binary in project directory!')\n\n # currently cannot tell if there are daily logs or not\n daily_logs = glob('{}/*0.??o'.format(self.__directory)) + \\\n glob('{}/*.??d'.format(self.__directory))\n try:\n # Merge and extract the time window from the Rinex files if using daily logs\n if daily_logs:\n start_timestamp = START_TIMESTAMP.format(*self.__start)\n end_timestamp = END_TIMESTAMP.format(*self.__end)\n # files must be entered into TEQC in (a specific) chronological order or it will fail\n # hence we must sort them\n day_logs_uncompressed = glob(\n '{}/*0.??o'.format(self.__directory))\n hourly_logs_uncompressed = glob(\n '{}/*[a-z].??o'.format(self.__directory))\n files = sorted(day_logs_uncompressed, key=self.__file_sorter) + \\\n sorted(hourly_logs_uncompressed, key=self.__file_sorter)\n subprocess.run(\n ['{0} -O.s M -st {1} -e {2} {3} > {4}.obs'.format(teqc_path, start_timestamp, end_timestamp, ' '.join(files), self.__station)], capture_output=True, shell=True)\n else:\n # Merge files as is if there are no daily logs present\n subprocess.run(\n \"{0} -O.s M {1}/*.??o > {2}.obs\".format(teqc_path, self.__directory, self.__station), capture_output=True, shell=True)\n except Exception as e:\n print(e)\n # raise RuntimeError('Error occurred while trying to merge files.')", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' 
% file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def merge_root_files(self, force=False):\n self.OutFilePath.parent.mkdir(exist_ok=True)\n cmd = f'hadd{\" -f\" if force else \"\"} {self.proteus_raw_file_path()} {self.Raw.OutFilePath} {self.Ref.OutFilePath} {self.Adc2Vcal.OutFilePath}'\n pinfo(cmd)\n check_call(cmd, shell=True)", "def merge_bam_files(self, inputs, output, sample_id, rg_id=None,\n platform='illumina', library='A', sort_order=\"readname\"):\n if len(inputs) > 1:\n if sort_order == \"readname\":\n sort_options = \"-n\"\n else:\n sort_options = \"\"\n \n header_file = p.as_temp(\"%s.header\" % output)\n\n with open(header_file, \"w\") as header:\n for ix, input_file in enumerate(inputs):\n # TODO use pysam here\n in_header = pysam.Samfile(input_file,'rb',check_header=False, check_sq=False).text\n RG_lines = filter(lambda x: x.startswith(\"@RG\"), in_header.split(\"\\n\"))\n if len(RG_lines) == 1:\n rg_id = re.findall(\"ID:([a-zA-Z0-9_\\-\\.]*)\", RG_lines[0])[0]\n else:\n rg_id = re.sub(\"\\.bam$\", \"\", os.path.basename(input_file))\n header.write(\"@RG\\tID:%s\\tPU:%s\\tDS:%s\\tLB:%s\\tPL:%s\\tSM:%s\\n\" % (rg_id, rg_id, input_file, library, platform, sample_id))\n merge_options = \"-h %s\" % (header_file)\n\n self.cmd(\"{samtools} merge \\\n {sort_options} \\\n {merge_options} \\\n {output_bam} {input_bam_list}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n sort_options=sort_options,\n merge_options=merge_options,\n output_bam=output,\n input_bam_list=\" \".join(inputs),\n ),\n shell=True)\n else:\n # TODO use pysam here\n input_file = inputs[0]\n in_header = pysam.Samfile(input_file,'rb',check_header=False, check_sq=False).text\n RG_lines = filter(lambda x: x.startswith(\"@RG\"), in_header.split(\"\\n\"))\n if len(RG_lines) == 1:\n rg_id = re.findall(\"ID:([a-zA-Z0-9_\\-\\.]*)\", RG_lines[0])[0]\n else:\n rg_id = re.sub(\"\\.bam$\", \"\", os.path.basename(input_file))\n with open(p.as_temp(\"%s.header\" % output), \"w\") as header:\n header.write(\"@RG\\tID:%s\\tPU:%s\\tDS:%s\\tLB:%s\\tPL:%s\\tSM:%s\\n\" % (rg_id, rg_id, input_file, library, platform, sample_id))\n \n self.cmd(\"{picard}/AddOrReplaceReadGroups.jar \\\n INPUT={in_bam} \\\n OUTPUT={out_bam} \\\n QUIET=false \\\n VALIDATION_STRINGENCY=LENIENT\\\n COMPRESSION_LEVEL=5 \\\n RGID={rg_id} \\\n RGSM={sample_id} \\\n RGPU={rg_id} \\\n RGLB=A \\\n RGPL=illumina \\\n RGDS={in_bam}\"\n .format(\n picard=self.cmds[\"picard\"],\n in_bam=inputs[0],\n out_bam=output,\n sample_id=sample_id,\n rg_id=rg_id,\n ),\n shell=True)", "def bam_output(args):\n\n for strand in ['watson', 'crick']:\n merged_sam = os.path.join(args.output_dir, '%s_mergedAligned.out.sam' % strand)\n joined_sam = os.path.join(args.output_dir, '%s_joinedAligned.out.sam' % strand)\n out_sam = tempfile.NamedTemporaryFile(prefix=strand, suffix='.sam', dir=args.output_dir)\n #rewrite sam file merged and joined for watson and crick\n parse_sam(merged_sam, out_sam.name, 'merged', strand)\n #TODO: determine why joined reads have more soft-clips or single read matches\n parse_sam(joined_sam, out_sam.name, 'joined', strand)\n #convert to sorted and indexed bam\n cmd = 'cat %s %s |samtools 
view -@ 4 -Shb |sambamba sort -m 4GB --tmpdir %s -t %s -o %s /dev/stdin'%(args.header,\n out_sam.name,args.tmpdir, args.threads,\n os.path.join(args.output_dir,'%s.bam' % strand) )\n log = \"make sorted bam file\"\n run_subprocess([cmd], args, log)\n out_sam.close()\n return args", "def removeRedundantFiles(workdir, outputfiles=[]):\n\n logger.info(\"Removing redundant files prior to log creation\")\n\n workdir = os.path.abspath(workdir)\n\n dir_list = [\"AtlasProduction*\",\n \"AtlasPoint1\",\n \"AtlasTier0\",\n \"buildJob*\",\n \"CDRelease*\",\n \"csc*.log\",\n \"DBRelease*\",\n \"EvgenJobOptions\",\n \"external\",\n \"fort.*\",\n \"geant4\",\n \"geomDB\",\n \"geomDB_sqlite\",\n \"home\",\n \"o..pacman..o\",\n \"pacman-*\",\n \"python\",\n \"runAthena*\",\n \"share\",\n \"sources.*\",\n \"sqlite*\",\n \"sw\",\n \"tcf_*\",\n \"triggerDB\",\n \"trusted.caches\",\n \"workdir\",\n \"*.data*\",\n \"*.events\",\n \"*.py\",\n \"*.pyc\",\n \"*.root*\",\n \"JEM\",\n \"tmp*\",\n \"*.tmp\",\n \"*.TMP\",\n \"MC11JobOptions\",\n \"scratch\",\n \"jobState-*-test.pickle\",\n \"*.writing\",\n \"pwg*\",\n \"pwhg*\",\n \"*PROC*\",\n \"madevent\",\n \"HPC\",\n \"objectstore*.json\",\n \"saga\",\n \"radical\",\n \"ckpt*\"]\n\n # remove core and pool.root files from AthenaMP sub directories\n try:\n cleanupAthenaMP(workdir, outputfiles)\n except Exception, e:\n print(\"Failed to execute cleanupAthenaMP(): %s\" % (e))\n\n # explicitly remove any soft linked archives (.a files) since they will be dereferenced by the tar command (--dereference option)\n matches = []\n import fnmatch\n for root, dirnames, filenames in os.walk(workdir):\n for filename in fnmatch.filter(filenames, '*.a'):\n matches.append(os.path.join(root, filename))\n for root, dirnames, filenames in os.walk(os.path.dirname(workdir)):\n for filename in fnmatch.filter(filenames, 'EventService_premerge_*.tar'):\n matches.append(os.path.join(root, filename))\n if matches != []:\n for f in matches:\n remove(f)\n # else:\n # print(\"Found no archive files\")\n\n # note: these should be partitial file/dir names, not containing any wildcards\n exceptions_list = [\"runargs\", \"runwrapper\", \"jobReport\", \"log.\"]\n\n to_delete = []\n for _dir in dir_list:\n files = glob(os.path.join(workdir, _dir))\n exclude = []\n\n if files:\n for exc in exceptions_list:\n for f in files:\n if exc in f:\n exclude.append(os.path.abspath(f))\n\n _files = []\n for f in files:\n if not f in exclude:\n _files.append(os.path.abspath(f))\n to_delete += _files\n\n exclude_files = []\n for of in outputfiles:\n exclude_files.append(os.path.join(workdir, of))\n for f in to_delete:\n if not f in exclude_files:\n remove(f)\n\n # run a second pass to clean up any broken links\n broken = []\n for root, dirs, files in os.walk(workdir):\n for filename in files:\n path = os.path.join(root, filename)\n if os.path.islink(path):\n target_path = os.readlink(path)\n # Resolve relative symlinks\n if not os.path.isabs(target_path):\n target_path = os.path.join(os.path.dirname(path), target_path)\n if not os.path.exists(target_path):\n broken.append(path)\n else:\n # If it's not a symlink we're not interested.\n continue\n\n if broken:\n for p in broken:\n remove(p)\n\n return 0", "def _main_extract_CDS(args = None, stdout = None, stderr = None,\n gb_record_fmtdict = None,\n gb_cds_fmtdict = None) :\n if stdout is None :\n stdout = sys.stdout\n if stderr is None :\n stderr = sys.stderr\n if gb_record_fmtdict is None :\n gb_record_fmtdict = _GB_RECORD_FMTDICT\n if gb_cds_fmtdict is None :\n 
gb_cds_fmtdict = _GB_CDS_FMTDICT\n # Process arguments\n if args is None :\n parser = _makeParser_extract_CDS()\n args = parser.parse_args()\n args = _processArgsToLogic_extract_CDS(args, stdout, stderr,\n gb_record_fmtdict, gb_cds_fmtdict)\n # Go through the input files\n uniqueSeq = dict()\n i_file = 0\n for fi in args.genbank_records :\n i_file += 1\n if args.verbose :\n stderr.write(time.asctime() + \" - \" +\n \"Processing file \" + str(i_file) + \" : \" +\n os.path.basename(fi) + \" - \" +\n \"N unique seq : \" + str(len(uniqueSeq.keys())) + \"\\n\")\n record = SeqIO.parse(fi, \"genbank\")\n for r in record :\n if not args.actionFlags.get(\"DoCount\", False) :\n (summaryString, uniqueSeq, newSeq) = (\n _summarizeRecord(r, args.outfmt, args.hash, uniqueSeq))\n stdout.write(summaryString)\n else :\n count = len([x for x in r.features if x.type == \"CDS\"])\n stdout.write(r.annotations[\"gi\"] + \"\\t\" + str(count) + \"\\n\")\n # Write unique sequences\n if args.actionFlags.get(\"DoUniqueSequences\", False) :\n with open(args.unique, \"w\") as fo :\n for (k, v) in uniqueSeq.items() :\n fo.write(\">\" + k + \"\\n\")\n fo.write(v + \"\\n\")", "def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n 
fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()", "def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)", "def aggregate_bam_files(wildcards):\n\tsample_name, prep_type, seq_mode, abundance_control, sample_type, seq_type = get_sample_info(wildcards)\n\n\tif \"2x\" in seq_mode and seq_type == \"bisulfite\":\n\t\tbam = 'sample_output/pe_bisulfite_aligned/raw_aligned/{sample}.bam'\n\t\tmapped_all_chr = 'sample_output/pe_bisulfite_aligned/all_chr/{sample}_mapped_all_chr.bam'\n\t\tmapped_all_chr_bai = 'sample_output/pe_bisulfite_aligned/all_chr/{sample}_mapped_all_chr.bam.bai'\n\t\tunmapped_R1 = 'sample_output/pe_bisulfite_aligned/unmapped/{sample}_pe_unmapped_R1.fastq.gz'\n\t\tunmapped_R2 = 'sample_output/pe_bisulfite_aligned/unmapped/{sample}_pe_unmapped_R2.fastq.gz'\n\n\tif \"2x\" in seq_mode and seq_type == \"standard\":\n\t\tbam = 'sample_output/pe_stdseq_aligned/raw_aligned/{sample}.bam'\n\t\tmapped_all_chr = 'sample_output/pe_stdseq_aligned/all_chr/{sample}_mapped_all_chr.bam'\n\t\tmapped_all_chr_bai = 'sample_output/pe_stdseq_aligned/all_chr/{sample}_mapped_all_chr.bam.bai'\n\t\tunmapped_R1 = 'sample_output/pe_stdseq_aligned/unmapped/{sample}_pe_unmapped_R1.fastq.gz'\n\t\tunmapped_R2 = 'sample_output/pe_stdseq_aligned/unmapped/{sample}_pe_unmapped_R2.fastq.gz'\n\n\treturn[bam, mapped_all_chr, mapped_all_chr_bai, unmapped_R1, unmapped_R2]", "def main(argv):\n\n \n\n if validate_argv(argv) is False:\n print \"Usage: mergeFiles.py <search_term>\"\n sys.exit()\n\n input_directory_name = 'data_raw'\n search_term = argv[0]\n output_file_name = search_term + '_merged.tsv'\n output_directory_name = 'merged'\n\n\n output_path = fp.set_output_file_path(output_file_name, output_directory_name) \n output = open(output_path, 'a')\n for h1 in range(3):\n for h2 in range(10):\n for m1 in range(6):\n for m2 in range(10):\n file_name = search_term + '_' + str(h1) + str(h2) + str(m1) + str(m2) + '.tsv'\n file_path = fp.get_file_path(file_name, input_directory_name)\n if fp.filename_exists(file_path):\n file = open(file_path, 'r')\n file.next()\n for line in file:\n output.write(line)\n file.close()\n output.close()", "def _merge_files(parse_results: Iterable[ParseResult]) -> Iterable[ParseResult]:\n return map(_merge_records, groupby_file(parse_results))", "def split(bin_lid):\n resolvers = resolver.parse_stream(RESOLVER)\n suffixes = ['_cfa_' + camera for camera in 'LR']\n outdirs = [scratch(bin_lid,bin_lid + 
suffix) for suffix in suffixes]\n for od in outdirs:\n mkdirs(od)\n imagenames = list(list_images(bin_lid))\n (h,w)=(None,None)\n tiff = None\n # read an image to determine h,w\n for imagename in imagenames:\n for outdir,suffix in zip(outdirs,suffixes):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if h is None:\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n (h,w) = imread(cfain,plugin='freeimage').shape\n # now fork\n pids = []\n for n in range(NUM_PROCS):\n pid = os.fork()\n if pid == 0:\n for imagename in imagenames[n::NUM_PROCS]:\n tiff = None\n for outdir,suffix,offset in zip(outdirs,suffixes,[0,1]):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if not os.path.exists(LRout):\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n logging.info('loading %s' % cfain)\n cfa = imread(cfain,plugin='freeimage')\n (h,w) = cfa.shape\n if not os.path.exists(LRout):\n logging.info('splitting %s -> %s' % (cfain, LRout))\n half = w / 2\n off = offset * half\n imsave(LRout,cfa[:,off:off+half],plugin='freeimage')\n os._exit(0)\n else:\n pids += [pid]\n for pid in pids:\n os.waitpid(pid,0)\n logging.info('joined splitting process %d' % pid)\n return (h,w),outdirs" ]
[ "0.62794083", "0.6261093", "0.61784226", "0.61733913", "0.6107145", "0.6077044", "0.60737073", "0.5992166", "0.59356683", "0.58938164", "0.5881873", "0.58431023", "0.5833292", "0.5832355", "0.58270615", "0.58217084", "0.5784522", "0.5717899", "0.5691984", "0.56713927", "0.5654596", "0.56275743", "0.56156516", "0.5605297", "0.560412", "0.5583885", "0.5582724", "0.555073", "0.5545959", "0.55322105" ]
0.6633998
0
Merge the output from pairs of processes (used when doing smc)
def merge_pairs_of_procs(self, n_procs): assert self.args.action == 'partition' assert self.args.smc_particles > 1 if n_procs > 1: groups_to_merge = [[i, i+1] for i in range(0, n_procs-1, 2)] # e.g. for n_procs = 5, we merge the groups [0, 1], [2, 3, 4] else: groups_to_merge = [[], ] if n_procs % 2 != 0: # if it's odd, add the last proc to the last group groups_to_merge[-1].append(n_procs-1) self.smc_info.append([]) for group in groups_to_merge: if n_procs == 1: infnames = [self.hmm_outfname, ] else: infnames = [self.args.workdir + '/hmm-' + str(iproc) + '/' + os.path.basename(self.hmm_outfname) for iproc in group] assert len(self.smc_info[-2]) == n_procs previous_info = None if len(self.smc_info) > 2: previous_info = [self.smc_info[-2][iproc] for iproc in group] glomerer = Glomerator(self.reco_info) paths = glomerer.read_cached_agglomeration(infnames, self.args.smc_particles, previous_info=previous_info, calc_adj_mi=self.args.debug, debug=self.args.debug) #, outfname=self.hmm_outfname) self.smc_info[-1].append(paths) # ack? self.glomclusters.append(glomerer) # boof? self.list_of_preclusters.append(glomerer.combined_conservative_best_minus_ten_partitions) if n_procs > 1: # TODO I don't think this is right any more... self.merge_subprocess_files(self.hmm_cachefname, n_procs) if not self.args.no_clean: if n_procs == 1: os.remove(self.hmm_outfname) else: for iproc in range(n_procs): subworkdir = self.args.workdir + '/hmm-' + str(iproc) os.remove(subworkdir + '/' + os.path.basename(self.hmm_infname)) os.remove(subworkdir + '/' + os.path.basename(self.hmm_outfname)) os.rmdir(subworkdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_par_results(res):\n nres = {}\n for r in res:\n nres.update(r)\n return nres", "def merge_all_hmm_outputs(self, n_procs, cache_naive_seqs):\n assert self.args.smc_particles == 1 # have to do things more complicatedly for smc\n if self.args.action == 'partition': # merge partitions from several files\n if n_procs > 1:\n self.merge_subprocess_files(self.hmm_cachefname, n_procs)\n\n if not cache_naive_seqs:\n if n_procs == 1:\n infnames = [self.hmm_outfname, ]\n else:\n infnames = [self.args.workdir + '/hmm-' + str(iproc) + '/' + os.path.basename(self.hmm_outfname) for iproc in range(n_procs)]\n previous_info = None\n if len(self.paths) > 1:\n previous_info = self.paths[-1]\n glomerer = Glomerator(self.reco_info)\n glomerer.read_cached_agglomeration(infnames, smc_particles=1, previous_info=previous_info, calc_adj_mi=self.args.debug, debug=self.args.debug) #, outfname=self.hmm_outfname)\n assert len(glomerer.paths) == 1\n # self.check_path(glomerer.paths[0])\n self.paths.append(glomerer.paths[0])\n else:\n self.merge_subprocess_files(self.hmm_outfname, n_procs)\n\n if not self.args.no_clean:\n if n_procs == 1:\n print 'removing ', self.hmm_outfname\n os.remove(self.hmm_outfname)\n else:\n for iproc in range(n_procs):\n subworkdir = self.args.workdir + '/hmm-' + str(iproc)\n os.remove(subworkdir + '/' + os.path.basename(self.hmm_infname))\n if os.path.exists(subworkdir + '/' + os.path.basename(self.hmm_outfname)):\n print 'removing ', subworkdir + '/' + os.path.basename(self.hmm_outfname)\n os.remove(subworkdir + '/' + os.path.basename(self.hmm_outfname))\n os.rmdir(subworkdir)", "def pool_combine(program_path='.', seq='999'):\n from os.path import dirname\n from subprocess import call\n\n new_pool = None\n for pool in get_pools(program_path=program_path, latest=False):\n if new_pool is None:\n path = dirname(pool)\n new_pool = open('/'.join([path, make_poolname(pool, seq=seq)]), 'w')\n call(['head', '-n', '1', pool], stdout=new_pool)\n call(['tail', '-n', '+2', pool], stdout=new_pool)\n new_pool.close()", "def __merge_processes_data(self, manager_data, tracker=None):\n\n if manager_data is not None:\n if (\n not self.autosave.authorized\n and PyFunceble.CONFIGURATION.multiprocess_merging_mode != \"live\"\n and not PyFunceble.CONFIGURATION.quiet\n ):\n print(\n Fore.MAGENTA\n + Style.BRIGHT\n + \"\\nMerging cross processes data... 
This process may take some time.\"\n )\n\n for test_output in manager_data:\n if self.autosave.authorized:\n print(Fore.MAGENTA + Style.BRIGHT + \"Merging process data ...\")\n\n self.post_test_treatment(\n test_output,\n self.file_type,\n complements_test_started=self.complements_test_started,\n auto_continue_db=self.autocontinue,\n inactive_db=self.inactive_db,\n mining=self.mining,\n whois_db=self.whois_db,\n )\n\n if tracker:\n tracker.add_position(len(test_output[\"given\"]))\n\n manager_data[:] = []\n\n self.autocontinue.save()\n self.inactive_db.save()\n self.mining.save()\n\n self.cleanup(self.autocontinue, self.autosave, test_completed=False)", "def pairTest(clients, servers):\n results = []\n #initOutput( opts.outfile )\n # 9 categories in linux 2.6+\n cpuHeader = ( 'cpu(start,stop,user%,nice%,sys%,idle%,iowait%,'\n 'irq%,sirq%,steal%,guest%)' )\n for pairs in [1]:\n #net.start()\n intervals, cpuEntries = iperfPairs(clients, servers )\n #net.stop()\n # Write output incrementally in case of failure\n result = { 'pairs': pairs, 'results': intervals,\n cpuHeader: cpuEntries }\n #appendOutput( opts, [ result ] )\n results += [ result ]\n return results", "def outputs(self):\n\t\treturn {k: v * self.throughput for k, v in self.per_process_outputs.items()}", "def processes(start, end, processes):\n end_things = [processes[x][2] for x in range(len(processes))]\n if start == end or end not in end_things:\n return []\n\n seq = []\n seen = 0\n inp = ''\n out = ''\n do = ''\n \n for i in range(len(processes)):\n if processes[i][2] == end:\n out = processes[i][2]\n inp = processes[i][1]\n do = processes[i][0]\n seq.append(do)\n seen += 1\n break\n\n while seen < len(processes):\n for i in range(len(processes)):\n if processes[i][2] == inp:\n out = processes[i][2]\n inp = processes[i][1]\n do = processes[i][0]\n seq.append(do)\n seen += 1\n\n seq.reverse()\n return seq", "def get_processes():\n yield from psutil.process_iter()", "def precompute_options_values_multipool(adj, skills, g):\n import os\n import multiprocessing\n processes=[]\n pool = multiprocessing.Pool(processes=len(skills))\n out = pool.map(multi_pov, [['fsdfa', [skill], g] for skill in skills])\n print out\n rewards = np.zeros((4604, len(skills)))\n dests = np.zeros((4604, len(skills)))\n discounts = np.zeros((4604, len(skills)))\n skill_vals_2 = np.zeros((4604, len(skills)))\n skill_Zs_2 = np.zeros((4604, len(skills)))\n if not os.path.exists('./povs/' + str(len(skills)) + '/' + str(skills[0][1])):\n os.makedirs('./povs/' + str(len(skills)) + '/' + str(skills[0][1]))\n names = ['rewards', 'dests', 'discounts', 'skill_vals_2', 'skill_Zs_2']\n for n in range(len(skills)):\n for index, array in enumerate([rewards, dests, discounts, skill_vals_2, skill_Zs_2]):\n array[:, n] = out[n][index][:, 0]\n if index == 1:\n dests = np.array(dests, dtype='int')\n np.save('./povs/' + str(len(skills)) + '/' + str(skills[0][1]) + '/' + str(names[index]), array)\n return [rewards, dests, discounts, skill_vals_2, skill_Zs_2]", "def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into 
one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def concatTwoHMMs(hmm1, hmm2):\n \n concatedHMM = {}\n #M is the number of emitting states in each HMM model (could be different for each)\n #K is the sum of the number of emitting states from the input models\n \n M1 = hmm1['means'].shape[0]\n M2 = hmm2['means'].shape[0]\n K = M1 + M2\n \n concatedHMM['name'] = hmm1['name'] + hmm2['name']\n concatedHMM['startprob'] = np.zeros((K + 1, 1))\n concatedHMM['transmat'] = np.zeros((K + 1, K + 1))\n concatedHMM['means'] = np.vstack((hmm1['means'],hmm2['means']))\n concatedHMM['covars'] = np.vstack((hmm1['covars'],hmm2['covars']))\n \n \n start1 = hmm1['startprob'].reshape(-1,1)\n start2 = hmm2['startprob'].reshape(-1,1)\n \n concatedHMM['startprob'][:hmm1['startprob'].shape[0]-1,:] = start1[:-1,:]\n concatedHMM['startprob'][hmm1['startprob'].shape[0]-1:,:] = np.dot(start1[-1,0],start2)\n trans = concatedHMM['transmat']\n trans1 = hmm1['transmat']\n trans2 = hmm2['transmat']\n\n trans[:trans1.shape[0]-1,:trans1.shape[1]-1] = trans1[:-1,:-1]\n temp = trans1[:-1,-1].reshape(-1,1)\n trans[:trans1.shape[0]-1,trans1.shape[1]-1:] = \\\n np.dot(temp,start2.T)\n trans[trans1.shape[0]-1:,trans1.shape[1]-1:] = trans2\n concatedHMM['transmat'] = trans \n \n return concatedHMM", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def gather_ps(rank, size, comm, k_allmodels, P21_allmodels, PHII_allmodels,\n first_snap_allmodels, last_snap_allmodels):\n\n def generate_tag(rank):\n tag = int(rank*100)\n\n return tag\n\n # Rank 0 will gather the wavenumber bins/power spectra from all other\n # ranks. \n if rank == 0:\n k_master = []\n P21_master = []\n PHII_master = []\n\n # Go through each model. \n for model_number in range(len(k_allmodels)):\n\n k_master.append([])\n P21_master.append([])\n PHII_master.append([])\n\n model_k = k_allmodels[model_number]\n model_P21 = P21_allmodels[model_number]\n model_PHII = PHII_allmodels[model_number]\n\n num_snaps = last_snap_allmodels[model_number] - \\\n first_snap_allmodels[model_number]\n rank_count = 0\n my_count = 0\n\n # Then go through each snapshot.\n # In the main data loop (``generate_data()``) the snapshots are\n # scatter sequentially. Hence when we gather, we get snap0 from\n # rank 0, snap1 from rank 1 etc. So we increase rank_count for each\n # snapshot and then reset it when we reach `size`.\n for snap_idx in range(num_snaps):\n\n if rank_count == 0:\n this_k = model_k[my_count] \n this_P21 = model_P21[my_count] \n this_PHII = model_PHII[my_count] \n my_count += 1\n else:\n # Each rank will use a unique tag.\n tag = generate_tag(rank_count) \n\n # Then the tag is offset for each data array. 
\n this_k = comm.recv(source = rank_count,\n tag = tag)\n this_P21 = comm.recv(source = rank_count,\n tag = tag+1)\n this_PHII = comm.recv(source = rank_count,\n tag = tag+2)\n\n # Now we have the data, append it to the master.\n k_master[model_number].append(this_k)\n P21_master[model_number].append(this_P21)\n PHII_master[model_number].append(this_PHII)\n\n rank_count += 1\n if rank_count == size:\n rank_count = 0\n\n # Snapshot Loop.\n # Model Loop.\n\n return k_master, P21_master, PHII_master\n\n else:\n\n # For all other ranks, go through the power spectra it calculated and\n # send it back to the root rank.\n for model_number in range(len(k_allmodels)):\n for idx in range(len(P21_allmodels[model_number])):\n\n tag = generate_tag(rank) \n\n k_this_idx = k_allmodels[model_number][idx]\n P21_this_idx = P21_allmodels[model_number][idx]\n PHII_this_idx = PHII_allmodels[model_number][idx]\n\n comm.send(k_this_idx, dest = 0, tag = tag)\n comm.send(P21_this_idx, dest = 0, tag = tag+1)\n comm.send(PHII_this_idx, dest = 0, tag = tag+2)\n\n # Non-zero ranks return junk.\n return None, None, None", "def _combine(self, results_list):\n pass", "def _merge_pairs(heaps):\n iheap = iter(heaps)\n return reduce(_merge, starmap(_merge, zip_longest(iheap, iheap)), None)", "def __call__(self, PID):\n i = 0\n pairs = 0\n outputdata = []\n for recordpair in self.data:\n pair = makeSAMpairFromStringTuple(recordpair, reorder=False)\n for stream in self.options.orderedStreams:\n # In SP mode, stream.next() returns a pair or None. In MP\n # it's more complicated, we pass back an array of dicts where\n # each one deinfes a pair (or not) depending on whether it is \n # filtered out by the stream.\n result = stream.next(pair, self.options)\n if result['matched']:\n if stream.op(OP_NOUT):\n continue\n\n # Copy stats for passing back.\n copy_of_stats = copy.deepcopy(stream.stats)\n copy_of_global = copy.deepcopy(self.options.orderedStreams[0].globalstats)\n\n # Reset original stats. 
Each subset of stats will\n # be integrated separately\n EmptyList(stream.stats)\n EmptyList(self.options.orderedStreams[0].globalstats)\n\n # First handle FASTQ output\n dataBucketFASTQ = []\n\n # Store root filename\n froot = result['output'][0]\n\n if stream.op(OP_FASTQ) or stream.op(OP_FASTQPP):\n if stream.op(OP_FASTQ):\n newpair,froot = self.ProcessPair(OP_FASTQ, stream, froot, pair)\n else:\n newpair,froot = self.ProcessPair(OP_FASTQPP, stream, froot, pair)\n if self.writeToFiles:\n if stream.op(OP_FASTQ) and stream.op(OP_SH):\n outputf1 = \"%s.sh.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n None,\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n elif stream.op(OP_FASTQPP):\n outputf1 = \"%s.pp.1.fastq.PID.%d\" %(froot,PID)\n outputf2 = \"%s.pp.2.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n open(outputf2, \"a\"),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n elif stream.op(OP_FASTQ):\n outputf1 = \"%s.1.fastq.PID.%d\" %(froot,PID)\n outputf2 = \"%s.2.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n open(outputf2, \"a\"),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n else:\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [StringIO.StringIO(), \n StringIO.StringIO(),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n if not stream.op(OP_INFO):\n newpair.writeFASTQ(dataBucketFASTQ, closeWhenDone=False)\n\n\n # Now Handle SAM output\n dataBucketSAM = []\n\n if stream.op(OP_SAM) or stream.op(OP_SAMPP):\n if stream.op(OP_SAM):\n newpair,froot = self.ProcessPair(OP_SAM, stream, froot, pair)\n else:\n newpair,froot = self.ProcessPair(OP_SAMPP, stream, froot, pair)\n if self.writeToFiles:\n if stream.op(OP_SAMPP):\n outputf = \"%s.pp.sam.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketSAM = [open(outputf, \"a\"),]\n else:\n dataBucketSAM = [None,]\n # OP_SAM (no OP_PP)\n else:\n outputf = \"%s.sam.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketSAM = [open(outputf, \"a\"),]\n else:\n dataBucketSAM = [None,]\n else:\n if not stream.op(OP_INFO):\n dataBucketSAM = [StringIO.StringIO(),]\n else:\n dataBucketSAM = [None,]\n if not stream.op(OP_INFO):\n newpair.writeSAM(dataBucketSAM[0], closeWhenDone=False)\n\n\n result['output'][0] = froot\n # Return results\n if stream.op(OP_SAM) or stream.op(OP_SAMPP) or \\\n stream.op(OP_FASTQ) or stream.op(OP_FASTQPP):\n if self.writeToFiles:\n if stream.op(OP_INFO):\n files_for_output = []\n else:\n files_for_output = result['output']\n outputdata.append({ 'datastrings' : '',\n 'files': files_for_output,\n 'name': result['name'],\n 'stats': copy_of_stats,\n 'gzipped' : stream.op(OP_GZ),\n 'sam,pp' : stream.op(OP_SAMPP),\n 'fastq,pp' : stream.op(OP_FASTQPP),\n 'sh' : stream.op(OP_SH),\n 'globalstats': copy_of_global,\n })\n else:\n pairvalueList = []\n for db in dataBucketFASTQ + dataBucketSAM:\n if db is None:\n pairvalueList.append(None)\n else:\n # If a StringIO object has nothing written \n # to it, the getvalue() call will throw an \n # exception about the object not having a \n # buf attribute. 
In this case we append None\n try:\n vv = db.getvalue()\n pairvalueList.append(vv)\n except:\n pairvalueList.append(None)\n\n # \"info\" operator quashes SAM,FASTQ output\n if stream.op(OP_INFO):\n pairvalueList = []\n files_for_output = []\n else:\n files_for_output = result['output']\n outputdata.append({ 'datastrings' : pairvalueList,\n 'files': files_for_output,\n 'name': result['name'],\n 'stats': copy_of_stats,\n 'gzipped' : stream.op(OP_GZ),\n 'sam,pp' : stream.op(OP_SAMPP),\n 'fastq,pp' : stream.op(OP_FASTQPP),\n 'sh' : stream.op(OP_SH),\n 'globalstats': copy_of_global,\n })\n\n for db in dataBucketFASTQ + dataBucketSAM:\n try:\n db.close()\n except:\n pass\n\n if not stream.op(OP_PASS):\n break\n \n\n # No matching data. We'll return an \"empty\" output dict\n if len(outputdata) == 0:\n stream = self.options.orderedStreams[0]\n empty = SAMStream('none', '')\n outputdata = [{ 'datastrings' : '',\n 'files': [],\n 'name': empty.name,\n 'stats': empty.stats,\n 'gzipped' : False,\n 'sam,pp' : False,\n 'fastq,pp' : False,\n 'sh' : False,\n 'globalstats': stream.globalstats\n },]\n return self.ID, outputdata", "def output_multiple(self):\n return _uhd_swig.usrp_sink_sptr_output_multiple(self)", "def map_product(process):\n\n process_params1 = set_extra_values(process['arguments'])\n process_params2 = get_process_params(process['arguments'], {'ignore_nodata': 'bool'})\n \n return map_default(process, 'product', 'reduce', {**process_params1, **process_params2})", "def concatenate(sequence):\n\n return Profiles([x.data for y in sequence for x in y],\n [x.description for y in sequence for x in y])", "def merge_paired_fastqs(target, outdir):\n left, right = target.get_fastq()\n left_fq = join(outdir, target.system_id + \"_1.fq.gz\")\n right_fq = join(outdir, target.system_id + \"_2.fq.gz\")\n commands = []\n\n if is_gz_file(left[0]):\n commands += [f\"cat {' '.join(left)} > {left_fq}\"]\n commands += [f\"cat {' '.join(right)} > {right_fq}\"]\n else:\n commands += [f\"cat {' '.join(left)} | \" f\"gzip -c > {left_fq}\"]\n commands += [f\"cat {' '.join(right)} | \" f\"gzip -c > {right_fq}\"]\n\n remove = [f\"rm {left_fq} {right_fq}\"]\n\n return (left_fq, right_fq), commands, remove", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def merge_system_pairs(self, s_p_int, s_p_imp=False, s_p_w=False):\n try:\n if s_p_int:\n counters['s_p_int'] = len(s_p_int)\n if not s_p_imp and not s_p_w:\n return s_p_int\n except:\n print \"1029-feil\"\n print s_p_int\n print s_p_imp\n print s_p_w\n s_p = []\n if not s_p_imp:\n s_p_imp = []\n if not s_p_w:\n s_p_w = []\n if DEBUG:\n for it in s_p_int:\n print it['sent'], it['exp'], it['holder_gold']\n for it in s_p_w:\n print it['sent'], it['exp'], it['holder_gold']\n for cur_int, cur_imp, cur_w in itertools.izip_longest(s_p_int, s_p_imp, s_p_w):\n skipthis = False\n if cur_int:\n cur = cur_int\n elif cur_imp:\n cur = cur_imp\n elif cur_w:\n cur = cur_w\n else:\n print \"THIS IS NOT A PAIR\"\n skipthis = True\n if not skipthis:\n if cur_imp and (cur_imp['confidence'] > 0.5 and cur_imp['confidence'] > cur['confidence']) or cur['confidence'] == 0:\n if cur_imp['sent'] != cur['sent']:\n raise\n cur = cur_imp\n if cur_w:\n if cur_w['sent'] != cur['sent']:\n print \"int.. \", len(s_p_int)\n print \"imp.. \", len(s_p_imp)\n print \"w.. 
\", len(s_p_w)\n print cur_w\n print cur\n raise\n if (cur_w['confidence'] > 0.5 and cur_w['confidence'] > cur['confidence']) or cur['confidence'] == 0:\n cur = cur_w\n s_p.append(cur)\n if DEBUG:\n print \"Pairs\"\n for p in s_p:\n print p\n return s_p", "def info(self, handle):\n\n # Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} ({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = \"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n 
gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def _combine_conditions(self):\n self.outpars = {}\n log.debug(\"{} step configuration parameter set(s) to be merged: {}\".format(self.step_title,\", \".join(p for p in list(self.pars_multidict.keys()))))\n for cfg_key in self.pars_multidict.keys():\n self.outpars = self._dict_merge(self.outpars, self.pars_multidict[cfg_key])", "def process():", "def concatenate(self):\n if not self.match_count:\n # No concatenating if there are no matches\n return []\n if self.match_count <= 1:\n # Can't combine a single match\n return self.match_list\n # Setup for iterating through\n cont = True\n first = self.match_list[self.i]\n second = self.match_list[self.j]\n while cont and self.match_count > 2:\n first, second, cont = self._process_main()\n\n # Last block is a special case\n self._process_last(first, second)\n return self.combined", "def merge():\n\n print(\"Starting merge thread...\\n\\n\")\n\n cmd = \"tsp \"\n for c in channels:\n port = 2000 + int(c)\n\n if c != \"1\":\n cmd += \"-P merge \\\"tsp \"\n cmd += f\"-I ip 230.2.2.2:{port}\\\" \"\n else:\n cmd += f\"-I ip 230.2.2.2:{port} \"\n \n cmd += \"-O ip --enforce-burst 230.2.2.2:2000\"\n\n tsduck = subprocess.call(\n cmd,\n shell=False,\n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT\n )", "def concatTwoHMMs(hmm1, hmm2):\n A = hmm1['transmat']#4*4\n PI = hmm1['startprob']#1*4\n B = hmm2['transmat']\n P = hmm2['startprob']\n m = A.shape[0] - 1\n m2 = B.shape[0] - 1\n K = m + m2\n A_con = np.zeros((K+1, K+1))\n Pi_con = np.zeros((1, K+1))\n A_con[:m, :m] = A[:m, :m]\n A_con[m:, m:] = B\n A_con[:m, m:] = np.dot(A[:m,m].reshape(-1, 1), P.reshape(1, -1))\n PP = PI.reshape(1, -1)\n Pi_con[0, :m] = PP[0, :m]\n Pi_con[0, m:] = PP[0, m] * P\n\n twoHMMs = {}\n twoHMMs['startprob'] = Pi_con\n twoHMMs['transmat'] = A_con\n twoHMMs['means'] = np.concatenate((hmm1['means'], hmm2['means']), axis=0)\n twoHMMs['covars'] = np.concatenate((hmm1['covars'] ,hmm2['covars']), axis=0)#K*D\n\n return twoHMMs", "def test_3():\n \n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n t = Stream('t')\n filter_then_square(in_streams[0], t,\n filter_threshold=20)\n print_stream(t, name='p1')\n\n def sums(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=' p2')\n\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n }\n },\n 'process_1':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': g,\n 'sources': {}\n },\n 'process_2':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': sums,\n 'sources': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('process_1', 'in'), ('process_2', 'in')],\n 'acceleration' : [('source_process', 
'in')]\n },\n 'process_1':\n {\n },\n 'process_2':\n {\n }\n }\n\n multicore(processes, connections)" ]
[ "0.59400517", "0.5814725", "0.5643951", "0.56101346", "0.5563475", "0.5513338", "0.549535", "0.5480405", "0.5461074", "0.53945047", "0.53856015", "0.53534395", "0.53074926", "0.52948034", "0.5292469", "0.5279101", "0.5227452", "0.5217861", "0.52140594", "0.520097", "0.5175036", "0.5159476", "0.5152338", "0.511525", "0.50761247", "0.50688154", "0.5064604", "0.5059331", "0.50575083", "0.5054423" ]
0.6553556
0
Get all the unique pairs of sequences in input_info, skipping where preclustered out
def get_pairs(self, preclusters=None): all_pairs = itertools.combinations(self.input_info.keys(), 2) if preclusters == None: print ' ?? lines (no preclustering)' # % len(list(all_pairs)) NOTE I'm all paranoid the list conversion will be slow (although it doesn't seem to be a.t.m.) return all_pairs else: # if we've already run preclustering, skip the pairs that we know aren't matches preclustered_pairs = [] n_lines, n_preclustered, n_previously_preclustered, n_removable, n_singletons = 0, 0, 0, 0, 0 for a_name, b_name in all_pairs: key = utils.get_key((a_name, b_name)) # NOTE shouldn't need this any more: if a_name not in preclusters.query_clusters or b_name not in preclusters.query_clusters: # singletons (i.e. they were already preclustered into their own group) n_singletons += 1 continue if key not in preclusters.pairscores: # preclustered out in a previous preclustering step n_previously_preclustered += 1 continue if preclusters.query_clusters[a_name] != preclusters.query_clusters[b_name]: # not in same cluster n_preclustered += 1 continue if preclusters.is_removable(preclusters.pairscores[key]): # in same cluster, but score (link) is long. i.e. *this* pair is far apart, but other seqs to which they are linked are close to each other n_removable += 1 continue preclustered_pairs.append((a_name, b_name)) n_lines += 1 print ' %d lines (%d preclustered out, %d removable links, %d singletons, %d previously preclustered)' % (n_lines, n_preclustered, n_removable, n_singletons, n_previously_preclustered) return preclustered_pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]", "def remove_ill_matched_pair(phi1,S1,TU1,TV1): #---- remove ill matched pair\r\n #--- mark inlier= 1; outlier= 0 ---\r\n mask, phi0= pano_tools.remove_outlier(phi1);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=2, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=2, mask= mask);\r\n mask, TV0 = pano_tools.remove_outlier(TV1 ,Nstd=2, mask= mask); \r\n mask, phi0= pano_tools.remove_outlier(phi1,Nstd=3, mask= mask);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=3, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=3, mask= mask);\r\n #--- select reliable data pair ---\r\n # mask is M*M matrix: 1= reliable pair combination;\r\n M = phi1.shape[0];\r\n sumx= np.sum(mask,axis=0); # large number= reliable\r\n seq = []; # chosen reliable data\r\n for k in range(0, int(M*0.7)):\r\n maxx = np.argmax(sumx);\r\n seq.append(maxx);\r\n sumx[maxx]= 0; \r\n return seq, phi0, S0, TU0, TV0", "def get_pairs(self, data, linked_clusters):\n\n _linked_clusters = [_cluster.antecessor for _cluster in linked_clusters]\n pairs = [pair for pair in itertools.combinations(_linked_clusters, r=2)]\n do_not_merge = [False for pair in itertools.combinations(_linked_clusters, r=2)]\n paircount = 0\n for pair in itertools.combinations(_linked_clusters, r=2):\n cluster1 = pair[0]\n cluster2 = pair[1]\n\n if cluster1.number_of_members > cluster2.number_of_members:\n dnm = check_components(self, data, cluster2, [cluster1])\n else:\n dnm = check_components(self, data, cluster1, [cluster2])\n if np.any(dnm):\n do_not_merge[paircount] = True\n paircount += 1\n\n return pairs, do_not_merge", "def inputs(self):\n # there's a better algorithm (Knuth's L-algorithm)\n # that generates this without duplications but isn't worth the effort\n possible_stems = sorted(set(itertools.permutations(self.segments)))\n\n # number the segments in the order they appear\n numbered_stems = [[seg + str(i+1) for i, seg in enumerate(stem)]\n for stem in possible_stems]\n\n return [(tuple(stem[:self.root_length]),\n tuple(stem[self.root_length:])) for stem in numbered_stems]", "def compute_unique_blocks(self):\n\n unique_blocks = OrderedDict()\n for seqname, rec in self.sequences_with_annotated_blocks().items():\n blocks_locations = (\n [(0, 0)]\n + sorted(\n [\n (f.location.start, f.location.end)\n for f in rec.features\n if f.qualifiers.get(\"is_block\", False)\n ]\n )\n + [(len(rec), len(rec))]\n )\n unique_blocks[seqname] = [\n (end1, start2)\n for (_, end1), (start2, _) in zip(\n blocks_locations, blocks_locations[1:]\n )\n if (start2 - end1) > 1\n ]\n return unique_blocks", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in 
combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def _get_pairs_onebatch(self, distance, batch):\n data_reduced = self.data_reduced[0]\n data_kd = self.data_kd[0]\n pairs = data_kd.query_ball_point(data_reduced[batch], distance)\n pairs = set(frozenset((i, m)) for\n i, matches in zip(batch, pairs)\n for m in matches\n if m != i)\n for t in range(1, self.n_trees):\n data_reduced = self.data_reduced[t]\n data_kd = self.data_kd[t]\n newpairs = data_kd.query_ball_point(data_reduced[batch], distance)\n newpairs = set(frozenset((i, m)) for\n i, matches in zip(batch, newpairs)\n for m in matches\n if m != i)\n pairs = set(p for p in newpairs if p in pairs)\n\n return pairs", "def merge_pairs(lpairs):\n \n pairs = np.unique(np.vstack(lpairs), axis=0)\n return pairs", "def pairs_from_knn(ind):\n \n NN = ind.shape[1]\n source_nodes = np.repeat(ind[:,0], NN-1).reshape(-1,1)\n target_nodes = ind[:,1:].reshape(-1,1)\n pairs = np.hstack((source_nodes, target_nodes))\n pairs = 
remove_duplicate_pairs(pairs)\n return pairs", "def _find_paired_nodes(self, graph):\n paired_list = []\n for line in nx.generate_edgelist(graph):\n if ('basepair' in line):\n if not (int(line.split(' ')[0]) in paired_list):\n paired_list.append(int(line.split(' ')[0]))\n if not (int(line.split(' ')[1]) in paired_list):\n paired_list.append(int(line.split(' ')[1]))\n return paired_list", "def remove_duplicate_pairs(pairs):\n \n uniq_pairs = np.unique(np.sort(pairs, axis=1), axis=0)\n return uniq_pairs", "def find_TSS_CRE_pairs(self):\n if self.verbose >= 2:\n print(\"\\r{}\\rFinding TSS-cCRE pairs\".format(' ' * 80), end='', file=sys.stderr)\n TSS_ranges = self.find_TSS_ranges()\n if self.skip_cre_promoter:\n pair_indices = numpy.r_[0, numpy.cumsum(TSS_ranges[:, 1] - TSS_ranges[:, 0]\n + TSS_ranges[:, 3] - TSS_ranges[:, 2])]\n else:\n pair_indices = numpy.r_[0, numpy.cumsum(TSS_ranges[:, 1] - TSS_ranges[:, 0])]\n # Normalize predicted values for easy correlation\n pair_queue = multiprocessing.JoinableQueue()\n results_queue = multiprocessing.JoinableQueue()\n processes = []\n for i in range(self.threads):\n processes.append(multiprocessing.Process(\n target=self._find_correlations, args=(pair_queue, results_queue)))\n processes[-1].daemon = True\n processes[-1].start()\n step = 50\n for i in range(self.chroms.shape[0]):\n for j in range(self.rna_indices[i], self.rna_indices[i + 1], step):\n end = min(j + step, self.rna_indices[i + 1])\n pair_queue.put((j, end, TSS_ranges[j:end, :]))\n for i in range(self.threads):\n pair_queue.put(None)\n pairs = numpy.zeros((pair_indices[-1], 3), dtype=numpy.int32)\n valid = numpy.zeros(pair_indices[-1], dtype=numpy.bool)\n finished = 0\n while finished < self.threads:\n results = results_queue.get(True)\n if results is None:\n finished += 1\n continue\n for i in range(len(results)):\n index, corrs = results[i][:2]\n s = pair_indices[index]\n e = pair_indices[index + 1]\n pairs[s:e, 0] = index\n if self.skip_cre_promoter:\n pairs[s:e, 1] = numpy.r_[numpy.arange(TSS_ranges[index, 0], TSS_ranges[index, 1]),\n numpy.arange(TSS_ranges[index, 2], TSS_ranges[index, 3])]\n else:\n pairs[s:e, 1] = numpy.arange(TSS_ranges[index, 0], TSS_ranges[index, 1])\n valid[s:e] = corrs >= self.corr_cutoff\n self.pairs = pairs[numpy.where(valid)[0], :]\n self.TSS_indices = numpy.r_[0, numpy.cumsum(numpy.bincount(self.pairs[:, 0],\n minlength=self.tssN))]\n self.selected = numpy.ones(self.pairs.shape[0], dtype=numpy.bool)\n if self.pca is not None:\n self.find_PCAs()\n if self.maxcres > 0:\n where = numpy.where(self.TSS_indices[1:] - self.TSS_indices[:-1] > self.maxcres)[0]\n for i in where:\n s, e = self.TSS_indices[i:(i + 2)]\n self.selected[self.rng.choice(numpy.arange(s, e), e - s - self.maxcres,\n replace=False)] = False\n if self.verbose >= 2:\n print(\"\\r{}\\r\".format(' ' * 80), end='', file=sys.stderr)\n kept = numpy.sum(valid)\n temp = numpy.bincount(self.pairs[:, 0], weights=self.selected, minlength=self.tssN)\n self.logger.info(\"Retained {} of {} TSS-CRE pairs ({:02.2f}%), {} - {} CREs/TSS (median {})\".format(\n self.selected.shape[0], valid.shape[0],\n 100. 
* self.selected.shape[0] / valid.shape[0],\n numpy.amin(temp), numpy.amax(temp), numpy.median(temp)))\n self.logger.info(\"Unique CREs in pairings: {}\".format(numpy.unique(self.pairs[:, 1]).shape[0]))", "def list_duplicates(seq):\n tally = defaultdict(list)\n for i, item in enumerate(seq):\n try:\n if item.mask == True:\n continue\n except:\n tally[item].append(i)\n return ((key, locs) for key, locs in tally.items() if len(locs) > 1)", "def _get_pairs_simple(self, distance):\n pairs = self.data_kd[0].query_pairs(distance)\n pairs = set(frozenset(p) for p in pairs)\n for kd in self.data_kd[1:]:\n newpairs = set(frozenset(p) for p in kd.query_pairs(distance)\n if frozenset(p) in pairs)\n pairs = newpairs\n return pairs", "def get_context_pairs(tokens):\n data = set()\n ngrams = get_ngrams(tokens, 4)\n if not ngrams:\n ngrams = [tokens]\n for ngrams_batch in ngrams:\n for pair in combinations(ngrams_batch, 2):\n diff_index = abs(tokens.index(pair[0]) - abs(tokens.index(pair[1])))\n if len(pair[0]) < 2 or len(pair[1]) < 2:\n continue\n data.add((pair, diff_index))\n return data", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def combine(combination_input):\n\n output = sum([map(list, itertools.combinations(combination_input, i)) for i in range(len(combination_input) + 1)], [])\n output_final = [sorted(i) for i in output if len(i)>1]\n\n return sorted(output_final)", "def reduce_pairs(pairs):\n return set(map(reduce_bits, filter(differ_by_one, pairs)))", "def _collapse_exact_matches(self, seqs, prefix_length, suffix_length):\r\n cluster_map = {}\r\n for seq_id, seq in seqs:\r\n seq_id = seq_id.split()[0]\r\n seq_hash = self._build_seq_hash(seq, prefix_length, suffix_length)\r\n try:\r\n cluster_map[seq_hash].append(seq_id)\r\n except KeyError:\r\n cluster_map[seq_hash] = [seq_id]\r\n\r\n return cluster_map.values()", "def get_individual_bins(seqids,ali,n=100,delimiter=None,rename=None,outname=None): \n if delimiter!=None and rename!=None:\n raise ValueError('ERROR: both options not compatible either delimiter or rename')\n seq_dict={}\n pats=[None]*len(seqids)\n for i,c in enumerate(seqids):\n if rename!=None: pats[i]=_rename(c,rename)\n elif delimiter!=None: pats[i]=c.split(delimiter)[0]\n else: pats[i]=c\n for seq in seqids:\n if rename!=None: pat=_rename(seq,rename)\n elif delimiter!=None: pat=seq.split(delimiter)[0]\n else: pat=seq\n seq_dict[pat]=[(seq,ali.ali[seq],0)]*n\n print pat, \"Cluster with %d identical sequences\"%(len(seq_dict[pat]))\n if outname!=None:\n with open(outname+\".pkl\",\"w\") as f:\n pickle.dump(seq_dict,f)\n return pats,seq_dict", "def unpruned_atom_pairs(\n molecules: List[masm.Molecule], idx_map: List[Tuple[int, int]], distance_bounds: Tuple[int, int]\n) -> Set[Tuple[int, int]]:\n\n def structure_idx(c: int, i: int) -> int:\n return idx_map.index((c, i))\n\n pairs: Set[Tuple[int, int]] = set()\n\n for component, molecule in enumerate(molecules):\n for i in molecule.graph.atoms():\n distances = np.array(masm.distance(i, molecule.graph))\n partners = np.nonzero((distances <= max(distance_bounds)) & (distances >= min(distance_bounds)))[0]\n\n # Back-transform to structure indices and add to set\n s_i = structure_idx(component, i)\n s_partners = [structure_idx(component, j) for j in 
partners]\n pairs |= set(make_sorted_pair(s_i, s_j) for s_j in s_partners)\n\n return pairs", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def remove_duplicates(pairs):\n unique_pairs = []\n pair_list = {}\n for i in range(len(pairs)):\n for j in range(len(pairs[0])):\n # This is to remove self-matches\n if i == pairs[i][j]:\n continue\n if (\"%d,%d\" % (i, pairs[i][j]) not in pair_list):\n # This is stored to remove symmetric duplicates\n pair_list[\"%d,%d\" % (i, pairs[i][j])] = 1\n pair_list[\"%d,%d\" % (pairs[i][j], i)] = 1\n unique_pairs.append([i, pairs[i][j]])\n return unique_pairs", "def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & d | b & c, a & c | b & d\n a, b, c, d = ab >> 1, ab & 1, cd >> 1, cd & 1\n yield ((a & d | b & c) << 1) | (a & c | b & d)", "def _find_shifted_pairs(pred_words: List[str], target_words: List[str]) ->Iterator[Tuple[int, int, int]]:\n for pred_start in range(len(pred_words)):\n for target_start in range(len(target_words)):\n if abs(target_start - pred_start) > _MAX_SHIFT_DIST:\n continue\n for length in range(1, _MAX_SHIFT_SIZE):\n if pred_words[pred_start + length - 1] != target_words[target_start + length - 1]:\n break\n yield pred_start, target_start, length\n _hyp = len(pred_words) == pred_start + length\n _ref = len(target_words) == target_start + length\n if _hyp or _ref:\n break", "def get_valid_pairs(output):\n valid_pairs = []\n invalid_pairs = []\n n_interp_samples = 10 # 插值点数目\n paf_threshold = 0.2\n conf_threshold = 0.5\n # loop for every POSE_PAIR\n for k in range(len(MAP_INDEX)):\n # a->b constitute a limb\n paf_a = output[0, MAP_INDEX[k][0], :, :]\n # print(paf_a.shape)\n paf_b = output[0, MAP_INDEX[k][1], :, :]\n paf_a = cv.resize(paf_a, (frameWidth, frameHeight))\n paf_b = cv.resize(paf_b, (frameWidth, frameHeight))\n\n # Find the joints for the first and second limb\n # cand_a为某一joint的列表, cand_b为另一与之相连接的joint的列表\n cand_a = joints_list_with_id[POSE_PAIRS[k][0]]\n cand_b = joints_list_with_id[POSE_PAIRS[k][1]]\n # 在完美检测到frame中所有joints的情况下, n_a = n_b = len(persons)\n n_a = len(cand_a)\n n_b = len(cand_b)\n\n # If joints for the joint-pair is detected\n # check every joint in cand_a with every joint in cand_b\n if n_a != 0 and n_b != 0:\n valid_pair = np.zeros((0, 3))\n for i in range(n_a):\n max_j = -1\n max_score = -1\n found = False\n for j in range(n_b):\n # Calculate the distance vector between the two joints\n distance_ij = np.subtract(cand_b[j][:2], cand_a[i][:2])\n # 求二范数,即求模,算两点距离\n norm = np.linalg.norm(distance_ij)\n if norm:\n # 距离不为零的话, 缩放到单位向量\n distance_ij = distance_ij / norm\n else:\n continue\n\n # Find p(u),在连接两joints的直线上创建一个n_interp_samples插值点的数组\n interp_coord = list(zip(np.linspace(cand_a[i][0], cand_b[j][0], num=n_interp_samples),\n np.linspace(cand_a[i][1], cand_b[j][1], num=n_interp_samples)))\n # Find the PAF values at a set of interpolated points between the joints\n paf_interp = []\n for m in range(len(interp_coord)):\n paf_interp.append([paf_a[int(round(interp_coord[m][1])), int(round(interp_coord[m][0]))],\n paf_b[int(round(interp_coord[m][1])), int(round(interp_coord[m][0]))]])\n # Find E\n paf_scores = np.dot(paf_interp, distance_ij)\n avg_paf_score = sum(paf_scores)/len(paf_scores)\n\n # Check if the connection is valid\n # If the fraction of interpolated vectors aligned with PAF is 
higher then threshold -> Valid Pair\n if (len(np.where(paf_scores > paf_threshold)[0]) / n_interp_samples) > conf_threshold:\n if avg_paf_score > max_score:\n # 如果这些点中有70%大于conf threshold,则把这一对当成有效\n max_j = j\n max_score = avg_paf_score\n found = True\n # Append the connection to the list\n if found:\n valid_pair = np.append(valid_pair, [[cand_a[i][3], cand_b[max_j][3], max_score]], axis=0)\n\n # Append the detected connections to the global list\n valid_pairs.append(valid_pair)\n # If no joints are detected\n else:\n # print(\"No Connection : k = {}\".format(k))\n invalid_pairs.append(k)\n valid_pairs.append([])\n return valid_pairs, invalid_pairs", "def removeDuplicates(seq):\n\n pass", "def make_paired_end_reads(sequence):\n \n R1 = sequence[0:n]\n R2 = sequence[len(sequence) - n:len(sequence)]\n\n #one reads are reverse complement, so make reverse complement of R2\n R2 = make_reverse_complement(R2)\n\n return [R1, R2]", "def split_input(self, n_procs, infname, prefix, divvy_up):\n # read single input file\n assert self.args.smc_particles == 1\n info = []\n with opener('r')(infname) as infile:\n reader = csv.DictReader(infile, delimiter=' ')\n for line in reader:\n info.append(line)\n\n # initialize\n sub_outfiles, writers = [], []\n for iproc in range(n_procs):\n subworkdir = self.args.workdir + '/' + prefix + '-' + str(iproc)\n utils.prep_dir(subworkdir)\n # prep each suboutput file\n sub_outfiles.append(opener('w')(subworkdir + '/' + os.path.basename(infname)))\n writers.append(csv.DictWriter(sub_outfiles[-1], reader.fieldnames, delimiter=' '))\n writers[-1].writeheader()\n # copy cachefile to this subdir\n if os.path.exists(self.hmm_cachefname):\n check_call(['cp', self.hmm_cachefname, subworkdir + '/'])\n\n if divvy_up:\n divvied_queries = self.divvy_up_queries(n_procs, info, 'names', 'seqs')\n for iproc in range(n_procs):\n for iquery in range(len(info)):\n if divvy_up:\n if info[iquery]['names'] not in divvied_queries[iproc]: # NOTE I think the reason this doesn't seem to be speeding things up is that our hierarhical agglomeration time is dominated by the distance calculation, and that distance calculation time is roughly proportional to the number of sequences in the cluster (i.e. larger clusters take longer)\n continue\n else:\n if iquery % n_procs != iproc:\n continue\n writers[iproc].writerow(info[iquery])\n\n for iproc in range(n_procs):\n sub_outfiles[iproc].close()\n\n if self.bcrham_divvied_queries is not None:\n self.bcrham_divvied_queries = None" ]
[ "0.60435754", "0.5448895", "0.5411966", "0.533383", "0.5203005", "0.5178553", "0.513516", "0.5110696", "0.50869215", "0.5070539", "0.5038386", "0.5036675", "0.49660292", "0.49623376", "0.4951787", "0.49416682", "0.49400976", "0.49369627", "0.4934104", "0.493045", "0.48957512", "0.4886766", "0.4880295", "0.4878189", "0.48704168", "0.48543462", "0.485374", "0.48505723", "0.48454544", "0.4841328" ]
0.6535067
0
Write hmm model files to /hmms, using information from
def write_hmms(self, parameter_dir): print ' writing hmms with info from %s' % parameter_dir # start = time.time() from hmmwriter import HmmWriter hmm_dir = parameter_dir + '/hmms' utils.prep_dir(hmm_dir, '*.yaml') # gene_list = self.args.only_genes # if gene_list is None and self.sw_info is not None: # if specific genes weren't specified, do the ones for which we have sw matches # print 'only-gene s argument not specified, writing hmms using sw matches' # gene_list = [] # for region in utils.regions: # for gene in self.germline_seqs[region]: # if gene in self.sw_info['all_best_matches']: # gene_list.append(gene) # if gene_list is None: # ack, just do 'em all # print 'just do them all' # gene_list = [] # for region in utils.regions: # gene_list += list(self.germline_seqs[region].keys()) if self.args.only_genes is None: # make a list of all the genes for which we have counts in <parameter_dir> (a.tm., this is all the genes that appeared as a best match at least once) gene_list = [] for region in utils.regions: with opener('r')(parameter_dir + '/' + region + '_gene-probs.csv') as pfile: reader = csv.DictReader(pfile) for line in reader: gene_list.append(line[region + '_gene']) else: gene_list = self.args.only_genes for gene in gene_list: if self.args.debug: print ' %s' % utils.color_gene(gene) writer = HmmWriter(parameter_dir, hmm_dir, gene, self.args.naivety, self.germline_seqs, self.args, self.cyst_positions, self.tryp_positions) writer.write() # print ' time to write hmms: %.3f' % (time.time()-start)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def WriteCcmModelToFile(filename, model):\n #Write the .hpp file\n WriteHeaderFileForCcmModel(filename, model)\n\n #Write the .cpp fil\n WriteSourceFileForCcmModel(filename, model)", "def WriteHeaderFileForCcmModel(filename, model): \n\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(ccm_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(ccm_model_name + \".hpp written!\\n\")", "def intf_MMSAVE(E):\n global SAVEFILE\n with open(SAVEFILE,'w') as f:\n f.write( MMEL.simplistic_mm_save_format() )\n print(\"Model script written to: %s\\n\" % SAVEFILE)", "def write_model_info(content=None):\n _info_dir = os.path.join(CKPT_DIR, UNIQUE_NAME)\n create_dir(_info_dir)\n if content is None:\n content = f\"Backbone: {BACKBONE}\\nLR: {LEARNING_RATE}\\n\" \\\n f\"Resolution: {IMAGE_SIZE}\\nAugmentations: {AUG_PROBABILITY}\"\n\n with open(os.path.join(_info_dir, 'info.txt'), 'a') as fp:\n fp.write(content + '\\n')", "def write_to_md(dictData, outputDirectory):\n\tdic = prepare_hw_dict(dictData)\n\tfor hw in dic:\n\t\tfileout = os.path.join(outputDirectory, hw+'.md')\n\t\t# Prepare the output file\n\t\tfout = codecs.open(fileout, 'w', 'utf-8')\n\t\t#‌ Write frontmatter\n\t\tfout.write('---\\ntitle: \"'+hw+'\"\\n---\\n\\n')\n\t\t# For each (headword, meanings, verseNumber, PageNum) tuples,\n\t\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dic[hw]:\n\t\t\tcommaed = ', '.join(meanings)\n\t\t\tverse = verse.replace('<BR>', '<br />')\n\t\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\t\tfout.write('# ' + hw + '\\n## ' + commaed + '\\n' + verse + '<br />verse ' + verseNumDetails + '<br />page ' + pageNumDetails +'\\n\\n')\n\t\tfout.close()\n\n\t# Give some summary to the user\n\tprint('MD files generated. 
Success!')\n\tprint('{} separate .md files written, one per headword.'.format(len(dic)))", "def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def WriteSrnModelToFile(filename, model):\n\n # Write the .hpp file\n WriteHeaderFileForSrnModel(filename, model)\n\n # Write the .cpp fil\n WriteSourceFileForSrnModel(filename, model)", "def write_solution(mm):\n\n m = mm.model\n\n solution_file = \"{0}_sol.csv\".format(mm.filename)\n\n harv_data = []\n harv_data.append([\"Harvest data\"])\n harv_data.append([\"Species\", \"Region\", \"Period\", \"Value\"])\n # write harv variable solution values\n harv = pg.get_variables(m, \"harv\")\n for h in harv:\n name = h.varName.split(\",\")\n species = name[0].split(\"[\")[1]\n region = name[1]\n period = name[-1][:-1]\n harv_data.append(\n [species, region, period, h.X])\n\n age_data = []\n age_data.append([\"Age data\"])\n age_data.append([\"Region\", \"Period\", \"Value\"])\n age = pg.get_variables(m, \"age\")\n for a in age:\n name = a.varName.split(\",\")\n region = name[0].split(\"[\")[1]\n period = name[-1][:-1]\n age_data.append(\n [region, period, a.X])\n\n with open(solution_file, \"w+\") as wrf:\n wf = csv.writer(wrf)\n wf.writerows(harv_data)\n wf.writerows(age_data)", "def save_model(self):\r\n jeff = self.name + '_words'\r\n f = open(jeff, 'w')\r\n f.write(str(self.words))\r\n f.close()\r\n \r\n jeph = self.name + '_word_lengths'\r\n f = open(jeph, 'w')\r\n f.write(str(self.word_lengths))\r\n f.close()\r\n \r\n geoff = self.name + '_stems'\r\n f = open(geoff, 'w')\r\n f.write(str(self.stems))\r\n f.close()\r\n \r\n joeff= self.name + '_sentence_lengths'\r\n f = open(joeff, 'w')\r\n f.write(str(self.sentence_lengths))\r\n f.close()\r\n \r\n geoph = self.name + '_punctuation'\r\n f = open(geoph, 'w')\r\n f.write(str(self.punctuation))\r\n f.close()", "def WriteHeaderFileForSrnModel(filename, model): \n\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(srn_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(srn_model_name + \".hpp written!\\n\")", "def concatHMMs(hmmmodels, namelist):\n #output har samma features som 
modelinputen\n #använd phoneHMMs för att hämta markovmodellerna.\n #namelist är modellerna vi vill kombinera till en modell som vi sedan returnerar\n #modellist = {}\n #for digit in prondict.keys():\n # modellist[digit] = ['sil'] + prondict[digit] + ['sil']\n names=['sil']+namelist+['sil']\n tsize=3*len(names)+1\n transmat=np.zeros([tsize,tsize])\n i=0\n means=np.zeros([len(names)*3,13])\n covars=np.zeros([len(names)*3,13])\n for digit in names:\n tmat=phoneHMMs[digit]['transmat']\n transmat[i:i+4,i:i+4]=tmat\n mean=phoneHMMs[digit]['means']\n cov=phoneHMMs[digit]['covars']\n means[i:i+3,0:13]=mean\n covars[i:i+3,0:13]=cov\n i+=3\n transmat[-1,-1]=1.0\n startprobs=np.zeros(tsize)\n startprobs[0]=1.0\n combinedHMM={'covars':covars,'name':namelist[0],'transmat':transmat,'startprob':startprobs,'means':means}\n return combinedHMM", "def minfo():\n model = np.loadtxt('cumul_depths.tmp',dtype={'names': ('H'),'formats': \\\n ('f4')}, usecols=[0])\n d = model['H']\n model = np.loadtxt('start_model.dat',dtype={'names': (\"S\"),'formats': \\\n ('f4')}, skiprows=1,usecols=[2])\n vs = model['S']\n\n A = np.repeat(vs,2)\n B = np.repeat(d,2)\n B = np.insert(B,[0],0.0)[:-1] \n out = zip(A, B)\n \n f = open('model.info','w+')\n for line in out:\n print (\" \".join(str(x) for x in line))\n f.write(\" \".join(str(x) for x in line) + \"\\n\") \n f.close()", "def save_model(self):\n filename = self.name + '_words'\n f = open(filename, 'w') \n f.write(str(self.words)) \n f.close()\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'w') \n f.write(str(self.word_lengths)) \n f.close()\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'w') \n f.write(str(self.stems)) \n f.close()\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'w') \n f.write(str(self.sentence_lengths)) \n f.close()\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'w') \n f.write(str(self.punctuation)) \n f.close()", "def write_voldata_to_mgh_file(mgh_file_name, vol_data, affine=None, header=None):\n if header is None:\n header = fsmgh.MGHHeader()\n image = fsmgh.MGHImage(vol_data, affine, header=header)\n image.to_filename(mgh_file_name)", "def create_mat_file(data, col_names, model_name, outputModelFilesDirectory):\n\n dimx = None\n dimy = None\n\n if len(data.shape) == 1:\n dimy = 1\n dimx = data.shape[0]\n else:\n dimx, dimy = data.shape\n\n\n ppstring = '/PPheights'\n\n for i in range(0, dimy):\n\n ppstring += '\\t' + '%1.5e' %(1.0)\n\n ppstring += '\\n'\n\n\n f = open(os.path.join(outputModelFilesDirectory, model_name + '.mat'), 'w')\n\n print >>f, '/NumWaves\\t%d' %dimy\n print >>f, '/NumPoints\\t%d' %dimx\n print >>f, ppstring\n\n # print labels for the columns - mainly for double-checking your model\n col_string = '\\n'\n\n for col in col_names:\n col_string = col_string + col + '\\t'\n\n print >>f, col_string, '\\n'\n\n\n print >>f, '/Matrix'\n\n np.savetxt(f, data, fmt='%1.5e', delimiter='\\t')\n\n f.close()", "def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)", "def write_name_file(self):\n fn_path = os.path.join(self.model_ws, self.mpnamefile)\n f_nam = open(fn_path, 'w')\n f_nam.write('%s\\n' % (self.heading))\n if 
self.mpbas_file is not None:\n f_nam.write('%s %3i %s\\n' % ('MPBAS', 86, self.mpbas_file))\n if self.dis_file is not None:\n f_nam.write('%s %3i %s\\n' % ('DIS', self.dis_unit, self.dis_file))\n if self.head_file is not None:\n f_nam.write('%s %3i %s\\n' % ('HEAD', 88, self.head_file))\n if self.budget_file is not None:\n f_nam.write('%s %3i %s\\n' % ('BUDGET', 89, self.budget_file))\n for u, f in zip(self.external_units, self.external_fnames):\n f_nam.write('DATA {0:3d} '.format(u) + f + '\\n')\n f_nam.close()", "def write_lammps_files(self): \n lammps_file = self.file_name\n with open( lammps_file, 'w' ) as f:\n f.write(self.input_string())", "def drawHMM(numModels, obs, assignedData, \\\n writeLocation = \"../output/models.png\", verbose = False):\n if verbose:\n print \"calling drawHMM\"\n\n spacing = 4\n height = numModels * (len(assignedData[0][0]) + spacing)\n width = int(math.log(obs, 2))\n\n im = Image.new(\"RGB\", (width + 2, height))\n d = ImageDraw.Draw(im)\n for i in range(len(assignedData)):\n #Draw a given model. \n _drawOneHMM(d, assignedData[i], 1, i * (len(assignedData[0][0]) + spacing), \\\n width, len(assignedData[0][0]))\n\n im.save(writeLocation, \"PNG\")\n del d", "def save_model(self):\n # words dictionary\n filename = self.name + \"_words\"\n f = open(filename, 'w')\n f.write(str(self.words))\n f.close()\n\n # word_lengths dictionary\n filename = self.name + \"_word_lengths\"\n f = open(filename, 'w')\n f.write(str(self.word_lengths))\n f.close()\n\n # stems dictionary\n filename = self.name + \"_stems\"\n f = open(filename, 'w')\n f.write(str(self.stems))\n f.close()\n\n # sentence_lengths dictionary\n filename = self.name + \"_sentence_lengths\"\n f = open(filename, 'w')\n f.write(str(self.sentence_lengths))\n f.close()\n\n # ten most common words\n filename = self.name + \"_common_word\"\n f = open(filename, 'w')\n f.write(str(self.common_word))\n f.close()", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):\n\n flags_all = np.array([])\n class_names_all = []\n for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):\n # for each WAV file\n wav_file = f\n gt_file = f.replace('.wav', '.segments')\n if os.path.isfile(gt_file):\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags, class_names = \\\n segments_to_labels(seg_start, seg_end, seg_labs, mid_step)\n for c in class_names:\n # update class names:\n if c not in class_names_all:\n class_names_all.append(c)\n sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)\n feature_vector, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n\n flag_len = len(flags)\n feat_cols = feature_vector.shape[1]\n min_sm = min(feat_cols, flag_len)\n feature_vector = feature_vector[:, 0:min_sm]\n flags = flags[0:min_sm]\n\n flags_new = []\n # append features and labels\n for j, fl in enumerate(flags):\n 
flags_new.append(class_names_all.index(class_names_all[flags[j]]))\n\n flags_all = np.append(flags_all, np.array(flags_new))\n\n if i == 0:\n f_all = feature_vector\n else:\n f_all = np.concatenate((f_all, feature_vector), axis=1)\n\n # compute HMM statistics\n class_priors, transmutation_matrix, means, cov = \\\n train_hmm_compute_statistics(f_all, flags_all)\n # train the HMM\n hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], \"diag\")\n hmm.covars_ = cov\n hmm.means_ = means\n hmm.startprob_ = class_priors\n hmm.transmat_ = transmutation_matrix\n\n save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)\n\n return hmm, class_names_all", "def save(self, directory):\n\n os.makedirs(directory, exist_ok=True)\n\n self.ft.save(os.path.join(directory, \"w2v.model\"))\n self.matrix.save(os.path.join(directory, \"stsm.model\"))\n self.dictionary.save(os.path.join(directory, \"dict.model\"))", "def write_mm(g, fn):\n f = open(fn, \"w\")\n f.write(\"%d %d %d\\n\" % (g.vcount(), g.vcount(), g.ecount()))\n\n if g.is_weighted():\n for e in g.es():\n f.write(\"%d %d %.4f\\n\" % (e.source, e.target, e[\"weight\"]))\n else:\n for e in g.es():\n f.write(\"%d %d 1\\n\" % (e.source, e.target))\n\n f.close()", "def save_model(self):\n f = open(self.name + '_' + 'words', 'w')\n f.write(str(self.words))\n f.close\n\n f = open(self.name + '_' + 'word_lengths', 'w')\n f.write(str(self.word_lengths))\n f.close\n\n f = open(self.name + '_' + 'sentence_lengths', 'w')\n f.write(str(self.sentence_lengths))\n f.close\n\n f = open(self.name + '_' + 'stems', 'w')\n f.write(str(self.stems))\n f.close\n\n f = open(self.name + '_' + 'commas_per_sentence', 'w')\n f.write(str(self.commas_per_sentence))\n f.close", "def WriteSourceFileForCcmModel(filename, model):\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(ccm_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Function definitions\n funct_defn_str = GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Get the stopping event function\n stopping_event_fn = GetStoppingEventBooleanString(filename, model)\n source_file.write(stopping_event_fn)\n\n #Get the void to check and update SBML events\n events_fn = GetCheckAndUpdateEventsVoidString(filename, model)\n source_file.write(events_fn)\n\n #Get the void to check and update SBML events\n events_satisfied_fn = GetAreAllEventsSatisfiedBooleanString(filename)\n source_file.write(events_satisfied_fn)\n\n #Initialise function\n initialise_fn = GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(ccm_model_name + \".cpp written!\\n\")", "def _write_model(self, file, specs, experiment = None):\n self.require_complete()\n for pragma in self._pragmas:\n 
file.write(pragma)\n file.write('\\n')\n if len(self._pragmas) > 0:\n file.write('\\n')\n file.write('-- NOTE: This file was auto-generated by aivika-modeler 1.0\\n')\n file.write('\\n')\n for module_import in self._module_imports:\n file.write(module_import)\n file.write('\\n')\n if len(self._module_imports) > 0:\n file.write('\\n')\n file.write('specs =\\n')\n specs.write(file, ' ')\n file.write('\\n')\n self._write_transact_types(file)\n self._write_model_def(file)\n file.write('\\n')\n if experiment is None:\n file.write('main =\\n')\n file.write(' printSimulationResultsInStopTime\\n')\n file.write(' printResultSourceInEnglish\\n')\n file.write(' model specs\\n')\n file.write('\\n')\n else:\n experiment.write(file)\n file.write('\\n')", "def write_data(self, file_name=None, model=None, model_text=None, var=None):\n\n # Open the files.\n file_1st = open(\"Sij\" + file_name, 'w')\n file_2nd = open(\"Sijkl\" + file_name, 'w')\n files = [file_1st, file_2nd]\n\n # The headers.\n for i in range(2):\n # Alias the file.\n file = files[i]\n\n # The titles.\n file.write(\"@with g0\\n\")\n if i == 0:\n file.write(\"@ world 0, -0.2, 180, 1\\n\")\n else:\n file.write(\"@ world 0, -0.7, 180, 1\\n\")\n file.write(\"@ title \\\"Calculated frame order matrix elements\\\"\\n\")\n if i == 0:\n file.write(\"@ subtitle \\\"%s, 1\\\\Sst\\\\N degree matrix\\\"\\n\" % model_text)\n else:\n file.write(\"@ subtitle \\\"%s, 2\\\\Snd\\\\N degree matrix\\\"\\n\" % model_text)\n\n # Legend.\n if i == 0:\n file.write(\"@ legend 0.23, 0.55\\n\")\n else:\n file.write(\"@ legend off\\n\")\n\n # Plot data.\n file.write(\"@ xaxis bar linewidth 0.5\\n\")\n file.write(\"@ xaxis label \\\"Cone half-angle \\\\xq\\\\f{}\\\\s%s\\\\N (deg.)\\\"\\n\" % var)\n file.write(\"@ xaxis label char size 1.000000\\n\")\n file.write(\"@ xaxis tick major 45\\n\")\n file.write(\"@ xaxis tick major linewidth 0.5\\n\")\n file.write(\"@ xaxis tick minor ticks 3\\n\")\n file.write(\"@ xaxis tick minor linewidth 0.5\\n\")\n file.write(\"@ yaxis bar linewidth 0.5\\n\")\n if i == 0:\n file.write(\"@ yaxis label \\\"Order parameter \\qS\\sij\\\"\\n\")\n else:\n file.write(\"@ yaxis label \\\"Order parameter \\qS\\sijkl\\\"\\n\")\n file.write(\"@ yaxis label char size 1.000000\\n\")\n file.write(\"@ yaxis tick major 0.2\\n\")\n file.write(\"@ yaxis tick major linewidth 0.5\\n\")\n file.write(\"@ yaxis tick minor ticks 1\\n\")\n file.write(\"@ yaxis tick minor linewidth 0.5\\n\")\n\n # Header for first order matrix.\n graph_num = 0\n for i in range(3):\n for j in range(3):\n # Legend.\n file_1st.write(\"@ s%i legend \\\"\\\\q<c\\\\s%s%s\\\\N>\\\"\\n\" % (graph_num, i+1, j+1))\n file_1st.write(\"@ s%i linewidth 0.5\\n\" % graph_num)\n\n # Inc.\n graph_num = graph_num + 1\n\n # Header for second order matrix.\n graph_num = 0\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n # Legend.\n file_2nd.write(\"@ s%i legend \\\"<\\\\qc\\\\s%s%s\\\\N.c\\\\s%s%s\\\\N>\\\"\\n\" % (graph_num, i+1, j+1, k+1, l+1))\n file_2nd.write(\"@ s%i linewidth 0.5\\n\" % graph_num)\n\n # Inc.\n graph_num = graph_num + 1\n\n # Loop over the first rotation matrix index.\n graph_num = 0\n for i in range(3):\n # Loop over the second rotation matrix index.\n for j in range(3):\n # Header.\n file_1st.write(\"@target G0.S%i\\n\" % graph_num)\n file_1st.write(\"@type xy\\n\")\n\n # Loop over each time point.\n for k in range(INC+1):\n # Get the angle.\n angle = self.get_angle(k-1, model=model, var=var, deg=True)\n\n # Write.\n file_1st.write(\"%s 
%s\\n\" % (angle, self.first_frame_order[k, i, j]))\n\n # Footer.\n file_1st.write(\"&\\n\")\n\n # Inc.\n graph_num = graph_num + 1\n\n # Loop over the first frame order index.\n graph_num = 0\n for i in range(9):\n # Loop over the second frame order index.\n for j in range(9):\n # Header.\n file_2nd.write('@target G0.S%i\\n' % graph_num)\n file_2nd.write('@type xy\\n')\n\n # Loop over each time point.\n for k in range(INC+1):\n # Get the angle.\n angle = self.get_angle(k-1, model=model, var=var, deg=True)\n\n # Write.\n file_2nd.write('%s %s\\n' % (angle, self.second_frame_order[k, i, j]))\n\n # Footer.\n file_2nd.write('&\\n')\n\n # Inc.\n graph_num = graph_num + 1\n\n # No autoscaling.\n file_1st.write(\"@autoscale onread none\\n\")\n file_2nd.write(\"@autoscale onread none\\n\")\n\n # Close the files.\n file_1st.close()\n file_2nd.close()" ]
[ "0.6306435", "0.62701946", "0.6194146", "0.6162228", "0.60442907", "0.59868413", "0.5865779", "0.5834945", "0.5833884", "0.5833685", "0.5771575", "0.5769922", "0.57470757", "0.572483", "0.5723051", "0.57100296", "0.5691098", "0.5648298", "0.56091905", "0.5607857", "0.5585511", "0.5565819", "0.552539", "0.55140436", "0.55132294", "0.5503089", "0.5501371", "0.54826266", "0.5470935", "0.54649454" ]
0.6623211
0
Check if hmm model file exists, and if not remove gene from
def check_hmm_existence(self, gene_list, skipped_gene_matches, parameter_dir): # first get the list of genes for which we don't have hmm files if len(glob.glob(parameter_dir + '/hmms/*.yaml')) == 0: raise Exception('no yamels in %s' % parameter_dir) genes_to_remove = [] for gene in gene_list: hmmfname = parameter_dir + '/hmms/' + utils.sanitize_name(gene) + '.yaml' if not os.path.exists(hmmfname): # if self.args.debug: # print ' WARNING %s removed from match list for %s %s (not in %s)' % (utils.color_gene(gene), query_name, '' if second_query_name==None else second_query_name, os.path.dirname(hmmfname)) skipped_gene_matches.add(gene) genes_to_remove.append(gene) # then remove 'em from <gene_list> for gene in genes_to_remove: gene_list.remove(gene)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_model_checkpoints(self):\n if self.file_prefix is None:\n return\n\n with os.scandir() as path_list:\n for entry in path_list:\n if entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(\".h5\"):\n print(\"{}: Removing {}\".format(self.MODEL_NAME, entry.path))\n os.remove(entry.path)", "def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None", "def delete_best_model(self):\n if self.best_model_path.exists():\n # not using `missing_ok=True` because we are running this code on pythin 3.7\n self.best_model_path.unlink()", "def remove(self):\n \n dbpath, config = self._start() \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=False) \n self.logger.msg1(\"Reading model ids\")\n ids = values_in_column(desc_file, \"id\")\n self.logger.msg1(\"Deleting models: \"+str(len(ids)))\n delete_models(dbpath, ids)\n self._end()", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'fitting')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'fitting', existing_file))", "def delete_training_files(cls, model_file):\n try:\n os.remove('%s.vec' % model_file)\n os.remove('%s.bin' % model_file)\n except FileNotFoundError:\n logger.debug('Training files %s not found when attempting to delete', model_file)\n pass", "def delete_ffmlp_data():\n import shutil\n ffmlp_dir = \"%s/data/fnc-1/mlp_models/temp_models\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n if (os.path.exists(ffmlp_dir)):\n for the_file in os.listdir(ffmlp_dir):\n file_path = os.path.join(ffmlp_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)", "def clean_up(model_path):\n cmds = [\"rm */grad*.pickle\",\n \"rm -r checkpoints\",\n \"rm */train_len\",\n \"rm log_human_read.csv\",\n \"rm */log_human_read.csv\",\n \"rm -r best_model\",\n \"rm */*epoch*\"]\n\n for cmd in cmds:\n os.system(\"cd {} && {}\".format(model_path, cmd))", "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def cleanup_callback(self):\n\n # Remove from include\n ghtin = self.idf.output_directory / \"GHTIn.idf\"\n if ghtin.exists():\n try:\n self.idf.include.remove(ghtin)\n ghtin.remove()\n except ValueError:\n log(\"nothing to remove\", lg.DEBUG)", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def test_remove_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.remove_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def clean():\n try:\n os.unlink(options.coords + 
'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n if self.cleanup_model_file:\n os.unlink(self.model_file)", "def test_mapping_file_removal(self):\r\n # doesn't change in test\r\n mf_exp, _ = parse_mapping_file_to_dict(self.MF_IN_2)\r\n bt_exp = parse_biom_table(self.BT_OUT_2)\r\n mf_obs, bt_obs, nonshared_samples = \\\r\n sync_biom_and_mf(parse_mapping_file_to_dict(self.MF_IN_2)[0],\r\n parse_biom_table(self.BT_IN_1))\r\n self.assertEqual(mf_exp, mf_obs)\r\n self.assertEqual(bt_exp, bt_obs)\r\n self.assertEqual(nonshared_samples, set(['Sample5', 'Sample6']))", "def test_remove_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised1 = False\n # accessing the MLModelMock model object\n try:\n model_manager.remove_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised1 = True\n\n exception_raised2 = False\n exception_message2 = \"\"\n # trying to access the model that was removed\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n\n # assert\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 == \"Instance of model 'qualified_name' not found in ModelManager.\")", "def clear_dir_of_bad_files(ndmg_participant_dir):\n # TODO: generalize to non-desikan shape\n # TODO: currently only works on ssv files\n files = get_graph_files(ndmg_participant_dir) # list of paths\n for i, file in enumerate(files):\n if numpy_from_output_graph(files[i], sep=\" \").shape != (70, 70):\n os.remove(file) # delete files that aren't 70x70", "def test_mapping_file_removal(self):\r\n mf_exp, _ = parse_mapping_file_to_dict(self.MF_OUT_1)\r\n # bt doesn't change in this test\r\n bt_exp = parse_biom_table(self.BT_IN_1)\r\n mf_obs, bt_obs, nonshared_samples = \\\r\n sync_biom_and_mf(parse_mapping_file_to_dict(self.MF_IN_1)[0],\r\n parse_biom_table(self.BT_IN_1))\r\n self.assertEqual(mf_exp, mf_obs)\r\n self.assertEqual(bt_exp, bt_obs)\r\n self.assertEqual(nonshared_samples, set(['NotInOtuTable2',\r\n 'NotInOtuTable1']))", "def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()", "def setup():\n if os.path.exists(\"observations.p\"):\n os.remove(\"observations.p\")\n else:\n pass", "def test_mapping_file_removal(self):\r\n mf_exp, _ = parse_mapping_file_to_dict(self.MF_OUT_3)\r\n # bt doesn't change in this test\r\n bt_exp = parse_biom_table(self.BT_OUT_3)\r\n mf_obs, bt_obs, nonshared_samples = \\\r\n sync_biom_and_mf(parse_mapping_file_to_dict(self.MF_IN_3)[0],\r\n parse_biom_table(self.BT_IN_1))\r\n self.assertEqual(mf_exp, mf_obs)\r\n self.assertEqual(bt_exp, bt_obs)\r\n self.assertEqual(nonshared_samples, set(['Sample4', 'NotInOtuTable2',\r\n 'NotInOtuTable1']))", "def clean_up(self, early_stopping, current_epoch):\n\n early_stopping: EarlyStopping = early_stopping\n\n if early_stopping.enable_stopping:\n lower_limit = early_stopping.best_loss_index - 1\n else:\n lower_limit = current_epoch - self.config.model_files_stored - 1\n\n for file in listdir(self.training_model_path):\n\n try:\n epoch_of_file = 
int(file.split('.')[0].split('-')[-1])\n if epoch_of_file <= lower_limit:\n os.remove(self.training_model_path + file)\n except ValueError:\n pass\n except Exception as e:\n print(e)", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def test_non_existent_file(self):\n remove('Rectangle.json')\n self.assertEqual(self.r1.load_from_file(), [])", "def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):\n if outdir:\n outdir_set = True\n else:\n outdir_set = False\n\n counter = 0\n for g in tqdm(self.genes):\n if g.id not in input_dict:\n continue\n\n if not outdir_set:\n outdir = g.protein.structure_dir\n if not outdir:\n raise ValueError('Output directory must be specified')\n\n for hid, hdict in input_dict[g.id].items():\n if 'model_file' not in hdict or 'file_type' not in hdict:\n raise KeyError('\"model_file\" and \"file_type\" must be keys in the manual input dictionary.')\n\n new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'],\n file_type=hdict['file_type'], is_experimental=False)\n\n if clean:\n try:\n new_homology.load_structure_path(new_homology.clean_structure(\n outdir=outdir,\n force_rerun=force_rerun),\n hdict['file_type'])\n except PDBConstructionException as e:\n log.error('{}, {}, {}: Unable to read PDB file, actual error was:'.format(g.id, hid, hdict['model_file']))\n log.exception(e)\n else:\n copy_to = op.join(outdir, op.basename(hdict['model_file']))\n if ssbio.utils.force_rerun(force_rerun, copy_to):\n # Just copy the file to the structure directory and store the file name\n log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))\n shutil.copy2(hdict['model_file'], outdir)\n new_homology.load_structure_path(copy_to, hdict['file_type'])\n else:\n log.debug('{}: homology model already copied to directory'.format(copy_to))\n new_homology.load_structure_path(copy_to, hdict['file_type'])\n\n # TODO: need to better handle other info in the provided dictionary, if any\n new_homology.update(hdict)\n\n log.debug('{}: updated homology model information and copied model file.'.format(g.id))\n counter += 1\n\n log.info('Updated homology model information for {} genes.'.format(counter))", "def cleanup(self): \n if os.path.exists(self.inpms):\n shutil.rmtree(self.inpms)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def clean_up_state_file(self):\r\n if os.path.exists(StudentModuleHistoryCleaner.STATE_FILE):\r\n os.remove(StudentModuleHistoryCleaner.STATE_FILE)" ]
[ "0.62253946", "0.61339456", "0.61209595", "0.6105428", "0.59902775", "0.5978239", "0.5936864", "0.5926887", "0.5825889", "0.5775082", "0.5756565", "0.5729566", "0.5712733", "0.56921035", "0.56839734", "0.5574046", "0.5529738", "0.55226207", "0.5518598", "0.55043536", "0.5492275", "0.54914963", "0.54812586", "0.54342014", "0.5372868", "0.5369924", "0.53625244", "0.5357935", "0.5344816", "0.5339668" ]
0.62624025
0
Check that we have at least one gene for each region
def all_regions_present(self, gene_list, skipped_gene_matches, query_name, second_query_name=None): for region in utils.regions: if 'IGH' + region.upper() not in ':'.join(gene_list): print ' no %s genes in %s for %s %s' % (region, ':'.join(gene_list), query_name, '' if (second_query_name == None) else second_query_name) print ' skipped %s' % (':'.join(skipped_gene_matches)) print 'giving up on query' return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gene_exists(ensemble, methylation_type, gene):\n\n\tgene_table_name = 'gene_' + gene.replace(\".\", \"_\")\n\treturn len(db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM information_schema.tables WHERE table_name = '%s'\"%gene_table_name).fetchall()) > 0", "def check_has_regions(seq):\n for j in range(len(seq)):\n seq[j]['has_entries'] = 0\n if int(seq[j]['entry_count']) != 0 and int(seq[j]['is_country']) != 1:\n seq[j]['has_entries'] = 1\n\n if int(seq[j]['entry_count']) != 0 and ('regions' not in seq[j] or 'children' not in seq[j]) :\n seq[j]['has_entries'] = 1\n if 'children' in seq[j]:\n seq[j]['has_children'] = 1\n else:\n seq[j]['has_children'] = 0\n return seq", "def check_tables_populated(self) -> bool:\n sources = self.metadata.scan().get(\"Items\", [])\n if len(sources) < len(SourceName):\n logger.info(\"Gene sources table is missing expected sources.\")\n return False\n\n records = self.genes.query(\n IndexName=\"item_type_index\",\n KeyConditionExpression=Key(\"item_type\").eq(\"identity\"),\n Limit=1,\n )\n if len(records.get(\"Items\", [])) < 1:\n logger.info(\"Gene records index is empty.\")\n return False\n\n normalized_records = self.genes.query(\n IndexName=\"item_type_index\",\n KeyConditionExpression=Key(\"item_type\").eq(RecordType.MERGER.value),\n Limit=1,\n )\n if len(normalized_records.get(\"Items\", [])) < 1:\n logger.info(\"Normalized gene records index is empty.\")\n return False\n\n return True", "def test_variant_case_no_genes(adapter, case_obj, variant_obj):\n # GIVEN a variant wihtout gene info\n assert variant_obj.get(\"genes\") is None\n # GIVEN that no region vcf exists\n assert \"region_vcf_file\" not in case_obj\n # WHEN adding info\n variant_case(adapter, case_obj, variant_obj)\n # THEN assert no region vcf was added since there where no gene info\n assert \"region_vcf_file\" not in case_obj", "def check_region(self, region_id, action=\"check\"):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})\n total = 0\n total_yes = 0\n for groupid, scenegroup in scenedata['res'].items():\n if getattr(self, action+\"_group\")(groupid, scenegroup):\n total_yes += 1\n total += 1\n report = []\n report.append(\"--. \\n\")\n report.append(\"total objects %s. \\n\"%(total,))\n for key in self._found.keys():\n report.append(\"total \"+key+\" %s. 
\\n\"%(self._total_server[key],))\n report.append(key+\" in blend %s\\n\"%(self._found[key],))\n return report", "def test_signal_regions_len(i07_nexus, regions):\n assert len(i07_nexus.signal_regions) == len(regions)", "def gene_check(self,DNA,Pol_ac,Pol_c,gene_begin,gene_end):\n PolymeraseIII_ac = Pol_ac\n PolymeraseIII_c = Pol_c\n if (gene_end < PolymeraseIII_c.position) or (gene_begin > (2*self.DNA.length-PolymeraseIII_ac.position)):\n return 2\n else:\n return 1", "def verify_aggCode(self):\n self.c.execute('''SELECT aggCode, COUNT(aggCode) \n FROM Agglomerations\n GROUP BY aggCode\n HAVING COUNT(aggCode) > 1''')\n res = self.c.fetchall()\n if (len(res) > 0):\n return [False, \"The aggCode '%s' is present %s times\", res]\n else:\n return [True]", "def test_valid_genes_file(self):\n\n # Create a valid genes file\n valid_genes_file = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"data\", \"valid_genes_file.bed\")\n\n ref_name = \"ref1\"\n\n genes = {\"gene1\": {\"start\": 0, \"end\": 100},\n \"gene 2\": {\"start\": 101, \"end\": 200}, # Spaces are allowed in the gene name\n \"gene3\": {\"start\": 201, \"end\": 300}}\n\n with open(valid_genes_file, \"w+\") as f:\n for gene in genes:\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (ref_name, genes[gene][\"start\"],\n genes[gene][\"end\"], gene))\n\n parsed_genes = parse_genes_file(valid_genes_file, ref_name)\n\n for gene in parsed_genes:\n assert gene in genes\n assert parsed_genes[gene][\"start\"] == genes[gene][\"start\"]\n assert parsed_genes[gene][\"end\"] == genes[gene][\"end\"]\n assert parsed_genes[gene][\"frame\"] == genes[gene][\"start\"] % 3\n\n os.remove(valid_genes_file)", "def can_mutate(self, ga, chromosome):\n return len(chromosome.genes) < len(ga._gene_bank)", "def check_hgvs(self):\n import re\n check = 0\n for row_index, row in self.snp_df.iterrows():\n if row['hgvs'] is not None:\n if not re.match(\"c(.*)\", str(row['hgvs'])):\n check += 1\n print \"Error: invalid HGVS nomenclature, see row\", row_index+4 # prints row in excel doc\n return check", "def check(indivs, geno_list):\r\n\tfor i in xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True", "def check_region(self, region):\n region_slugs = [x.slug for x in self.manager.get_all_regions()]\n return region in region_slugs", "def check(self):\n gAsset = cmds.ls(type='gAsset')\n\n render_geo = []\n if gAsset:\n trans = cmds.listRelatives(gAsset[0], p=True, f=True)\n meshes = cmds.listRelatives(trans, ad=True, type='mesh', f=True)\n if meshes:\n render_geo.extend(meshes)\n # for item in meshes:\n # trans = cmds.listRelatives(item, p=True, f=True)\n # render_geo.extend(trans)\n\n if not pm.ls(\"*.grid_renderGeo\"):\n self.status = self.errorMode\n self.addError(\"No geometry's are tagged as render geo\")\n self.errorMessage = \"No geometry is tagged as render geo\"\n elif not len(set(cmds.ls(\"*.grid_renderGeo\"))) == len(render_geo):\n self.status = self.errorMode\n self.addError(\"Not all Geo tags under gasset\")\n self.errorMessage = \"Not all Geo tags under gasset\"\n else:\n self.status = \"OK\"\n else:\n self.addError(\"No Gasset found\")\n self.errorMessage = \"No gasset found\"", "def validate_det1_region(regfile):\n err=-1\n import regions\n# from regions.io.ds9.read import DS9Parser\n from regions import Regions\n assert os.path.isfile(regfile), f'{regfile} does not exist!'\n \n# with open(regfile) as fh: \n# region_string = fh.read()\n# parser = 
DS9Parser(region_string)\n# assert parser.coordsys == 'image', \\\n# f'Region coordinate system is {parser.coordsys}, not image!'\n\n reg = Regions.read(regfile)\n\n\n # Check and make sure this is a \"pixel\" region and not a \"sky\" region\n\n assert 'Pixel' in f'{type(reg[0])}', \\\n f'Region coordinate system is not image coordinates for {regfile}\\n'\n\n # Check to make sure tha the first region in the file is an \"include\" region\n for ri in reg:\n assert ri.meta['include'] is True, \\\n f'\\n {regfile} has an exclusion region first! \\n Put the source region first instead!'\n break", "def isgene(s, gene):\n test = s.query(Genes).filter(Genes.name.ilike(gene)).first()\n if test is None:\n gene_list = check_gene_name(gene)\n if len(gene_list) == 0:\n return None\n else:\n for g in gene_list:\n print(g)\n test = s.query(Genes).filter(Genes.name.ilike(str(g))).first()\n if test is not None:\n return test.name\n return None\n else:\n return test.name", "def treasure_condition(locations):\n return not len(locations) == 0", "def test_region_check(self):\n reference = {'region': 'reference'}\n target = {'region': 'target'}\n\n # Check that IOError is raised for nonmatching regions\n self.assertRaises(IOError, librad_drift.RadiometricDrift.check_fields, reference, target)\n\n # Check no error raised if regions match\n librad_drift.RadiometricDrift.check_fields(reference, reference)", "def hasRequiredElements(self):\n return _libsbml.GeneProductAssociation_hasRequiredElements(self)", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def check_regions(self, timestamp, bid, ofr, signal, allow=True):\n if self.regions:\n mutated = False\n\n # one ore many region, have to pass at least one test\n for region in self.regions:\n if region.can_delete(timestamp, bid, ofr):\n mutated |= True\n\n elif region.test_region(timestamp, signal):\n # match with at least one region\n return True\n\n if mutated:\n self.cleanup_regions(timestamp, bid, ofr)\n\n return False\n else:\n # no region always pass\n return allow", "def check_single_region(region_data_path):\n # 1.check every components of this region path\n vendor, region, version, is_level0 = Util.parse_rdf_version(os.path.basename(region_data_path))\n if region and version:\n success_components_check_info_dic, failed_components_check_info_dic = check_region_components(region, is_level0, region_data_path)\n # 2.collection success check info and failed check info\n if is_level0:\n region = \"_\".join([region, Util.LEVEL0_FLAG])\n detail_msg_list = []\n for failed_component_name, failed_msg in failed_components_check_info_dic.iteritems():\n detail_msg_list.append(\":\".join([failed_component_name, ExtendChecker.CHECK_FAILED_STR, failed_msg]))\n for component_name, msg in success_components_check_info_dic.iteritems():\n detail_msg_list.append(\":\".join([component_name, ExtendChecker.CHECK_SUCCESS_STR, msg]))\n check_state_str = ExtendChecker.CHECK_FAILED_STR if failed_components_check_info_dic else ExtendChecker.CHECK_SUCCESS_STR\n\n return CheckStateInfo(region, version, detail_msg_list, check_state_str)", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, 
reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def _need_genes(config):\n need_genes = []\n for t in ['gene', 'gene1', 'gene2']:\n if (t in config.keys()) and config[t]:\n need_genes.append(config[t])\n if ('adj_gene' in config.keys()) and config['adj_gene']:\n if config['adj_gene'] == 'CTL':\n need_genes.extend(['CD8A', 'CD8B', 'PRF1', 'GZMA', 'GZMB'])\n else:\n need_genes.append(config['adj_gene'])\n if ('protein_gene' in config.keys()) and config['protein_gene']:\n need_genes.extend(config['protein_gene'])\n return(need_genes)", "def check_chromosomes(fasta_chromosomes, gtf_chromosomes):\n fasta_unique = fasta_chromosomes - gtf_chromosomes\n gtf_unique = gtf_chromosomes - fasta_chromosomes\n if fasta_unique:\n logger.warning((\n 'The following chromosomes were found in the FASTA but doens\\'t have '\n 'any \"transcript\" features in the GTF: {}. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n if gtf_unique:\n logger.warning((\n 'The following chromosomes were found to have \"transcript\" features '\n 'in the GTF but doens\\'t exist in the FASTA. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n chromosomes = set.intersection(fasta_chromosomes, gtf_chromosomes)\n\n return chromosomes", "def boundary_invariant(self):\n for cell in self.fire_boundary():\n if self.is_empty(cell[0], cell[1]):\n print \"Cell \" + str(cell) + \" in fire boundary is empty.\"\n return False\n return True", "def remove_empty_genes(self):\n to_remove = []\n for gene in self.genes:\n if not gene.mrnas:\n to_remove.append(gene)\n if to_remove:\n for gene in to_remove:\n self.genes.remove(gene)\n sys.stderr.write(\"Removed empty gene \" + gene.identifier + \"\\n\")\n self.removed_genes.extend(to_remove)\n return to_remove", "def valid(self):\n if (self._npix == []\n or self._gpix == []\n or self._epix == []\n or self._ppix == []) :\n return False\n return True", "def test_bkg_regions_len(i07_nexus: I07Nexus, regions):\n assert len(i07_nexus.background_regions) == len(regions)" ]
[ "0.6536178", "0.6260842", "0.6060093", "0.60499275", "0.5892845", "0.5784357", "0.5690322", "0.56686544", "0.5655738", "0.56486446", "0.564675", "0.5644961", "0.5612827", "0.5574081", "0.55659956", "0.55635387", "0.5556078", "0.55531275", "0.5541258", "0.5540612", "0.55338156", "0.55225384", "0.55095524", "0.54927075", "0.5492087", "0.5489024", "0.5489022", "0.5478509", "0.54743004", "0.5473618" ]
0.63438827
1
Pad all sequences in to the same length to the left and right of their conserved cysteine positions. Next, pads all sequences further out (if necessary) such as to eliminate all v_5p and j_3p deletions.
def pad_seqs_to_same_length(self, debug=False): maxima = self.get_padding_parameters(debug) for query in self.sw_info['queries']: swfo = self.sw_info[query] if 'padded' in swfo: # already added padded information (we're probably partitioning, and this is not the first step) return seq = swfo['seq'] cpos = swfo['cyst_position'] if cpos < 0 or cpos >= len(seq): print 'hm now what do I want to do here?' k_v = swfo['k_v'] # padleft = maxima['fv_insertion_len'] + maxima['gl_cpos'] - cpos # left padding: biggest germline cpos minus cpos in this sequence # padright = maxima['gl_cpos_to_j_end'] + maxima['jf_insertion_len'] - (len(seq) - cpos) padleft = maxima['gl_cpos'] - cpos # left padding: biggest germline cpos minus cpos in this sequence padright = maxima['gl_cpos_to_j_end'] - (len(seq) - cpos) if padleft < 0 or padright < 0: raise Exception('bad padding %d %d for %s' % (padleft, padright, query)) swfo['padded'] = {} padfo = swfo['padded'] # shorthand assert len(utils.ambiguous_bases) == 1 # could allow more than one, but it's not implemented a.t.m. padfo['seq'] = padleft * utils.ambiguous_bases[0] + seq + padright * utils.ambiguous_bases[0] if query in self.sw_info['indels']: print ' also padding reversed sequence' self.sw_info['indels'][query]['reversed_seq'] = padleft * utils.ambiguous_bases[0] + self.sw_info['indels'][query]['reversed_seq'] + padright * utils.ambiguous_bases[0] padfo['k_v'] = {'min' : k_v['min'] + padleft, 'max' : k_v['max'] + padleft} padfo['cyst_position'] = swfo['cyst_position'] + padleft padfo['padleft'] = padleft padfo['padright'] = padright if debug: print ' pad %d %d %s' % (padleft, padright, query) print ' %d --> %d (%d-%d --> %d-%d)' % (len(seq), len(padfo['seq']), k_v['min'], k_v['max'], padfo['k_v']['min'], padfo['k_v']['max']) if debug: for query in self.sw_info['queries']: print '%20s %s' % (query, self.sw_info[query]['padded']['seq'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad_sequences(sequences):\n max_len = max(s.shape[0] for s in sequences)\n padded = []\n for seq in sequences:\n zero_pad = np.concatenate(\n [seq, np.zeros((max_len - seq.shape[0], ) + seq.shape[1:])])\n padded.append(zero_pad[np.newaxis, :])\n\n return np.concatenate(padded, axis=0)", "def pad(seq, n):\n return", "def _pad_sequences(sequences, pad=PAD):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)", "def paddingSequence(X_train, X_test, maxLen=30):\r\n #######equalize list of seq\r\n X_train = pad_sequences(X_train, maxLen, padding='post', truncating='post')\r\n X_test = pad_sequences(X_test, maxLen, padding='post', truncating='post')\r\n return X_train, X_test", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def pad_seq_records_for_alignment(seqs: List[SeqLikeType]):\n df = pd.DataFrame({\"seqs\": [SeqLike(seq, seq_type=\"aa\") for seq in seqs]})\n return df.seqs.seq.as_alignment()", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def pad_sequences(self,sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def pad_sequences(self, X):\n return pad_sequences(X, maxlen=self.pad_length)", "def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! 
Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x", "def pad_sequences(data, max_length):\n ret = []\n\n # Use this zero vector when padding sequences.\n zero_vector = [0] * Config.n_features\n zero_label = len(LBLS)-1 # corresponds to the 'O' tag\n\n for sentence, labels in data:\n ### YOUR CODE HERE (~4-6 lines)\n newSentence = []\n newLabels = []\n mask = []\n \n for i in range(0, max_length):\n if(i < len(sentence)):\n newSentence.append(sentence[i])\n newLabels.append(labels[i])\n mask.append(True)\n else:\n newSentence.append(zero_vector)\n newLabels.append(zero_label)\n mask.append(False)\n ret.append( (newSentence, newLabels, mask,[len(sentence)]) )\n ### END YOUR CODE ###\n return ret", "def pad_sequences(sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def _pad(seqs, dtype=torch.float32):\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens", "def pad_or_trim(seq, max_len=1000):\n n, m = seq.shape\n \n if n > max_len:\n seq = seq[-max_len:, :]\n elif n < max_len:\n if sparse.issparse(seq):\n pad_csr(seq, (max_len, m))\n else:\n seq = np.r_[seq, np.zeros((max_len - n, m))]\n return seq", "def _pad_large(self, arrays, sentinel):\n # Compute max length.\n maxlen_ctx = 0\n maxlen_sent = 0\n for array in arrays:\n maxlen_ctx = max(maxlen_ctx, len(array))\n for seq in array:\n maxlen_sent = max(maxlen_sent, len(seq))\n\n # Pad contexts\n ctx_lens = []\n ctx_sent_lens = []\n padded_ctxs = []\n for array in arrays:\n ctx_lens.append(len(array))\n padding = maxlen_ctx - len(array)\n padded_ctx = array + [[sentinel]] * padding\n # Pad sents\n padded = []\n lens = []\n for i, seq in enumerate(padded_ctx):\n padding = maxlen_sent - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq) if i < ctx_lens[-1] else 0)\n\n padded_ctxs.append(padded)\n ctx_sent_lens.append(lens)\n\n return padded_ctxs, ctx_lens, ctx_sent_lens", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def padding_tensor(sequences, max_length=1000000):\n # get the number of sequences\n num = len(sequences)\n # get the maximum length (clip too long sequences)\n max_len = min(max([s.shape[0] for s in sequences]), max_length)\n # define new output dimensions\n out_dims = (num, max_len, *sequences[0].shape[1:])\n # create output_tensor with 
new dimensionality\n out_tensor = sequences[0].data.new(*out_dims).fill_(0)\n # create new mask_tensor with the corresponding mask\n mask = sequences[0].data.new(*out_dims).fill_(0)\n # iterate over the sequences\n logger.info('Start padding breaths....')\n with tqdm(\n total=len(sequences),\n bar_format=\"{desc:<5.5}{percentage:3.0f}%|{bar:100}{r_bar}\",\n ascii=True\n ) as pbar:\n for i, tensor in enumerate(sequences):\n # get the length of the current breath\n length = min(tensor.size(0), max_len)\n # add all valid breaths\n print(tensor)\n input('before')\n out_tensor[i, :length] = tensor[:length, :]\n # for the breaths that are \"too short\" padd with last value\n out_tensor[i, length:] = 0\n print(out_tensor)\n input('after')\n # create mask\n mask[i, :length] = 1\n # update progressbar\n pbar.update(1)\n\n # return result\n return max_len, out_tensor, mask", "def sequence_align(string_v, string_w):\n m = len(string_v)\n n = len(string_w)\n\n # Initialization; D[i][j][0] contains the max alignment score of the\n # ith prefix of v and the jth of w; D[i][j][1] contains the back pointer.\n D = [[(0, START) for _ in range(n + 1)] for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n D[i][0] = (D[i - 1][0][0] + blosum['-', string_v[i - 1]], DELETE)\n\n for j in range(1, n + 1):\n D[0][j] = (D[0][j - 1][0] + blosum['-', string_w[j - 1]], INSERT)\n\n # Recurrence\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n insert = D[i][j-1][0] + blosum['-', string_w[j - 1]]\n delete = D[i-1][j][0] + blosum[string_v[i - 1], '-']\n substitute = D[i-1][j-1][0] + blosum[string_v[i - 1], string_w[j - 1]]\n # Set D[i][j] to the max of the recurrences\n if insert > delete and insert > substitute:\n D[i][j] = (insert, INSERT)\n elif delete > substitute:\n D[i][j] = (delete, DELETE)\n else:\n D[i][j] = (substitute, SUBSTITUTE)\n\n i, j = m, n\n v_aligned = ''\n w_aligned = ''\n back_pointer = D[i][j][1]\n while back_pointer != START:\n if back_pointer == INSERT:\n j -= 1\n v_aligned = '-' + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n elif back_pointer == DELETE:\n i -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = '-' + w_aligned\n\n elif back_pointer == SUBSTITUTE:\n i -= 1\n j -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n back_pointer = D[i][j][1]\n \n return v_aligned, w_aligned", "def pad_sentences(sentences, padding_word=\"<PAD/>\"):\n # !!! 
一定要注意这里会影响数据的形状,要与代码内的 sequence length 保持一致 !!!\n sequence_length = 30\n # sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i][:sequence_length]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def _justify(self):\n minLengths = [max([max(map(len, row[i].split() + [''])) for row in self._rows if len(row) > 0])\n for i in range(self._colsNum)]\n shifts = [w - mw for mw, w in zip(minLengths, self._widthes)]\n # length = len(shifts)\n borrow = zip(self._colsRange, shifts)\n borrow.sort(lambda a, b: cmp(a[1], b[1]))\n delta = [0] * self._colsNum\n\n donorIdx = self._colsNum - 1\n recIdx = 0\n while True:\n\n curDonation = borrow[donorIdx][1]\n curRec = borrow[recIdx][1]\n\n if curRec >= 0 or curDonation <= 0:\n break\n\n curDelta = min(curDonation, -curRec)\n curDonation -= curDelta\n curRec += curDelta\n delta[borrow[donorIdx][0]] -= curDelta\n delta[borrow[recIdx][0]] += curDelta\n\n if curDonation == 0:\n donorIdx -= 1\n\n if curRec == 0:\n recIdx += 1\n\n for i in self._colsRange:\n self._widthes[i] += delta[i]", "def pad_seq_list(array, sentinel):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def pad_sequences_1d(sequences, max_len=None, padding='post', truncating='post', value=0.):\n return pad_sequences(sequences, maxlen=max_len, padding=padding, truncating=truncating,\n value=value)", "def _pad(self, array, sentinel, max_len=None):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n if max_len is not None:\n maxlen = max(maxlen, max_len)\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def align(self):\n number_of_Xs = 0\n xFront = \"\"\n xEnd = \"\"\n dashFront = \"\"\n dashEnd = \"\"\n\n # Determining if variable amino acids (\"X\") need to be added to the\n\t # beginning of the sequence:\n z = self.hmmStart-self.seqStart\n number_of_Xs = (self.hmmStart-1)-z\n if z > 0:\n dashFront = \"-\"*z\n xFront = \"X\"*number_of_Xs\n elif self.hmmStart-1<=self.seqStart-1:\n xFront = \"X\"*(self.hmmStart-1) \n\n # Determining if variable amino acids (\"X\") need to be added to the \n # end of the sequence:\n number_of_Xs_end = self.hmmLength - self.hmmEnd\n\n # The original sequence length; SPA format includes this\n delimeter = \"|\" #Need to fix can be \"_\" or \"|\" or something else...\n \n distToSeqEnd = self.origSeqLength - seqTo\n if distToSeqEnd >= number_of_Xs_end and number_of_Xs_end != self.hmmLength:\n xEnd = 'X'*number_of_Xs_end\n else:\n if distToSeqEnd < number_of_Xs_end:\n xEnd = 'X'*distToSeqEnd\n \tdashEnd += \"-\"*(number_of_Xs_end-distToSeqEnd)\n \t\n begin = \"{}{}\".format(dashFront, xFront)\n end = \"{}{}\".format(xEnd, dashEnd)\n self.addToFront(begin)\n self.data.extend(end)\n self.original = str(self)", "def pad_sequences(sequences, maxlen, nb_sequences, dtype='int32', value=-1):\n\n x = (numpy.ones((nb_sequences, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n trunc = s[:maxlen]\n\n x[idx, :len(trunc)] = trunc\n\n return x", "def padAlignment(align, 
applyPadding=True):\n if type(align) in [dict, np.ndarray, list]:\n align = pd.Series(align)\n\n \"\"\"Replace * and # with - and - \"\"\"\n for ind in align.index:\n if '*' in align[ind]:\n align[ind] = align[ind].replace('*', '-')\n if '#' in align[ind]:\n align[ind] = align[ind].replace('#', '-')\n \"\"\"Pad with gaps if the lengths are all the same\"\"\"\n if applyPadding:\n L = align.map(len).unique()\n if len(L) > 1:\n #print 'Sequences have different lengths (pading with gaps): %s' % L\n L = L.max()\n for ind in align.index:\n if len(align[ind]) < L:\n align[ind] = align[ind].ljust(L, '-')\n else:\n L = L.max()\n return align", "def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))", "def pad_sentences(sentences, padding_word=\"<PAD/>\",sequence_length = 0):\n if sequence_length == 0:\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences" ]
[ "0.70233285", "0.63832694", "0.63405013", "0.630228", "0.62689775", "0.6214276", "0.6211584", "0.6066575", "0.6046993", "0.6031975", "0.6023637", "0.5948999", "0.59289056", "0.59222424", "0.5858899", "0.5857871", "0.584324", "0.5786291", "0.5765228", "0.5753933", "0.5746988", "0.5689573", "0.56867737", "0.56859225", "0.56525075", "0.5645074", "0.56409395", "0.56312054", "0.5593313", "0.5584177" ]
0.72471905
0
If any of the queries in query_names was unproductive, return an empty list (which will be skipped entirely); otherwise return the original name list
def remove_sw_failures(self, query_names): unproductive, unknown = False, False for qrn in query_names: if qrn in self.sw_info['skipped_unproductive_queries']: unproductive = True if qrn in self.sw_info['skipped_unknown_queries']: unknown = True if unproductive or unknown: return [] # otherwise they should be in self.sw_info, but doesn't hurt to check return_names = [] for name in query_names: if name in self.sw_info: return_names.append(name) else: print ' %s not found in sw info' % ' '.join([qn for qn in query_names]) return return_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deduplicate_raw_text_queries(log_queries_iter) -> List[str]:\n return list(set(q for q in log_queries_iter))", "def clean_query_list(queries: List[str]) -> List[str]:\n return [remove_leading_whitespace_and_empty_lines(query) for query in queries]", "def drug_names():\n results = set()\n if 'qry' in request.args and len(request.args['qry']) >= 3:\n look_for = f\"{request.args['qry'].lower()}%\"\n drug_list = FTA.find_by_name(look_for, False )\n results = set([f\"{d.PROPRIETARY_NAME} - {d.NONPROPRIETARY_NAME}\" for d in drug_list if d.ACTIVE])\n\n results = sorted(list(results))\n return jsonify(results)", "def get(self):\n return [\n [a, v]\n for a, v in sorted(self.__dict__.items())\n if a != \"query_name\" and len(v)\n ]", "def get_suggestions(db_company):\n if db_company.archived:\n return []\n\n names = [\n db_company.name,\n *db_company.trading_names,\n ]\n\n data = [\n *itertools.chain(\n *[name.split(' ') for name in names],\n ),\n *names,\n ]\n\n return list(filter(None, set(data)))", "def _badnames():\n\n with sqlite3.connect(DB) as db:\n cursor = db.cursor()\n cursor.execute(\"SELECT eid, fullname from players ORDER BY eid\")\n rows = cursor.fetchall()\n # list to put all entries in.\n outlist = []\n # now check each name.\n if len(rows) == 0:\n return None\n else:\n for row in rows: # fullname = row[1]\n splitname = row[1].split() # splits on the space.\n if len(splitname) != 2: # if the name is not 2. append to list.\n outlist.append(\"{0} - {1}\".format(row[0], row[1]))\n # return what we have.\n return outlist", "def extract_special_queries(queries):\n specials = {}\n dc = queries.copy()\n for i in queries:\n if i.startswith('__') and i in FILTERS_LIST:\n specials[i] = queries[i]\n del dc[i]\n return (dc, specials)", "def get_queryset(self):\n queryset = self.queryset.order_by('Name')\n query = self.request.QUERY_PARAMS.get('q', None)\n unique_name = self.request.QUERY_PARAMS.get('unique_name', False)\n if query is not None:\n queryset = queryset.filter(Name__istartswith=query)\n if unique_name:\n queryset = queryset.distinct('Name')\n return queryset", "def normalize_query(text: str) -> List[str]:\n tokens = nltk.word_tokenize(text.lower())\n return [lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in tokens if w not in PUNC]", "def build_query(selections, exclude=None):\n other_selected = {sel for c, sel in selections.items() if (c != exclude and sel != -1)}\n if other_selected:\n return ' and '.join(other_selected)\n else:\n return None", "def similar_qs(self):\n pass", "def process_query(raw_query: str) -> [str]:\n query_tokens = word_tokenize(raw_query)\n\n return [LEMMATIZER.lemmatize(token.lower()) for token in query_tokens\n if LEMMATIZER.lemmatize(token.lower()) not in STOP_WORDS]", "def test_cases_different_prefix_return_both(self):\n self.create_testdata()\n res = self.filter([u\"pre\", u\"moz\"])\n\n self.assertEqual(\n Set([x.name for x in res.all()]),\n Set([\"CV 1\", \"CV 3\", \"CV 4\"]),\n )", "def filter_results(qry):\n result = []\n\n # check if qry is a list (multiple records) or not (single record)\n if type(qry) != list:\n record = make_ndb_return_data_json_serializable(qry)\n return(record)\n\n for q in qry:\n result.append(make_ndb_return_data_json_serializable(q))\n\n return(result)", "def getNoShortName(self):\n return [x for x in self.xeps if not x.shortname]", "def not_matching_list(self):\n\n pre_result = comp(self.regex)\n\n return list(\n filter(lambda element: not pre_result.search(element), self.data)\n )", "def 
clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names", "def __generateQuery(self, query):\n if query == None:\n return [\"1=1\"]\n elif type(query) is not list:\n return [query]\n else:\n return query", "def test_excludeIngredientQuery(self) -> None:\n ingredient0 = 'multimedia'\n ingredient1 = 'provision'\n result = self.entries.exclude(Q(ingredients__icontains=ingredient0) | Q(ingredients__icontains=ingredient1))\n self.assertEqual(988, len(result))\n\n queries = (Q(ingredients__icontains=ingredient0), Q(ingredients__icontains=ingredient1))\n result = self.entries.exclude(functools.reduce(operator.or_, queries))\n self.assertEqual(988, len(result))", "def new_list_with_IDs_if_available(query_list, mygene_website_dict):\n output_list = []\n no_entrez_gene_id_count = 0\n for i in query_list:\n try:\n output_list.append(mygene_website_dict[i]) # get entrezgene ID\n except KeyError:\n output_list.append(i) # if no entrezgene ID, just keep the symbol\n no_entrez_gene_id_count += 1\n print(\"number of output list: \", len(output_list))\n print(\"number of inputs with no gene ID found: \", no_entrez_gene_id_count)\n print(\"end\")\n return output_list", "def getSuggestions(self,query):\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n self._possible = [] #List of strings one change away\n self._final = [] #Final list of suggestions\n self._alphabet = list(string.ascii_lowercase) # Produces a list of all lowercase letters.\n self._alphabet.extend(('-',' '))\n self._query = query.lower()\n for i in range((len(query))-1):\n possible = self._query[:i]+self._query[i+1]+self._query[i]+self._query[(i+2):] #Add cases of inverting two letters\n self._possible.append(possible)\n for i in range(len(query)):\n possible = self._query[:i] + self._query[(i+1):] #Add cases of deleting one letter\n self._possible.append(possible)\n for g in range(len(self._alphabet)):\n possible = self._query[:i]+self._alphabet[g]+self._query[(i+1):] #Add cases of inserting one letter\n possibleAlso = self._query[:i]+self._alphabet[g]+self._query[i:] #Add cases of replacing one letter\n self._possible.append(possible)\n self._possible.append(possibleAlso)\n suggestionLength = len(self._possible)\n for i in range(suggestionLength):\n self._possible.append(self._possible[i].capitalize()) #Add all possible strings, capitalized (doubles list length)\n for i in self._possible:\n if i in self._words:\n if i not in self._final: #Removes duplicates from final list\n if i != query: \n self._final.append(i)\n if query.islower() == True:\n for i in self._final:\n if i[0].isupper() == True:\n if i[0] != query[0].upper():\n self._final.remove(i)\n if query.istitle() == True:\n self._final = [i.capitalize() for i in self._final]\n self._final.sort()\n return self._final", "def get_users_by_name(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query)).distinct()\n return user_list", "def names(filter=None):", "def test_cases_same_prefix_return_both(self):\n self.create_testdata()\n res = self.filter([u\"moz\"])\n\n self.assertEqual(\n Set([x.name for x in res.all()]),\n Set([\"CV 3\", 
\"CV 4\"]),\n )", "def _search_suggestions():\n now = time.time()\n words_q = Application.objects.values('acronym',\n 'owner', 'owner_org',\n 'nasa_off_name', 'nasa_requester',\n 'manager_app_development', 'manager_project',\n 'dev_name_primary', 'dev_name_alternate').distinct()\n wordset = set()\n for worddict in words_q:\n vals = worddict.values()\n for val in vals:\n wordset.add(val)\n words = [word for word in wordset if word]\n words.sort()\n logging.info(\"search_suggestions len=%d time=%f\" % (len(words), time.time() - now))\n return json.dumps(words)", "def _replica_results_dedup(queries):\n deduplicated_queries = []\n for query in queries:\n new_query = query.copy()\n\n if \"results\" in query:\n objects_seen = {}\n dedup_results = []\n results = query[\"results\"]\n\n for result in results:\n if result[\"type\"] == \"dataobject\":\n full_name = result[\"full_name\"]\n if full_name not in objects_seen:\n objects_seen[full_name] = 1\n dedup_results.append(result)\n else:\n dedup_results.append(result)\n\n new_query[\"results\"] = dedup_results\n\n deduplicated_queries.append(new_query)\n\n return deduplicated_queries", "def get_queryset(self):\n queryset = super(APIListSession, self).get_queryset()\n name = self.request.QUERY_PARAMS.get('name', None)\n if name is not None:\n queryset = queryset.filter(name__istartswith=name)\n return queryset", "def normalize_query(self):\n query_lower = self.user_input.lower()\n # Remove punctuation\n query_no_punctuation = re.sub(r\"[!#$%&'()*+,-./:;<=>?@\\^_`{|}~]+\\ *\", \" \", query_lower)\n # Remove accent from all the words\n self.query_no_accent = ''.join((c for c in \\\n unicodedata.normalize('NFD', query_no_punctuation) \\\n if unicodedata.category(c) != 'Mn'))\n\n return query_lower, query_no_punctuation, self.query_no_accent", "def _maybe_match_names(self, other):\n if len(self.names) != len(other.names):\n return [None] * len(self.names)\n names = []\n for a_name, b_name in zip(self.names, other.names):\n if a_name == b_name:\n names.append(a_name)\n else:\n # TODO: what if they both have np.nan for their names?\n names.append(None)\n return names", "def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result" ]
[ "0.6213024", "0.5977889", "0.59332335", "0.578334", "0.5704371", "0.5704283", "0.5696776", "0.55361503", "0.55334", "0.5487988", "0.5462377", "0.545718", "0.5447865", "0.54456997", "0.5400081", "0.53956217", "0.53954196", "0.5392832", "0.5387499", "0.53780365", "0.53728545", "0.5371278", "0.5350881", "0.53408307", "0.53316814", "0.5328971", "0.53228796", "0.5322019", "0.53136307", "0.52998924" ]
0.6498767
0
Read bcrham annotation output
def read_annotation_output(self, algorithm, count_parameters=False, parameter_out_dir=None): print ' read output' if count_parameters: assert parameter_out_dir is not None pcounter = ParameterCounter(self.germline_seqs) if count_parameters else None true_pcounter = ParameterCounter(self.germline_seqs) if (count_parameters and not self.args.is_data) else None perfplotter = PerformancePlotter(self.germline_seqs, 'hmm') if self.args.plot_performance else None n_seqs_processed, n_events_processed = 0, 0 hmminfo = {} with opener('r')(self.hmm_outfname) as hmm_csv_outfile: reader = csv.DictReader(hmm_csv_outfile) boundary_error_queries = [] for line in reader: utils.process_input_line(line, splitargs=('unique_ids', 'seqs'), int_columns=('nth_best', 'v_5p_del', 'd_5p_del', 'cdr3_length', 'j_5p_del', 'j_3p_del', 'd_3p_del', 'v_3p_del'), float_columns=('logprob')) ids = line['unique_ids'] same_event = utils.from_same_event(self.args.is_data, self.reco_info, ids) if same_event is None: same_event = -1 id_str = ''.join(['%20s ' % i for i in ids]) # check for errors if line['nth_best'] == 0: # if this is the first line for this set of ids (i.e. the best viterbi path or only forward score) if line['errors'] is not None and 'boundary' in line['errors'].split(':'): boundary_error_queries.append(':'.join([uid for uid in ids])) else: assert len(line['errors']) == 0 utils.add_cdr3_info(self.germline_seqs, self.cyst_positions, self.tryp_positions, line) if self.args.debug: if line['nth_best'] == 0: # if this is the first line (i.e. the best viterbi path) for this query (or query pair), print the true event print '%s %d' % (id_str, same_event) self.print_hmm_output(line, print_true=(line['nth_best']==0)) #, perfplotter=perfplotter) if line['nth_best'] == 0 and (line['cdr3_length'] != -1 or not self.args.skip_unproductive): # if it's productive, or if we're not skipping unproductive rearrangements if pcounter is not None: pcounter.increment_reco_params(line) if true_pcounter is not None: true_pcounter.increment_reco_params(self.reco_info[ids[0]]) # NOTE doesn't matter which id you pass it, since they all have the same reco parameters n_events_processed += 1 for iseq in range(len(ids)): uid = ids[iseq] hmminfo[uid] = dict(line) # make a copy of the info, into which we'll insert the sequence-specific stuff hmminfo[uid]['seq'] = line['seqs'][iseq] hmminfo[uid]['unique_id'] = uid utils.add_match_info(self.germline_seqs, hmminfo[uid], self.cyst_positions, self.tryp_positions, debug=(self.args.debug > 0)) if pcounter is not None: pcounter.increment_mutation_params(hmminfo[uid]) if true_pcounter is not None: true_pcounter.increment_mutation_params(self.reco_info[uid]) # NOTE doesn't matter which id you pass it, since they all have the same reco parameters if perfplotter is not None: perfplotter.evaluate(self.reco_info[uid], hmminfo[uid], None if self.args.dont_pad_sequences else self.sw_info[uid]['padded']) n_seqs_processed += 1 if pcounter is not None: pcounter.write(parameter_out_dir) if self.args.plotdir is not None: pcounter.plot(self.args.plotdir + '/hmm', subset_by_gene=True, cyst_positions=self.cyst_positions, tryp_positions=self.tryp_positions) if true_pcounter is not None: true_pcounter.write(parameter_out_dir + '/true') if self.args.plotdir is not None: true_pcounter.plot(self.args.plotdir + '/hmm/true', subset_by_gene=True, cyst_positions=self.cyst_positions, tryp_positions=self.tryp_positions) if perfplotter is not None: assert self.args.plotdir is not None perfplotter.plot(self.args.plotdir + 
'/hmm/performance') print ' processed %d sequences (%d events)' % (n_seqs_processed, n_events_processed) if len(boundary_error_queries) > 0: print ' %d boundary errors (%s)' % (len(boundary_error_queries), ', '.join(boundary_error_queries)) if self.args.outfname is not None: outpath = self.args.outfname if self.args.outfname[0] != '/': # if full output path wasn't specified on the command line outpath = os.getcwd() + '/' + outpath shutil.copyfile(self.hmm_outfname, outpath) with open(outpath) as outfile: reader = csv.DictReader(outfile) outfo = [] for line in reader: outfo.append(line) outfo[-1]['naive_seq'] = utils.get_full_naive_seq(self.germline_seqs, line) with open(outpath, 'w') as outfile: writer = csv.DictWriter(outfile, outfo[0].keys()) writer.writeheader() for line in outfo: writer.writerow(line) if self.args.annotation_clustering == 'vollmers': if self.args.outfname is not None: outfile = open(self.args.outfname, 'w') # NOTE overwrites annotation info that's already been written to <self.args.outfname> headers = ['n_clusters', 'threshold', 'clusters'] #, 'true_clusters'] if not self.args.is_data: headers += ['adj_mi', ] #, 'n_true_clusters'] writer = csv.DictWriter(outfile, headers) writer.writeheader() for thresh in self.args.annotation_clustering_thresholds: adj_mi, partition = annotationclustering.vollmers(hmminfo, threshold=thresh, reco_info=self.reco_info) n_clusters = len(partition) if self.args.outfname is not None: row = {'n_clusters' : n_clusters, 'threshold' : thresh, 'clusters' : utils.get_str_from_partition(partition)} if not self.args.is_data: row['adj_mi'] = adj_mi # row['n_true_clusters'] = len(utils.get_true_partition(self.reco_info)) # true_partition = [cl for cl in utils.get_true_partition(self.reco_info).values()] # row['true_clusters'] = utils.get_str_from_partition(true_partition) writer.writerow(row) if self.args.outfname is not None: outfile.close() if not self.args.no_clean: os.remove(self.hmm_outfname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_binned(run, bin_scheme):\n\n fname=get_binned_file(run,bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def read_annotation_yolov5(bbox_path):\n\n # image_paths = get_lists_in_dir(rawImage_dir)\n\n dw = 1./(camera_resolution[0]) # 1 / image width\n dh = 1./(camera_resolution[1]) # 1 / image height\n\n # Read in bbox coordinate information from bbox_information.txt\n dimension_list = []\n with open(bbox_path, 'r') as annotation_file:\n content = annotation_file.read().splitlines()\n\n for n in content:\n # x = int(n.split()[0])+int(n.split()[2])/2\n # y = int(n.split()[1])+int(n.split()[3])/2\n # w = int(n.split()[2])\n # h = int(n.split()[3])\n #\n # x = x*dw\n # w = w*dw\n # y = y*dh\n # h = h*dh\n\n bb = n.split()\n w = int(bb[2])\n h = int(bb[3])\n\n start_x = int(bb[0])\n start_y = int(bb[1])\n\n center_x = start_x + w / 2\n center_y = start_y + h / 2\n\n x = center_x * dw\n y = center_y * dh\n w = w * dw\n h = h * dh\n \n dimension_list.append((x, y, w, h))\n\n return dimension_list", "def read_jack(run, bin_scheme):\n fname=get_jack_file(run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def get_annotations(self):\n ann = wfdb.rdann(self.patient_number, 'atr', pb_dir='mitdb', return_label_elements=['symbol', 'label_store',\n 'description'],\n summarize_labels=True)\n\n mit_bih_labels_str = ann.symbol\n\n labels_locations = ann.sample\n\n labels_description = ann.description\n\n return mit_bih_labels_str, labels_locations, labels_description", "def parse_annotations(Hinv, obsmat_txt):\n\n def to_image_frame(loc):\n \"\"\"\n Given H^-1 and (x, y, z) in world coordinates,\n returns (u, v, 1) in image frame coordinates.\n \"\"\"\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)\n\n mat = np.loadtxt(obsmat_txt)\n num_peds = int(np.max(mat[:, 1])) + 1\n peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path\n\n num_frames = (mat[-1, 0] + 1).astype(\"int\")\n num_unique_frames = np.unique(mat[:, 0]).size\n recorded_frames = [-1] * num_unique_frames # maps timestep -> (first) frame\n peds_in_frame = [[] for _ in range(num_unique_frames)] # maps timestep -> ped IDs\n\n frame = 0\n time = -1\n blqk = False\n for row in mat:\n if row[0] != frame:\n frame = int(row[0])\n time += 1\n recorded_frames[time] = frame\n\n ped = int(row[1])\n\n peds_in_frame[time].append(ped)\n loc = np.array([row[2], row[4], 1])\n loc = to_image_frame(loc)\n loc = [time, loc[0], loc[1], loc[2]]\n peds[ped] = np.vstack((peds[ped], loc))\n\n return recorded_frames, peds_in_frame, peds", "def main():\n args = get_args()\n annot_fp = args.annotations\n out_fp = args.outfile\n blast_fp = args.positional\n\n #print('output_arg = \"{}\"'.format(out_fp))\n #print('annotation_arg = \"{}\"'.format(annot_fp))\n #print('blast_fp = \"{}\"'.format(blast_fp))\n\n if not os.path.isfile(annot_fp):\n print(\"\\\"{}\\\" is not a file\".format(annot_fp))\n exit(1)\n if not os.path.isfile(blast_fp):\n print(\"\\\"{}\\\" is not a file\".format(blast_fp))\n exit(1)\n\n #Load the annotations\n annots_dict = {}\n with open(annot_fp, 'r') as f:\n for l in f:\n larr = l[:-1].split(\",\")\n annots_dict[larr[0]] = larr[6:]\n\n header_str = \"seq_id\\tpident\\tgenus\\tspecies\"\n if out_fp != \"\":\n out = open(out_fp, 'w')\n out.write(\"{}\\n\".format(header_str))\n else:\n print(header_str)\n\n with open(blast_fp, 'r') as f:\n for l in f:\n larr = l.split(\"\\t\")\n seq_id = larr[1]\n tax_info = 
annots_dict.get(seq_id, [\"BAD\", \"BAD\"])\n if tax_info[0] == \"BAD\":\n warn(msg=\"Cannot find seq {} in lookup\".format(seq_id))\n continue\n genus = tax_info[0]\n species = tax_info[1]\n if genus == \"\":\n genus = \"NA\"\n if species == \"\":\n species = \"NA\"\n if out_fp == \"\":\n print(\"{}\\t{}\\t{}\\t{}\".format(seq_id, larr[2], genus, species))\n else:\n out.write(\"{}\\t{}\\t{}\\t{}\\n\".format(seq_id, larr[2], genus, species))\n\n if out_fp != \"\":\n out.close()", "def __readCONTINoutput(self):\n\n titleline = 'OBJ. FCTN. VARIANCE STD. DEV.'\n chunkTitle = re.compile('OBJ. FCTN. VARIANCE STD. DEV. ')\n\n alldata = []\n\n with open(self.outputfile, 'r') as f:\n\n for line in f:\n if chunkTitle.search(line) is not None:\n\n alphadic = {}\n\n # gets the header\n alphaLine = next(f)\n if '*' in alphaLine:\n alphadic['marked'] = True\n\n alphaLine = alphaLine.replace('*', '')\n alphaParam = np.fromstring(alphaLine, sep=' ')\n\n # reduce the header line to string seperated text\n line = re.sub('\\s\\s\\s+', ' ', line).strip()\n for key, value in zip(line.split(' '), alphaParam):\n alphadic[key] = value\n # skip a line then get the data\n next(f)\n # alldata.append((alphadic, readblock(f)))\n alldata.append(\n (alphadic, readblock(f), readSummaryData(f)))\n\n # skip a line then get the data\n # print(next(f))\n\n return alldata", "def read_match_binned(lens_run, rand_run, bin_scheme):\n\n fname=get_match_binned_file(lens_run, rand_run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def readAnnotation(path, gui):\n file = open(path, 'r')\n file.readline()\n line = file.readline()\n data = {}\n counter = 0\n gui.write_to_output(\"\\n\")\n while line != '':\n counter += 1\n if counter % 10000 == 0:\n gui.write_to_output(\"Done reading \" + str(counter) + \" annotation entries\\n\", overwrite=True)\n columns = line.split()\n name = columns[-4]\n start = int(columns[4])\n end = int(columns[5])\n cds_start = int(columns[6])\n cds_end = int(columns[7])\n strand = columns[3]\n # data.append(Gene(name, reads, np.array([start, end]).astype(np.int), strand, chrm, np.array([cds_start, cds_end])))\n line = file.readline()\n if name in data.keys():\n data[name] = np.vstack((data[name], np.array([start, end, cds_start, cds_end])))\n else:\n data[name] = np.array([[start, end, cds_start, cds_end]])\n # return list(sorted(data, key=lambda x: x.getName()))\n gui.write_to_output(\"Done reading \" + str(counter) + \" annotation entries\\n\", overwrite=True)\n return data", "def load_annotations(path, img_w, img_h):\n bboxes = []\n with open(path, 'r') as file:\n for row in file:\n _, xc , yc, w, h = row.split()\n xc = float(xc)*img_w\n yc = float(yc)*img_h\n w = float(w)*img_w\n h = float(h)*img_h\n bboxes.append([xc - w/2 , yc - h/2, xc + w/2 , yc + h/2])\n\n return bboxes", "def test_humann_fastq_biom_output(self):\n \n # create a temp directory for output\n tempdir = utils.create_temp_folder(\"fastq\")\n \n # run humann test\n command = [\"humann\",\"--input\",cfg.demo_fastq,\"--output\",tempdir,\n \"--output-format\", \"biom\"]\n utils.run_humann(command)\n \n # check the output files are as expected\n for expression, message in utils.check_output(cfg.expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression,message)\n\n # remove the temp directory\n utils.remove_temp_folder(tempdir)", "def fetch_baja_bathymetry():\n data_file = POOCH.fetch(\"baja-bathymetry.csv.xz\")\n data = pd.read_csv(data_file, compression=\"xz\")\n return data", "def get_bb(self,bbname, 
mol = False):\n lines = self.mfp.get_bb(bbname)\n return lines", "def parse_beam(self):\n # AIPS FITS file; stored in the history section\n beam_regex = re.compile(r'''\n BMAJ\n \\s*=\\s*\n (?P<bmaj>[-\\d\\.eE]+)\n \\s*\n BMIN\n \\s*=\\s*\n (?P<bmin>[-\\d\\.eE]+)\n \\s*\n BPA\n \\s*=\\s*\n (?P<bpa>[-\\d\\.eE]+)\n ''', re.VERBOSE)\n\n bmaj, bmin, bpa = None, None, None\n header = self.header\n try:\n # MIRIAD FITS file\n bmaj = header['BMAJ']\n bmin = header['BMIN']\n bpa = header['BPA']\n except KeyError:\n\n def get_history(hdr):\n \"\"\"\n Returns all history cards in FITS header hdr as a list of strings.\n \"\"\"\n return hdr['HISTORY']\n\n for hist_entry in get_history(header):\n results = beam_regex.search(hist_entry)\n if results:\n bmaj, bmin, bpa = [float(results.group(key)) for\n key in ('bmaj', 'bmin', 'bpa')]\n break\n\n return bmaj, bmin, bpa", "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def parseBlastOutput(blast_path):\r\n\t\t#unpruned_read_objects = {}\r\n\t\t#ref_pruned_reads = {}\r\n\r\n\t\tunpruned_read_objects = {key:[] for key in COMMON_NAME.keys()}\r\n\t\tref_pruned_reads = {key:[] for key in COMMON_NAME.keys()}\r\n\t\twith open(blast_path,\"r\") as f:\r\n\t\t\t\tfor line in f:\r\n\r\n\t\t\t\t\t\tline = line.rstrip()\r\n\t\t\t\t\t\tline = line.rsplit()\r\n\t\t\t\t\t\t# print(line, file=sys.stderr,flush=True)\r\n\t\t\t\t\t\tif len(line) > 1:\r\n\t\t\t\t\t\t\t\tread_name = line[0]\r\n\t\t\t\t\t\t\t\tsubject_hit = line[1]\r\n\t\t\t\t\t\t\t\tlength = int(line[3])\r\n\t\t\t\t\t\t\t\t# sstart = int(line[6])\r\n\t\t\t\t\t\t\t\t# send = int(line[7])\r\n\t\t\t\t\t\t\t\tsstart = int(line[8])\r\n\t\t\t\t\t\t\t\tsend = int(line[9])\r\n\t\t\t\t\t\t\t\te_score = float(line[10])\r\n\r\n\t\t\t\t\t\t\t\t# CREATE A READ OBJECT FOR EACH OF THESE SIGNIFICANT HITS TO WOLBACHIA ENDOSYMBIONT.\r\n\t\t\t\t\t\t\t\t# IF A READ HITS THE SAME SUBJECT MORE THAN ONCE,\r\n\t\t\t\t\t\t\t\t# SAVE ONLY THE MOST SIGNIFICANT HIT (LOWEST E-SCORE).\r\n\t\t\t\t\t\t\t\tif e_score < 1e-10 and length > 40:\r\n\t\t\t\t\t\t\t\t\t\t# if subject_hit in ENDOSYMBIONT_IDS:\r\n\t\t\t\t\t\t\t\t\t\t# wol_host = 
ENDOSYMBIONT_IDS[subject_hit]\r\n\t\t\t\t\t\t\t\t\t\tcurrent_read = Read(read_name,subject_hit,length,sstart,send,e_score)\r\n\t\t\t\t\t\t\t\t\t\tif subject_hit in unpruned_read_objects:\r\n\t\t\t\t\t\t\t\t\t\t\t\tunpruned_read_objects[subject_hit].append(current_read)\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tunpruned_read_objects[subject_hit] = [current_read]\r\n\t\tif len(unpruned_read_objects) > 0:\r\n\t\t\t\tfor ref in unpruned_read_objects.keys():\r\n\t\t\t\t\t\tpruned_reads_ref = prune(unpruned_read_objects[ref])\r\n\t\t\t\t\t\tref_pruned_reads[ref] = pruned_reads_ref\r\n\r\n\t\t\t\treturn unpruned_read_objects, ref_pruned_reads\r\n\t\telse:\r\n\t\t\t\treturn None, None", "def read_line(line):\n label = line[0:11]\n text = line[11:]\n y = 1 if label == '__label__2 ' else 0\n return text, y", "def read_acbr(self):\n return self.ACBR", "def main() -> None:\n\n gh_annotations: List[str] = []\n\n for line in sys.stdin.readlines():\n print(line, end=\"\")\n\n if \":\" not in line:\n continue\n\n try:\n file, line_no, col, error = line.strip().split(\":\", 3)\n gh_annotations.append(f\"::error file={file},line={line_no},col={col}::{error}\")\n except ValueError:\n pass\n\n for annotation in gh_annotations:\n print(annotation)\n\n sys.exit(len(gh_annotations) > 0)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def _decode_header(self):\n #header = self.file_content[0:6]\n log_screen_descr = self.file_content[6:13]\n self.canvas_width = log_screen_descr[0] + (log_screen_descr[1]<<8)\n self.canvas_height = log_screen_descr[2] + (log_screen_descr[3]<<8)\n # is there a global color table? 
(usually yes)\n flags = log_screen_descr[4]\n self.glob_col_table = (flags & 0b10000000) != 0\n\n # determine the number of bits per primary color value\n self.color_resolution = (flags & 0b01110000) >> 4\n self.bits_per_pixel = self.color_resolution + 1\n\n # If the value is 1, then the colors in the global color table are sorted\n # in order of \"decreasing importance,\" which typically means \"decreasing\n # frequency\" in the image\n self.sort_flag = (flags & 0b00001000) != 0\n\n # If this value is N, then the actual table size is 2^(N+1).\n self.glob_col_table_sz = 1 << ((flags & 0b00000111)+1)\n\n self.bg_color_index = log_screen_descr[5]\n self.pix_asp_ratio = log_screen_descr[6]", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann", "def get_info(line, bit_thresh):\n if len(line) >= 18: # output is from cmsearch\n id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16]\n sstart, send, strand = int(line[7]), int(line[8]), line[9]\n mstart, mend = int(line[5]), int(line[6])\n elif len(line) == 9: # output is from ssu-cmsearch\n if bit_thresh == 0:\n print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr)\n print('# please specify a bit score threshold', file=sys.stderr)\n exit()\n id, model, bit = line[1].split()[0], line[0], float(line[6])\n inc = '!' 
# this is not a feature of ssu-cmsearch\n sstart, send = int(line[2]), int(line[3])\n mstart, mend = int(4), int(5)\n if send >= sstart:\n strand = '+'\n else:\n strand = '-'\n else:\n print('# unsupported hmm format:', file=sys.stderr)\n print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr)\n exit()\n coords = [sstart, send]\n sstart, send = min(coords), max(coords)\n mcoords = [mstart, mend]\n mstart, mend = min(mcoords), max(mcoords)\n return id, model, bit, sstart, send, mstart, mend, strand, inc", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def _read_boot_output(self):\n if not self._boot_output_file:\n return\n try:\n with open(self._boot_output_file, \"r\") as f:\n j_doc = json.load(f)\n except Exception, ex:\n cloudinitd.log(self._log, logging.DEBUG, \"No output read from the boot program %s\" % (str(ex)))\n return\n for k in j_doc.keys():\n self._attr_bag[k] = j_doc[k]\n bao = BagAttrsObject(k, j_doc[k])\n self._s.attrs.append(bao)", "def read_corr_jack(lens_run, rand_run, bin_scheme):\n\n fname=get_corr_jack_file(lens_run, rand_run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def cal_ResBeam_Stats(infile, header_bmaj, header_bmin):\n\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = beamlog_file[:,2]\n ind_nonzero_bmaj = np.nonzero(bmaj) # finding array indices of nonzero values\n ind_nonzero_bmin = np.nonzero(bmin)\n total_nbmaj = np.count_nonzero(bmaj) # count total number of bmaj non zero occurance\n total_nbmin = np.count_nonzero(bmin)\n bmaj_variance = (np.sum((bmaj[ind_nonzero_bmaj]-header_bmaj)**2.0))/total_nbmaj # using header beam value as mean \n bmin_variance = (np.sum((bmin[ind_nonzero_bmin]-header_bmin)**2.0))/total_nbmin\n bmaj_stdev = np.sqrt(bmaj_variance)\n bmin_stdev = np.sqrt(bmin_variance)\n beam_threshold = round((((header_bmaj + bmaj_stdev) * (header_bmin + bmin_stdev))/ (header_bmaj*header_bmin))-1.0, 4)\n bmaj_max = np.max(bmaj[ind_nonzero_bmaj])\n bmaj_min = np.min(bmaj[ind_nonzero_bmaj])\n bmin_max = np.max(bmin[ind_nonzero_bmin])\n bmin_min = np.min(bmin[ind_nonzero_bmin])\n max_ratio_beam_area = (bmaj_max*bmin_max)/(header_bmaj*header_bmin) # measured beam area / header beam area\n min_ratio_beam_area = (bmaj_min*bmin_min)/(header_bmaj*header_bmin)\n\n return bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_beam_area, min_ratio_beam_area", "def parse_bam():\n global sample_name, header, segmentID, bam\n sys.stderr.write(time.strftime(\"%c\") + \" Busy with parsing bam file...\\n\")\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n if not bam.has_index():\n sys.exit('The bam has no index file')\n header = bam.header\n if 'HD' in header:\n if not header['HD']['SO'] == 'coordinate':\n sys.exit('The bam file is not coordinate 
sorted')\n if 'RG' in header:\n if type(header['RG']) is list:\n sample_name = header['RG'][0]['SM']\n else:\n sample_name = header['RG']['SM']\n else:\n sample_name = re.sub('(\\.sorted)?\\.bam$', '', str(NanoSV.opts_bam))\n\n for line in bam:\n if line.query_name in reads:\n read = reads[line.query_name]\n else:\n read = r.Read(line.query_name, line.infer_read_length())\n reads[line.query_name] = read\n\n if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:\n continue\n segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,\n line.query_alignment_length)\n segment.end = line.reference_start + line.reference_length\n if line.has_tag('MD'):\n matches = sum(map(int, re.findall(r\"(\\d+)\", line.get_tag('MD'))))\n segment.pid = format(matches / segment.length, '.3f')\n else:\n segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')\n if segment.pid == \"0.000\":\n segment.pid = format(line.get_cigar_stats()[0][0] / segment.length, '.3f')\n if line.flag & 16:\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip = line.cigartuples[-1][1]\n else:\n segment.clip = 0\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip_2 = line.cigartuples[0][1]\n else:\n segment.clip_2 = 0\n else:\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip = line.cigartuples[0][1]\n else:\n segment.clip = 0\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip_2 = line.cigartuples[-1][1]\n else:\n segment.clip_2 = 0\n if float(segment.pid) < NanoSV.opts_min_pid:\n continue\n read.addSegment(segment)\n segments[segmentID] = segment\n segmentID += 1", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })" ]
[ "0.5672629", "0.5474085", "0.54246736", "0.5418223", "0.539274", "0.5346803", "0.53454983", "0.52752584", "0.5274368", "0.5263105", "0.51855737", "0.51788867", "0.51730055", "0.5136605", "0.5127657", "0.5109556", "0.5103017", "0.5099789", "0.5080782", "0.5076813", "0.5054723", "0.5035919", "0.5030293", "0.5023387", "0.50138754", "0.50133634", "0.49770117", "0.4975263", "0.49711704", "0.49682105" ]
0.5637101
1
Method for adding random distortion to dataset images, including a random brightness adjustment and a random vertical shift of the horizon position
def data_augmentation(self, img): new_img = img.astype(float) # random brightness - the mask bit keeps values from going beyond (0,255) value = np.random.randint(-28, 28) if value > 0: mask = (new_img[:, :, 0] + value) > 255 if value <= 0: mask = (new_img[:, :, 0] + value) < 0 new_img[:, :, 0] += np.where(mask, 0, value) # random shadow - full height, random left/right side, random darkening h, w = new_img.shape[0:2] mid = np.random.randint(0, w) factor = np.random.uniform(0.6, 0.8) if np.random.rand() > .5: new_img[:, 0:mid, 0] *= factor else: new_img[:, mid:w, 0] *= factor return (new_img.astype(np.uint8))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_color_distort(src, brightness_delta=32, contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5, hue_delta=18):\n\n def brightness(src, delta, p=0.5):\n \"\"\"Brightness distortion.\"\"\"\n if np.random.uniform(0, 1) > p:\n delta = np.random.uniform(-delta, delta)\n src += delta\n return src\n return src\n\n def contrast(src, low, high, p=0.5):\n \"\"\"Contrast distortion\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = np.random.uniform(low, high)\n src *= alpha\n return src\n return src\n\n def saturation(src, low, high, p=0.5):\n \"\"\"Saturation distortion.\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = np.random.uniform(low, high)\n gray = src * np.array([[[0.299, 0.587, 0.114]]])\n gray = np.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n return src\n\n def hue(src, delta, p=0.5):\n \"\"\"Hue distortion\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = random.uniform(-delta, delta)\n u = np.cos(alpha * np.pi)\n w = np.sin(alpha * np.pi)\n bt = np.array([[1.0, 0.0, 0.0],\n [0.0, u, -w],\n [0.0, w, u]])\n tyiq = np.array([[0.299, 0.587, 0.114],\n [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n ityiq = np.array([[1.0, 0.956, 0.621],\n [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n t = np.dot(np.dot(ityiq, bt), tyiq).T\n src = np.dot(src, np.array(t))\n return src\n return src\n\n src = src.astype('float32')\n\n # brightness\n src = brightness(src, brightness_delta)\n\n # color jitter\n if np.random.randint(0, 2):\n src = contrast(src, contrast_low, contrast_high)\n src = saturation(src, saturation_low, saturation_high)\n src = hue(src, hue_delta)\n else:\n src = saturation(src, saturation_low, saturation_high)\n src = hue(src, hue_delta)\n src = contrast(src, contrast_low, contrast_high)\n return src", "def augmenter(x, y):\n # Note that we only use fliprots along axis=(1,2), i.e. 
the yx axis\n # as 3D microscopy acquisitions are usually not axially symmetric\n x, y = random_fliprot(x, y, axis=(1, 2))\n x = random_intensity_change(x)\n return x, y", "def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image", "def augment_brightness_camera_images(image):\n\n # The HSV - Hue Saturation Value representation converts the image from RGB space to HSV space\n # where the Value(brightness) represents the brightness that is randomly increased\n\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n #print(random_bright)\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1", "def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return", "def random_shifts(self, image, label, h_shift, v_shift):\n\n rows = image.shape[0]\n cols = image.shape[1]\n\n horizontal = uniform(- h_shift / 2, h_shift / 2)\n vertical = uniform(- v_shift / 2, v_shift / 2)\n\n mtx = np.float32([[1, 0, horizontal], [0, 1, vertical]])\n\n # change also corresponding lable -> steering angle\n image = cv2.warpAffine(image, mtx, (cols, rows))\n label = cv2.warpAffine(label, mtx, (cols, rows))\n\n return image, label, (h_shift, v_shift)", "def random_shift(self, X, y_2d, im_h, im_w, scale):\n wrng = np.random.randint(-int(im_w*scale), int(im_w*scale))\n hrng = np.random.randint(-int(im_h*scale), int(im_h*scale))\n\n X = self.shift_im(X, wrng)\n X = self.shift_im(X, hrng, dim=1)\n\n y_2d = self.shift_im(y_2d, wrng)\n y_2d = self.shift_im(y_2d, hrng, dim=1)\n\n return X, y_2d", "def augment(image,masks):\n\n # Random horizontal flipping\n if random.random() > 0.5:\n image = TF.hflip(image)\n masks = TF.hflip(masks)\n\n # Random vertical flipping\n if random.random() > 0.5:\n image = TF.vflip(image)\n masks = TF.vflip(masks)\n return image,masks", "def __call__(self, img, target):\n if random.random() < 0.5:\n img = ImageEnhance.Brightness(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Color(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Contrast(img).enhance(0.5 + random.random())\n return img, target", "def random_crop(image, steering = 0.0, tx_lower = -20, tx_upper = 20, ty_lower = -2, ty_upper = 2, rand = True):\n\n shape = image.shape\n (col_start, col_end) = (abs(tx_lower), shape[1] - tx_upper)\n horizon = 60\n bonnet = 136\n if rand:\n tx = np.random.randint(tx_lower, tx_upper + 1)\n ty = np.random.randint(ty_lower, ty_upper + 1)\n else:\n (tx, ty) = (0, 0)\n\n crop = image[horizon + ty: 
bonnet + ty, col_start + tx: col_end + tx, :]\n image = cv2.resize(crop, (320, 160), cv2.INTER_AREA)\n # the steering variable needs to be updated to counteract the shift \n if tx_lower != tx_upper:\n dsteering = -tx / (tx_upper - tx_lower) / 3.0\n else:\n dsteering = 0\n steering += dsteering\n\n return image, steering", "def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return", "def random_hflip_img(img):\n if np.random.rand() > 0.5:\n return np.fliplr(img)\n return img", "def visualize_augmentation(image, angle):\n\n # Create a copy of the image to prevent changing the original\n img = np.copy(image)\n\n cols = 2\n rows = 6\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n # Plot original images in the left column\n for idx in range(rows):\n ax[idx, 0].imshow(img)\n ax[idx, 0].set_title(\"Original, Angle = \" + str(round(angle, 3)))\n # Horizontal Flip\n tmp_img, tmp_angle = random_horizontal_flip(img, angle, 1.0)\n ax[0, 1].imshow(tmp_img)\n ax[0, 1].set_title(\"Horizontal Flip, Angle = \" + str(round(tmp_angle, 3)))\n # Translation\n tmp_img, tmp_angle = random_translation(img, angle)\n ax[1, 1].imshow(tmp_img)\n ax[1, 1].set_title(\"Translation, Angle = \" + str(round(tmp_angle, 3)))\n # Gaussian Noise\n tmp_img = random_gaussian(img)\n ax[2, 1].imshow(tmp_img)\n ax[2, 1].set_title(\"Gaussian Noise, Angle = \" + str(round(angle, 3)))\n # Shadows\n tmp_img = random_shadows(img, 1.0, 0.9)\n ax[3, 1].imshow(tmp_img)\n ax[3, 1].set_title(\"Shadows, Angle = \" + str(round(angle, 3)))\n # Brightness\n tmp_img = random_brightness(img)\n ax[4, 1].imshow(tmp_img)\n ax[4, 1].set_title(\"Brightness, Angle = \" + str(round(angle, 3)))\n # All Augmentation\n tmp_img, tmp_angle = random_all(img, angle)\n ax[5, 1].imshow(tmp_img)\n ax[5, 1].set_title(\"All Randomization, Angle = \" +\n str(round(tmp_angle, 3)))\n\n return fig", "def augument_data(data_dir, center, left, right, steering_angle):\n image, steering_angle = choose_random_image(data_dir, center, left, right, steering_angle)\n # Randomly flipt the image horizontally and adjust the steering angle.\n if np.random.rand() < 0.5:\n image = cv2.flip(image, 1)\n steering_angle = -steering_angle\n \n #translate the object with random distance in x and y direction and adjust the steering angle\n trans_x = np.random.uniform(0, 30)\n trans_y = np.random.uniform(0, 20)\n steering_angle += trans_x * 0.002\n trans_matrix = np.float32([[1, 0, trans_x], [0, 1, trans_y]])\n height= image.shape[0]\n width=image.shape[1]\n image = cv2.warpAffine(image, trans_matrix, (width, height))\n return image, steering_angle", "def random(self):\n self.img[:, :] = np.random.random(\n (self.l_i, self.l_i)).astype('float32')\n self.img_name = 'white_noise'", "def brightness_shift(img):\n img_new = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) # convert color spaces\n random_bright_val = BRIGHT_VAL + np.random.uniform() # random coefficient to shift brightness by\n img_new[:,:,2] = img_new[:,:,2] * random_bright_val\n img_new = cv2.cvtColor(img_new, cv2.COLOR_HSV2RGB) # convert back to RGB\n return img_new", "def add_random_shadow(image):\n\n top_y = 
320*np.random.uniform()\n top_x = 0\n bot_x = 160\n bot_y = 320*np.random.uniform()\n image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)\n shadow_mask = 0*image_hls[:,:,1]\n X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]\n Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]\n shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)] = 1\n #random_bright = .25+.7*np.random.uniform()\n if (np.random.randint(2)==1 ):\n \trandom_bright = .5\n \tcond1 = shadow_mask==1\n \tcond0 = shadow_mask==0\n \tif np.random.randint(2)==1 :\n \t\timage_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright\n \telse:\n \t\timage_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright\n image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)\n return image", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def random_distortion(image, bboxes=None, brightness=None, contrast=None,\n hue=None, saturation=None, seed=None):\n # Following Andrew Howard (2013). \"Some improvements on deep convolutional\n # neural network based image classification.\"\n if brightness is not None:\n if 'max_delta' not in brightness:\n brightness.max_delta = 0.3\n image = tf.image.random_brightness(\n image, max_delta=brightness.max_delta, seed=seed\n )\n # Changing contrast, even with parameters close to 1, can lead to\n # excessively distorted images. Use with care.\n if contrast is not None:\n if 'lower' not in contrast:\n contrast.lower = 0.8\n if 'upper' not in contrast:\n contrast.upper = 1.2\n image = tf.image.random_contrast(\n image, lower=contrast.lower, upper=contrast.upper,\n seed=seed\n )\n if hue is not None:\n if 'max_delta' not in hue:\n hue.max_delta = 0.2\n image = tf.image.random_hue(\n image, max_delta=hue.max_delta, seed=seed\n )\n if saturation is not None:\n if 'lower' not in saturation:\n saturation.lower = 0.8\n if 'upper' not in saturation:\n saturation.upper = 1.2\n image = tf.image.random_saturation(\n image, lower=saturation.lower, upper=saturation.upper,\n seed=seed\n )\n if bboxes is None:\n return_dict = {'image': image}\n else:\n return_dict = {\n 'image': image,\n 'bboxes': bboxes,\n }\n return return_dict", "def random_shift(image, wsr=0.1, hsr=0.1):\n height, width, _ = common_layers.shape_list(image)\n width_range, height_range = wsr*width, hsr*height\n height_translations = tf.random_uniform((1,), -height_range, height_range)\n width_translations = tf.random_uniform((1,), -width_range, width_range)\n translations = tf.concat((height_translations, width_translations), axis=0)\n return tf.contrib.image.translate(image, translations=translations)", "def random_image(self, height, width):\n random_image_properties = {}\n # flip a coin to determine whether image should be synthetic or real\n if random.random() < self.prob_real:\n random_image_properties['real'] = True\n # select a random row from the list of filenames\n # random_row = self.df_real_images.sample(n=1)\n # print(random_row)\n # filename, x0, y0, x1, y1, x2, y2, x3, y3 = random_row\n random_index = random.choice(list(range(len(self.df_real_images))))\n filename = self.df_real_images['filename'].values[random_index]\n print(filename)\n x0 = self.df_real_images['x0'].values[random_index]\n y0 = self.df_real_images['y0'].values[random_index]\n x1 = self.df_real_images['x1'].values[random_index]\n y1 = self.df_real_images['y1'].values[random_index]\n x2 = self.df_real_images['x2'].values[random_index]\n y2 = self.df_real_images['y2'].values[random_index]\n x3 = 
self.df_real_images['x3'].values[random_index]\n y3 = self.df_real_images['y3'].values[random_index]\n random_image_properties['real_image_path'] = filename\n cornerpoints = np.array([[x0,y0],[x1,y1],[x2,y2],[x3,y3]], dtype=np.float32)\n\n abs_impath = os.path.join(self.real_image_dirpath, filename)\n im = cv2.imread(abs_impath, 1)\n print('Loaded image from {}'.format(abs_impath))\n h,w,c = im.shape\n sfx = float(width)/w\n sfy = float(height)/h\n scale_matrix = np.array([[sfx,0,0],\n [0,sfy,0],\n [0,0,1]], dtype=np.float32)\n\n # mask = np.zeros((h,w))\n # mask = cv2.fillPoly(mask, np.int32([cornerpoints]), 1)\n # utils.showims([im, mask], ['im', 'mask'])\n\n # im_shrunk = cv2.resize(im, (width, height))\n # mask_shrunk=np.zeros((height, width))\n\n cornerpoints_shrunk = coordinate_geometry.transform_cornerpoints_2D(scale_matrix, cornerpoints)\n cornerpoints = cornerpoints_shrunk\n\n # mask_shrunk = cv2.fillPoly(mask_shrunk, np.int32([cornerpoints_shrunk]), 1)\n # utils.showims([im_shrunk, mask_shrunk], ['im_shrunk', 'mask_shrunk'])\n\n # cornerpoints = coordinate_geometry.transform_cornerpoints_2D(scale_matrix, cornerpoints)\n random_image_properties['cornerpoints'] = np.int32(cornerpoints)\n\n\n # and set unused variables to None\n random_image_properties['card_template_path'] = None\n random_image_properties['background_image_path'] = None\n else:\n random_image_properties['real'] = False\n random_image_properties['card_template_path'] = random.choice(self.card_template_filenames)\n random_image_properties['background_image_path'] = random.choice(self.background_image_filenames)\n random_image_properties['cornerpoints'] = self.random_cornerpoints(height, width)\n\n # and set unused variables to NoneX\n random_image_properties['real_image_path'] = None\n\n return random_image_properties", "def _get_random_transform(self, img_shape, seed=None):\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(\n -self.rotation_range,\n self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range)\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(-self.width_shift_range,\n self.width_shift_range)\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(\n -self.shear_range,\n self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0],\n self.zoom_range[1],\n 2)\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(-self.channel_shift_range,\n self.channel_shift_range)\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(self.brightness_range[0],\n 
self.brightness_range[1])\n\n transform_parameters = {'theta': theta,\n 'tx': tx,\n 'ty': ty,\n 'shear': shear,\n 'zx': zx,\n 'zy': zy,\n 'flip_horizontal': flip_horizontal,\n 'flip_vertical': flip_vertical,\n 'channel_shift_intensity': channel_shift_intensity,\n 'brightness': brightness}\n\n return transform_parameters", "def preprocess_image(image, training):\r\n if training:\r\n ### YOUR CODE HERE\r\n hpad = np.zeros((32,4,3))\r\n image = np.hstack((image,hpad))\r\n image = np.hstack((hpad,image))\r\n\r\n vpad = np.zeros((4,40, 3))\r\n image = np.vstack((image, vpad))\r\n image = np.vstack((vpad, image))\r\n\r\n #print(np.shape(image))\r\n # Resize the image to add four extra pixels on each side.\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly crop a [32, 32] section of the image.\r\n # HINT: randomly generate the upper left point of the image\r\n rx = np.random.randint(8)\r\n ry = np.random.randint(8)\r\n crp_img = image[rx:rx+32,ry:ry+32,:]\r\n #print(np.shape(crp_img))\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly flip the image horizontally.\r\n # for i in range(crp_img.shape[0]):\r\n # crp_img[i] = np.fliplr(crp_img[i])\r\n rf = np.random.randint(2)\r\n if(rf == 0):\r\n crp_img = np.fliplr(crp_img)\r\n #print(np.shape(crp_img))\r\n image = crp_img\r\n\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Subtract off the mean and divide by the standard deviation of the pixels.\r\n cmean = []\r\n cstd = []\r\n for i in range(np.shape(image)[2]):\r\n arr = image[:,:,i]\r\n cmean = np.mean(arr)\r\n cstd = (np.std(arr))\r\n lfn = lambda x : (x-cmean)/cstd\r\n image[:,:,i] = lfn(arr)\r\n #print(np.shape(image))\r\n\r\n ### YOUR CODE HERE\r\n\r\n return image", "def shift_augmentation():\n shift = np.random.randint(-200, 201, size=2)\n return lambda image: shift_with_extension(image, shift)", "def distort_images(self, images, seed):\n if self.mode == \"train\":\n images = image_processing.distort_image(images, seed)\n\n # Rescale to [-1,1] instead of [0, 1]\n images = tf.subtract(images, 0.5)\n images = tf.multiply(images, 2.0)\n return images", "def augment(im_path):\n # change directory to toplevel of repo (parent of augmentation)\n os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])\n\n im_name, im_ext = os.path.splitext(im_path)\n if im_path not in os.listdir(\"data/raw\"):\n raise FileNotFoundError(f\"{im_path} could not be found in the list of raw images\")\n\n if im_name + \".json\" not in os.listdir(\"data/corrected\"):\n raise FileNotFoundError(f\"{im_name} has not been labelled yet! 
(no file '{im_name}.json' in corrected)\")\n\n with open(f\"data/corrected/{im_name}.json\") as read_file:\n im_label = json.loads(read_file.read(-1))\n persp = np.float32(im_label[\"perspective\"])\n\n im: Image.Image = Image.open(f\"data/raw/{im_path}\")\n # downscale image to reasonable height\n scale_factor = 500 / im.height\n persp = persp * scale_factor\n im.thumbnail([1000000, 500])\n im_cv = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n # determine crop box\n crop_amount = (im.width - 500)\n left_crop = random.randint(crop_amount//4, 3 * crop_amount // 4)\n # left_crop = crop_amount//2\n right_crop = crop_amount - left_crop\n box = [\n left_crop,\n 0,\n im.width - right_crop,\n im.height\n ]\n\n # warp perspective\n # basic way: add gaussian noise to the 4 corner points\n warped_persp = persp.copy()\n for i in range(4):\n for j in range(2):\n v = warped_persp[i][j]\n v += random.gauss(0, 5)\n # ensure none of the perspective points will fall outside the cropped image\n v = max(box[j] + 5, v)\n v = min(box[j+2] - 5, v)\n warped_persp[i][j] = v\n\n matrix = cv2.getPerspectiveTransform(persp, warped_persp)\n warped_im = cv2.warpPerspective(im_cv, matrix, (im.width, im.height))\n warped_im = Image.fromarray(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))\n\n # run crop on warped image\n warped_im = warped_im.crop(box)\n # adjust warped coordinates according to crop\n for i in range(4):\n warped_persp[i][0] -= box[0]\n warped_persp[i][1] -= box[1]\n\n # scale down to final size\n warped_im = warped_im.resize((256, 256))\n for i in range(4):\n warped_persp[i][0] *= 256 / 500\n warped_persp[i][1] *= 256 / 500\n\n # adjust image colour balance, saturation and contrast\n warped_im = ImageEnhance.Color(warped_im).enhance(random.uniform(0.9, 1.2))\n warped_im = ImageEnhance.Contrast(warped_im).enhance(random.uniform(0.8, 1.2))\n warped_im = ImageEnhance.Brightness(warped_im).enhance(random.uniform(0.8, 1.2))\n\n # adjust image temperature\n # thanks to Mark Ransom (https://stackoverflow.com/a/11888449)\n temp_r, temp_g, temp_b = random.choice(KELVIN_TABLE)\n convert_matrix = (temp_r / 255.0, 0.0, 0.0, 0.0,\n 0.0, temp_g / 255.0, 0.0, 0.0,\n 0.0, 0.0, temp_b / 255.0, 0.0)\n warped_im = warped_im.convert(\"RGB\", convert_matrix)\n\n # add noise\n noise_strength = random.uniform(5, 10)\n warped_im_arr = np.float64(np.array(warped_im))\n warped_im_arr += np.random.normal(0, noise_strength, warped_im_arr.shape)\n warped_im_arr = np.clip(warped_im_arr, 0, 255)\n warped_im = Image.fromarray(np.uint8(warped_im_arr))\n\n fname = f\"{im_name}-{hex(random.randint(2**20, 2**24))[2:]}\"\n warped_im.save(f\"data/augmented/{fname}{im_ext}\")\n with open(f\"data/augmented/{fname}.json\", \"w\") as write_file:\n data = {\n \"darts\": im_label[\"darts\"],\n \"perspective\": warped_persp.tolist()\n }\n write_file.write(json.dumps(data))\n return warped_im, warped_persp", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert img.dtype == np.float32, \\\n 'PhotoMetricDistortion needs the input image of dtype ' \\\n 'np.float32, please set \"to_float32=True\" in ' \\\n '\"LoadImageFromFile\" pipeline'\n # random brightness\n if random.randint(2):\n delta = random.uniform(-self.brightness_delta,\n self.brightness_delta)\n img += delta\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n mode = random.randint(2)\n if mode == 1:\n if random.randint(2):\n alpha 
= random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # convert color from BGR to HSV\n img = general_ocr.bgr2hsv(img)\n\n # random saturation\n if random.randint(2):\n img[..., 1] *= random.uniform(self.saturation_lower,\n self.saturation_upper)\n\n # random hue\n if random.randint(2):\n img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 0][img[..., 0] < 0] += 360\n\n # convert color from HSV to BGR\n img = general_ocr.hsv2bgr(img)\n\n # random contrast\n if mode == 0:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # randomly swap channels\n if random.randint(2):\n img = img[..., random.permutation(3)]\n\n results['img'] = img\n return results", "def add_random_shadow(image):\n h, w = image.shape[0], image.shape[1]\n [x1, x2] = np.random.choice(w, 2, replace=False)\n k = h / (x2 - x1)\n b = - k * x1\n for i in range(h):\n c = int((i - b) / k)\n image[i, :c, :] = (image[i, :c, :] * .5).astype(np.int32)\n\n return image", "def _distort_resize(image, image_size):\n distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n distorted_image = tf.image.random_contrast(\n distorted_image, lower=0.2, upper=1.8)\n distorted_image.set_shape([image_size, image_size, 3])\n return distorted_image", "def __call__(self, results):\n if random.random() < self.shift_ratio:\n img_shape = results['img'].shape[:2]\n\n random_shift_x = random.randint(-self.max_shift_px,\n self.max_shift_px)\n random_shift_y = random.randint(-self.max_shift_px,\n self.max_shift_px)\n new_x = max(0, random_shift_x)\n orig_x = max(0, -random_shift_x)\n new_y = max(0, random_shift_y)\n orig_y = max(0, -random_shift_y)\n\n # TODO: support mask and semantic segmentation maps.\n for key in results.get('bbox_fields', []):\n bboxes = results[key].copy()\n bboxes[..., 0::2] += random_shift_x\n bboxes[..., 1::2] += random_shift_y\n\n # clip border\n bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1])\n bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0])\n\n # remove invalid bboxes\n bbox_w = bboxes[..., 2] - bboxes[..., 0]\n bbox_h = bboxes[..., 3] - bboxes[..., 1]\n valid_inds = (bbox_w > self.filter_thr_px) & (\n bbox_h > self.filter_thr_px)\n # If the shift does not contain any gt-bbox area, skip this\n # image.\n if key == 'gt_bboxes' and not valid_inds.any():\n return results\n bboxes = bboxes[valid_inds]\n results[key] = bboxes\n\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n for key in results.get('img_fields', ['img']):\n img = results[key]\n new_img = np.zeros_like(img)\n img_h, img_w = img.shape[:2]\n new_h = img_h - np.abs(random_shift_y)\n new_w = img_w - np.abs(random_shift_x)\n new_img[new_y:new_y + new_h, new_x:new_x + new_w] \\\n = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w]\n results[key] = new_img\n\n return results" ]
[ "0.68516517", "0.630621", "0.6267898", "0.6167012", "0.61349374", "0.6047315", "0.6030819", "0.60097414", "0.59941983", "0.59620386", "0.5956868", "0.5932575", "0.5926298", "0.59043723", "0.5862057", "0.5848643", "0.5839401", "0.5836999", "0.5795006", "0.5790555", "0.5788634", "0.5782043", "0.5781843", "0.57795244", "0.5748073", "0.5740421", "0.5721135", "0.571546", "0.5693936", "0.5675762" ]
0.6651788
1
Print the coordinates of the vertices of the Koch curve. d is the depth of recursion; p1 and p2 are the coordinates of the end points of the initial state.
def koch(d, p1, p2):
    if d == 0:
        return
    sx = (2 * p1[0] + p2[0]) / 3
    sy = (2 * p1[1] + p2[1]) / 3
    tx = (p1[0] + 2 * p2[0]) / 3
    ty = (p1[1] + 2 * p2[1]) / 3
    dx = tx - sx
    dy = ty - sy
    ux = dx * c60 - dy * s60 + sx
    uy = dx * s60 + dy * c60 + sy
    koch(d - 1, p1, (sx, sy))
    print("{0:.8f} {1:.8f}".format(sx, sy))
    koch(d - 1, (sx, sy), (ux, uy))
    print("{0:.8f} {1:.8f}".format(ux, uy))
    koch(d - 1, (ux, uy), (tx, ty))
    print("{0:.8f} {1:.8f}".format(tx, ty))
    koch(d - 1, (tx, ty), p2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_path(self, d, parent, s, t):\n idxs = [t]\n while idxs[-1]!=s:\n idxs.append(parent[idxs[-1]])\n idxs.reverse()\n print('[{:g}]'.format(d[t])+' '+'-->'.join([str(self.vertices[i]) for i in idxs]))", "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def print_pairs(self, d, level=0):\n for k, v in d.iteritems():\n if type(v) is dict:\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self.print_pairs(v, level + 1)\n elif k == \"output\":\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self._write('%s\\n' % v)\n else:\n self._write('%s%s : %s\\n' % (\"\\t\" * level, k.upper(), v))", "def print_cords(self):\n print('startX :', self.startX, ' ,startY :', self.startY, ' ,endX :', self.endX, ' ,endY :', self.endY)", "def print_path(next_v, u, v):\n\tp = u\n\twhile (next_v[p][v]):\n\t\tprint('{} -> '.format(p.get_key()), end='')\n\t\tp = next_v[p][v]\n\t\tprint('{} '.format(v.get_key()), end='')", "def curvecontrol(p1,p2, u_or_d):\r\n## four possibile orders:\r\n## A p1 lower and to left of p2\r\n## B p1 lower and to right of p2\r\n## C p1 higher and to left of p2\r\n## D p1 higher and to right of p2\r\n## B and C are reverse of each other\r\n## A and D are reverse of each other\r\n## so only 2 types of pairs really\r\n## each has a curve up or curve down possibility\r\n## start by converting D to A, and C to B\r\n e1 = 0.0001\r\n e2 = 0.9\r\n e1c = 1 - e1\r\n e2c = 0.5\r\n cp1 = []\r\n cp2 = []\r\n if p2[1] < p1[1]:\r\n resort = True\r\n ptemp = p2\r\n p2 = p1\r\n p1 = ptemp\r\n else:\r\n resort = False\r\n if p1[0] < p2[0]: ## type A\r\n if u_or_d: ## curve up\r\n cp1.append( ((p2[0]-p1[0]) * e1) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e2c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( ((p2[0]-p1[0]) * e2) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e1c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n else: ## type B\r\n if u_or_d: ## curve up\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e1))\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( p1[0] - ((p1[0]-p2[0]) * e2c))\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e2))\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( p1[0]-((p1[0]-p2[0]) * e1c))\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n if resort:\r\n ptemp = cp2\r\n cp2 = cp1\r\n cp1 = ptemp\r\n return cp1,cp2", "def print_dfs_output(G, d, f, pi):\n V = G[0]\n print (\" d f pi\")\n for v in V:\n print (\" {: <5}{: <5}{: <5}{: <5}\".format(v, d[v], f[v], pi[v]))", "def debugprint(self, cur_pos): \n print(\"cur_pos = \", cur_pos)\n print(\"Distance map:\")\n print(self.distance_map)\n print(\"Frontier:\")\n print(sorted(self.frontier.items(), key=lambda x:x[1] ))\n print(\"Footprint:\")\n print(self.footprint)\n print(\"--------------\")", "def trace(self,p):\n n = self\n c=0 \n while n!=None :\n print (n)\n n = n.pere\n c+=1\n print (\"Nombre d'étapes de la solution:\", c-1)\n return", "def generate_points(self):\n for x in range(self.num_sides):\n for y in range(self.num_sides):\n for z in range(self.num_sides):\n col_name = y + 4\n top_num = 0\n if 1 < z < 4:\n top_name = 'b'\n else:\n top_name = 'd'\n if z == 3 or z == 1:\n top_num += 4\n top_num += x\n\n 
top_name += str(top_num)\n\n k = Node(x*self.length-self.center, y*self.length -\n self.center, z*self.length-self.center, top_name, col_name)\n self.c_layers[y].append(k)\n self.points.append(k)", "def find_vanishing_lderivs(self, do_print=True, latex=True, nd=50):\n res = list()\n if(latex):\n S = \" $ \"\n O = \" & \"\n else:\n S = \" \"\n O = \" \"\n if(len(list(self._Lv.keys())) == 0):\n return res\n L = list(self._Lv.keys())\n L.sort()\n L.reverse()\n s = \"\"\n sc = \"\"\n # increase mpmath.mp.dps to print all relevant digits\n mpold = mpmath.mp.dps\n mpmath.mp.dps = self.maxdigs\n for DD in L:\n x = self._Lv[DD]\n if(abs(x) < 1E-10):\n # res.append((DD,x))\n res.append(DD)\n s = s + S + str(DD) + S + O + S + sci_pretty_print(self._Lv[DD], nd, latex_pow=latex) + S + \"\\\\\\\\ \\n\"\n c = self.get_coefficient(DD)\n if c is not None:\n x = c.real()\n x1 = floor(x)\n x2 = ceil(x)\n er1 = abs(x1 - x)\n er2 = abs(x2 - x)\n erm = min(er1, er2)\n print(\"erm({0})={1}\".format(DD, erm))\n erms = sci_pretty_print(erm, 2, latex_pow=latex)\n if(er1 < er2):\n xi = x1\n else:\n xi = x2\n # sc=sc+S+str(DD)+S+\"\\t\"+O+S+sci_pretty_print(c.real,nd,latex_pow=latex)+\"\\\\ \\n\"\n sc = sc + S + str(DD) + S + O + S + str(xi) + S + O + S + erms + S + \"\\\\\\\\ \\n\"\n else:\n sc = sc + S + str(DD) + S + O + S + \" \" + S + O + S + \" \" + S + \"\\\\\\\\ \\n\"\n print(s)\n print(sc)\n mpmath.mp.dps = mpold\n return res", "def print_level(self, node, depth):\n\n if not node:\n return\n\n if depth == 1:\n self.print_count += 1\n print(node.point, self.print_count)\n\n elif depth > 1:\n self.print_level(node.left, depth - 1)\n self.print_level(node.right, depth - 1)", "def dot(self):\n d = Digraph(comment=\"VP Tree\", format=\"png\")\n for parent, left, right in self.root.preorder():\n\n if isinstance(parent,VPTreeNonLeaf):\n d.node(str(parent.uid), \"\"\"VP Node:: Key={} Median Dist = {:2.2f}\n \"\"\".format(parent.pk, parent.median_dist))\n d.edge(str(parent.uid), str(left.uid))\n d.edge(str(parent.uid), str(right.uid))\n elif isinstance(parent,VPTreeLeaf):\n d.node(str(parent.uid), \"Leaf Node:: \"+str(parent.pk_list))\n else:\n raise Exception(\"something went wrong\")\n\n return d", "def test_print_level_order_2(depth_one_tree):\n depth_one_tree.insert(5, 1)\n depth_one_tree.insert(6, 2)\n depth_one_tree.insert(7, 3)\n depth_one_tree.insert(8, 4)\n assert print_level_order(depth_one_tree) == ['0', '1 2 3 4', '5 6 7 8']", "def koch(order,size):\n if order ==0: #the base case is just a straight line\n forward(size)\n else:\n koch(order-1,size/3) #Go 1/3 of the way\n left(60)\n koch(order-1,size/3)\n right(120)\n koch(order-1,size/3)\n left(60)\n koch(order-1,size/3)", "def plotTimeDepthInteract(d2,d3,v1,v2,v3):\n d = np.array((0.,d2,d3), dtype=float)\n v = np.array((v1,v2,v3), dtype=float)\n plotTimeDepth(d,v)", "def print_path(self, node_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n ancestors = self.get_path(node_id, connection)\n\n print(' -> '.join(x.title for x in ancestors))", "def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')", "def print_gcd():\n a = get_inp_gcd()\n b = get_inp_gcd('second')\n print(a, \" gcd \", b, \" = \", gcd(a,b), sep='')", "def display_path(self, path):\n graph = path.graph\n if not graph:\n return\n for v in sorted(graph.vertices()):\n p = graph.get_vertex_attribute(v, 'xy')\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('define v{} ellipse 
2 2 c_vertex {} {}'.format(v, x, y))\n #print('define v{0}t text {0} 14 white {1} {2}'.format(v, x, y))\n for u, v in graph.edges():\n print('define - link v{} v{} 1 c_edge'.format(u, v))\n # NOTE: this code assumes paths will not move indefinitely\n print('fix /./')", "def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()", "def do_printgraph(self, args):\n self.currentGraph.printGraph()", "def print_solution(state1, number_nodes_expanded, goal_state, state2 = None):\n\n\tif state2 != None:\n\t\ttotal_depth = state1.depth + state2.depth\n\telse:\n\t\ttotal_depth = state1.depth\n\t\tprint(\"Solution found at depth: \" + str(total_depth))\n\n\tdimensions = int(math.sqrt(total_depth)) + 1\n\n\tfig = plt.figure(figsize=[4 * dimensions, 4 * dimensions])\n\n\tstate1.print_path(fig, dimensions, state1.depth + 1)\n\n\tif state2 != None:\n\t\tstate2.parent.print_path_reserse(fig, dimensions, state1.depth + 2)\n\t\tmiddle_depth = state1.depth\n\t\tfound = False\n\t\twhile True:\n\t\t\tif state1.check_solution(goal_state):\n\t\t\t\tmiddle_depth = state1.depth\n\t\t\t\tfound = True\n\t\t\t\t#check if the solution can still be find in previous nodes\n\t\t\t\tstate1 = state1.parent\n\t\t\telse:\n\t\t\t\tif state1.parent == None:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tstate1 = state1.parent\n\n\t\tstate2 = state2.parent\n\t\twhile not(found):\n\t\t\tif state2.check_solution(goal_state):\n\t\t\t\tmiddle_depth += 1\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tmiddle_depth += 1\n\t\t\t\tstate2 = state2.parent\n\t\t\n\t\tprint(\"Solution found at depth: \" + str(middle_depth))\n\t\tplt.show()\n\t\treturn middle_depth\n\telse:\n\t\tplt.show()\n\t\treturn None", "def preorder_indent(T, p, d):\n print(2 * d * '-' + str(p.element()))\n for c in T.children(p):\n preorder_indent(T, c, d + 1)", "def printPath(edgesTo,v):\r\n path = str()\r\n while v is not None:\r\n print(v) \r\n path += str(v) + ' -> ' \r\n v = edgesTo[v]\r\n print(path)", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n h = height(root) \n for i in range(1, h+1): \n printGivenLevel(root, i) \n print(\"=========================================================\")", "def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n 
#Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermidiate case, look for the furthest point and divide the pb in 2 pbs\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], 
title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERREUR\")\n return []", "def drawPath(self):\r\n bgl.glColor4f(0.8,0.8,0.9,0.01)\r\n bgl.glLineWidth(0.01)\r\n\r\n bgl.glBegin(bgl.GL_LINES)\r\n bgl.glVertex3f(self.p1[0],self.p1[1],self.p1[2])\r\n bgl.glVertex3f(self.p2[0],self.p2[1],self.p2[2])\r\n bgl.glEnd()\r\n\r\n bgl.glNormal3f(0.0,0.0,1.0)\r\n bgl.glShadeModel(bgl.GL_SMOOTH);", "def __init__(self, d, init, depth, **kwargs):\r\n self.d = d\r\n self.init = init\r\n self.depth = depth\r\n super(Separator, self).__init__(**kwargs)" ]
[ "0.575522", "0.54456514", "0.5345275", "0.5257093", "0.5226664", "0.5217727", "0.5186063", "0.5185967", "0.51700306", "0.5160067", "0.50513047", "0.50492215", "0.5020757", "0.5018144", "0.5006685", "0.49828735", "0.4976104", "0.49749985", "0.4970847", "0.49628836", "0.49621964", "0.49571154", "0.49507642", "0.49405602", "0.4919051", "0.48927188", "0.4892202", "0.48842412", "0.48792267", "0.48530442" ]
0.6926076
0
Returns the url to access a particular tour instance.
def get_absolute_url(self):
    return reverse('tour-detail', args=[str(self.id)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instance_url(self) -> str:\n easypost_id = self.get(\"id\")\n if not easypost_id:\n raise Error(\"%s instance has invalid ID: %r\" % (type(self).__name__, easypost_id))\n return \"%s/%s\" % (self.class_url(), easypost_id)", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def get_absolute_url(self):\n return reverse('tournament-details', args=[self.uuid])", "def instance_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_url\")", "def url(self):\n return self.full()", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def url(self, **kwargs):\n return self._location.url(**kwargs)", "def Url(self) -> str:", "def get_absolute_url(self):\n return reverse('teacher-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('teacher-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('tour-review', args=[str(self.id)])", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self):\n url = self.url\n return url", "def get_absolute_url(self):\n return reverse('trait_browser:source:traits:detail', kwargs={'pk': self.pk})", "def url(self):\r\n return self.urlparts.geturl()", "def get_absolute_url(self):\n return reverse('wine-detail', args=[str(self.id)])", "def task_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"task-detail\", args=[obj.id], request=request)", "def get_url(self):\n return self.resource.url", "def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])" ]
[ "0.6904997", "0.665611", "0.665611", "0.665611", "0.665611", "0.665611", "0.665611", "0.665611", "0.665611", "0.66277385", "0.652006", "0.6373929", "0.6353588", "0.6353588", "0.6309502", "0.62769485", "0.6269816", "0.6269816", "0.62662625", "0.6248603", "0.6248603", "0.62311894", "0.62311894", "0.6228326", "0.6227284", "0.6201681", "0.6173254", "0.6169864", "0.61633784", "0.61612403" ]
0.6999632
0
Returns the url to access a particular review instance.
def get_absolute_url(self):
    return reverse('tour-review', args=[str(self.id)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content_object_url(self):\n return urlresolvers.reverse(\n \"reviews-url-redirect\",\n args=(self.content_type_id, self.object_pk)\n )", "def instance_url(self) -> str:\n easypost_id = self.get(\"id\")\n if not easypost_id:\n raise Error(\"%s instance has invalid ID: %r\" % (type(self).__name__, easypost_id))\n return \"%s/%s\" % (self.class_url(), easypost_id)", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_url\")", "def instance_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_url\")", "def get_url(\n self,\n *,\n context: Context,\n ) -> str:\n request = context['request']\n\n # We want to use a relative URL in the diff viewer as we will not be\n # re-rendering the page when switching between revisions.\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n\n return local_site_reverse(\n 'raw-diff',\n request,\n kwargs={\n 'review_request_id': context['review_request'].display_id,\n })", "def test_get_url_on_review_request(self) -> None:\n review_request = self.create_review_request()\n\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n review_request=review_request,\n url_name='review-request-detail')),\n '/r/%s/diff/raw/' % review_request.display_id)", "def get(self, request, *args, **kwargs):\n view = ReviewDisplay.as_view()\n return view(request, *args, **kwargs)", "def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)", "def get_url(self):\n return self.resource.url", "def show_orion_url(self, obj):\n return obj.orion_url", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def get_review(review_id):\n return get(cls, review_id)", "def _product_reviews_url(self, url):\n temp_url = re.sub('/dp/', '/product-reviews/', url)\n return re.sub('ref=(.+)\\?', 'cm_cr_pr_top_link_1', temp_url)", "def get_redirect_url(self, *args, **kwargs):\n referer = self.request.META.get('HTTP_REFERER', '')\n if 'reviews' in referer:\n url = reverse('review_home')\n else:\n document = self.metadata.document\n url = reverse('document_detail', args=[\n document.category.organisation.slug,\n document.category.slug,\n document.document_key])\n\n return url", "def url(self):\n url = self.url\n return url", "def url(self):\n\n if self.identifier and self.identifier != \"\":\n return self.collection.url + self.identifier + \"/\"\n else:\n return self.collection.url", "def get_url(self):\n return self._url", "def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n params = {\n 'offset': offset,\n 'limit': limit,\n 'sort': sort\n }\n query = urllib.urlencode(params)\n 
return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)", "def url(self):\n return url_for_item(self.key)", "def url(self):\n return url_for_item(self.key)" ]
[ "0.6614286", "0.63549036", "0.6237856", "0.6237856", "0.6237856", "0.6237856", "0.6237856", "0.6237856", "0.6237856", "0.6237856", "0.6212627", "0.6147254", "0.60942954", "0.5924494", "0.5896306", "0.5863019", "0.575913", "0.574045", "0.574045", "0.5736692", "0.5736692", "0.5691407", "0.5641374", "0.5593469", "0.5559285", "0.5555017", "0.55505264", "0.554513", "0.5509554", "0.5509554" ]
0.6492123
1
Generates a start vector of length columns_num with random numbers in the range [-1; 1]. The vector is divided by its norm to scale the values.
def generate_start_w0(columns_num):
    start_w0 = [uniform(-1, 1.) for _ in range(columns_num)]
    norm_start_w0 = start_w0 / np.linalg.norm(start_w0)
    return norm_start_w0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", "def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)", "def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)", "def generate_column(size: int, norm: float, row_slope: float) -> np.ndarray:\r\n return norm * (np.arange(1, size + 1)) ** row_slope", "def df_numeric_column(min_value=0, max_value=1, num_rows=100):\n # Generate numeric column\n return pd.Series(np.random.uniform(min_value, max_value, num_rows))", "def init_w(self, size):\n return np.random.uniform(self.r_min, self.r_max, size=size)", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def xavier_init(dims, uniform=True):\n n_inputs,n_outputs = dims\n if uniform:\n # 6 was used in the paper.\n init_range = np.sqrt(6.0 / (n_inputs + n_outputs))\n return tf.random_uniform(shape=dims,minval=-init_range, maxval=init_range)\n else:\n # 3 gives us approximately the same limits as above since this repicks\n # values greater than 2 standard deviations from the mean.\n stddev = np.sqrt(3.0 / (n_inputs + n_outputs))\n return tf.truncated_normal(shape=dims,stddev=stddev)", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def get_seeds(self, start: float, num: int) -> np.ndarray:\r\n\r\n if self.integral:\r\n if self.domain[1] - self.domain[0] + 1 <= num:\r\n return np.arange(self.domain[0], self.domain[1] + 1)\r\n result = np.random.choice(\r\n np.arange(self.domain[0], self.domain[1] + 1), num, replace=False)\r\n if start not in result:\r\n result[0] = start\r\n return result\r\n\r\n\r\n result = np.zeros((num,), dtype='float64')\r\n result[0] = start\r\n min_sep = (self.domain[1] - self.domain[0]) * min(0.05, 0.25 / num)\r\n\r\n for i in range(1, num):\r\n rejections = 0\r\n while True:\r\n _pt = np.random.uniform(self.domain[0], self.domain[1], 1)\r\n if sdist.cdist(_pt.reshape(1, 1), result[:i].reshape(i, 1)).min() < min_sep:\r\n rejections += 1\r\n if rejections > 50000:\r\n raise ValueError(\r\n f'failed to sample! 
domain might be singleton: {self.domain}')\r\n else:\r\n result[i] = _pt\r\n break\r\n\r\n return result", "def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x", "def dense_vector (n, init_val=0.0):\n return [init_val] * n", "def generate_2nd_10th_column(rows):\n\n start_col = 2\n end_col = 10\n range_for_col = end_col - start_col\n result = []\n\n for i in range(0, range_for_col + 1):\n column_index = i + 2\n input_mean = (i + 1) * 10\n std = 1\n column_to_attach = [[\"col\" + str(column_index) + \"_\" + str(input_mean)]]\n\n while len(column_to_attach) < rows + 1:\n item = []\n random_10per_null = get_random_num_given_per_null(1000, 0.1)\n\n if random_10per_null != None:\n gauss_num = random.normalvariate(input_mean, std)\n to_append = \"{0:.2f}\".format(gauss_num)\n else:\n to_append = None\n item.append(to_append)\n column_to_attach.append(item)\n\n display_indicator(ROWS, \\\n len(column_to_attach), \\\n \"column \" + str(column_index) + \", \" + str(len(column_to_attach)) + \" processed\")\n\n if result:\n result = generate_column(column_to_attach, result)\n else:\n result = column_to_attach\n\n return result", "def draw_normal_initial(self):\n means, scale = self.get_means_and_scales_from_q()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def gen_m(self, n_dims):\n m = np.random.randint(-1000, 1000, n_dims).astype(float)\n\n return m", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def generate_dense_vector(expanded_vector_size, number_of_pieces):\n vector = []\n remaining = expanded_vector_size\n piece_length = int(expanded_vector_size / number_of_pieces)\n for i in range(number_of_pieces - 1):\n number = randint(1, 10)\n vector.extend([number, piece_length])\n remaining -= piece_length\n vector.extend([randint(1, 10), remaining])\n return vector", "def rownorm(a):\n return sum(array(a)**2,axis=1)**.5", "def generate_number(lbound=1, ubound=100, mean=None, std=None):\n x = np.arange(lbound, ubound + 1)\n if mean and std:\n prob = stats.norm.pdf(x, loc=mean, scale=std)\n prob = prob / prob.sum() #normalize the probabilities so they sum up to 1\n else:\n prob = np.repeat(1 / len(x), len(x))\n num = np.random.choice(x, p=prob)\n return num", "def generate(self, n):\n num_variables = len(self.xmin)\n\n # Generate in [0,1] space\n x = np.random.rand(n, num_variables)\n\n # Scale from [0,1] to [self.xmin, self.xmax]\n x_scaled = self.scale_to_new_domain(x, self.xmin, self.xmax)\n if self.use_logger:\n self.logger.info(\"Random design: generated {0} points in {1} dimensions\".format(n, num_variables))\n return x_scaled", "def gen_normal(self,n_step=100):\n if n_step < 30:\n print(\"WARNING! The number of steps is small. 
It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution\n yi = np.random.normal()\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def set_random_vector(self):\n self.vector = vu.create_dense_random_vector(dimension)", "def random_init(self, shape):\n return np.random.randn(shape[0],shape[1])*0.01", "def _random_x(self):\n return np.random.uniform(-self._extent, self._extent, self._batchsize)", "def initial_x():\n\n # RANDOMLY GENERATES the INITIAL VALUES of the independent variables:\n temp = [uniform(1, cfg.n) for i in range(cfg.n)]\n\n return np.array(temp, dtype=np.float_)", "def randomgrid(self, n):\n lam = np.random.random((n, 3))\n return self.normalize(lam)", "def init_function(matrix_dimensions):\n\n return numpy.random.uniform(\n low=min_value, high=max_value, size=matrix_dimensions)", "def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector", "def gen_normal(self,n_step=100):\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution\n yi = np.random.normal()\n # Weiner process\n w[i] = yi\n \n return w" ]
[ "0.6294636", "0.60306394", "0.5967467", "0.5958537", "0.5909196", "0.58106685", "0.5785931", "0.56947416", "0.56108725", "0.55979025", "0.55717945", "0.5541559", "0.5512951", "0.5509296", "0.5499508", "0.5478247", "0.5463694", "0.54308057", "0.5373491", "0.535581", "0.5354147", "0.53527117", "0.53493756", "0.5328436", "0.5321781", "0.5311847", "0.5307078", "0.5298074", "0.5298053", "0.52956593" ]
0.7427251
0
Calculates the component Y value in the current iteration. Multiplies dataframe_row by the transposed vector_w to get the scalar value y.
def calculate_y(dataframe_row, vector_w):
    y_val = np.dot(dataframe_row, np.transpose(vector_w))
    return y_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtract_component(dataframe, component_y, vector_w):\n if dataframe.empty:\n raise TypeError('It is impossible to calculate eigen vector W '\n 'and component Y on the empty dataframe.')\n component_df = np.outer(component_y, vector_w)\n result_df = dataframe - component_df\n return result_df", "def calculate_w(dataframe_row, prev_w, prev_y, df_len):\n vector_w = prev_w + (prev_y / df_len * (dataframe_row - prev_y * prev_w))\n norm_vector_w = vector_w / np.linalg.norm(vector_w)\n return norm_vector_w.to_numpy()", "def calculate_component(dataframe, vector_w, component_num):\n if dataframe.empty:\n raise TypeError('It is impossible to calculate eigen vector W '\n 'and component Y on the empty dataframe.')\n\n df_size = len(dataframe)\n # calculate start value y(1)\n y_val = calculate_y(dataframe.iloc[0], vector_w)\n\n # to reach the stable state of the component\n # it should be calculated 10^component_num times.\n for _ in range(10 ** component_num):\n y_vector = [y_val, ]\n for row in range(1, df_size):\n vector_w = calculate_w(dataframe.iloc[row], vector_w,\n y_vector[row - 1], df_size)\n y_val = calculate_y(dataframe.iloc[row], vector_w)\n y_vector.append(y_val)\n\n component = (y_vector, vector_w)\n return component", "def y(self) -> np.ndarray:\n return self.array[:, 2] if self.scalar_vector else self.array[:, 1]", "def y(self) -> float:\n return self.A[2] if self.scalar_vector else self.A[1]", "def cylindrical_y(\n cyl_y_unit_vector: sc.Variable, scattered_beam: sc.Variable\n) -> sc.Variable:\n return sc.dot(scattered_beam, cyl_y_unit_vector)", "def compute_Y(X, w):\n Y = np.sign(np.dot(X, w))\n return Y\n\n # for i in range(A.shape[0]):\n # # prevent overflow by subtracting the max value from each entry in row i\n # A[i, :] = A[i, :] - A[i, :].max()\n # A[i, :] = np.exp(A[i, :])\n # Y[i, :] = A[i, :] / A[i, :].sum()", "def yvec(self):\n return self._yvec", "def test_y_vector_init(self):\n # If you change the y-vector ordering, change here too #Y_VECTOR_CHANGESITE\n\n eng_fields = np.zeros(EngineeringState.N_ENGINEERING_FIELDS)\n component_array = eng_fields[EngineeringState._COMPONENT_START_INDEX:EngineeringState._COMPONENT_END_INDEX]\n for comp_i in range(0, N_COMPONENTS):\n component_array[comp_i + N_COMPONENTS * 0] = True # connected\n component_array[comp_i + N_COMPONENTS * 1] = 1 + (0.01 * comp_i) # capacity\n component_array[comp_i + N_COMPONENTS * 2] = 222200 + comp_i # temperature\n component_array[comp_i + N_COMPONENTS * 3] = comp_i % 2 # coolant_hab_one\n component_array[comp_i + N_COMPONENTS * 4] = True # coolant_hab_two\n component_array[comp_i + N_COMPONENTS * 5] = False # coolant_ayse\n\n coolant_array = eng_fields[EngineeringState._COOLANT_START_INDEX:EngineeringState._COOLANT_END_INDEX]\n for cool_i in range(0, N_COOLANT_LOOPS):\n coolant_array[cool_i + N_COOLANT_LOOPS * 0] = 555500 + cool_i # coolant_temp\n coolant_array[cool_i + N_COOLANT_LOOPS * 1] = cool_i % 2 # primary_pump_on\n coolant_array[cool_i + N_COOLANT_LOOPS * 2] = True # secondary_pump_on\n\n rad_array = eng_fields[EngineeringState._RADIATOR_START_INDEX:EngineeringState._RADIATOR_END_INDEX]\n for rad_i in range(0, N_RADIATORS):\n rad_array[rad_i + N_RADIATORS * 0] = rad_i % 4 # attached_to_coolant_loop\n rad_array[rad_i + N_RADIATORS * 1] = rad_i % 2 # functioning\n\n y0 = np.concatenate((np.array([\n 0x111, 0x222, # x\n 0x333, 0x444, # y\n 0x555, 0x777, # vx\n 0x888, 0x999, # vy\n 0.01, 0.02, # heading\n 0.03, 0.04, # spin\n 0xEEE, 0xFFF, # fuel\n 5, 6, # throttle\n 1, -1, # only First is 
landed on Second\n 0, 1, # Second is broken\n common.SRB_EMPTY,\n 1 # time_acc\n ]),\n eng_fields\n ))\n\n ps = PhysicsState(y0, self.proto_state)\n self.assertTrue(np.array_equal(ps.y0(), y0.astype(ps.y0().dtype)))\n self.assertEqual(ps['First'].landed_on, 'Second')\n\n proto_state = ps.as_proto()\n proto_state.timestamp = 50\n self.assertEqual(proto_state.entities[0].x, 0x111)\n self.assertEqual(proto_state.entities[0].y, 0x333)\n self.assertEqual(proto_state.entities[1].x, 0x222)\n self.assertEqual(proto_state.entities[1].y, 0x444)\n self.assertEqual(proto_state.entities[0].vx, 0x555)\n self.assertEqual(proto_state.entities[0].vy, 0x888)\n self.assertEqual(proto_state.entities[1].vx, 0x777)\n self.assertEqual(proto_state.entities[1].vy, 0x999)\n self.assertEqual(proto_state.entities[0].heading, 0.01)\n self.assertEqual(proto_state.entities[1].heading, 0.02)\n self.assertEqual(proto_state.entities[0].spin, 0.03)\n self.assertEqual(proto_state.entities[1].spin, 0.04)\n self.assertEqual(proto_state.entities[0].fuel, 0xEEE)\n self.assertEqual(proto_state.entities[1].fuel, 0xFFF)\n self.assertEqual(proto_state.entities[0].landed_on, 'Second')\n self.assertEqual(proto_state.entities[1].landed_on, '')\n self.assertEqual(proto_state.timestamp, 50)\n self.assertTrue(proto_state.entities[1].broken)\n\n for i, component in enumerate(ps.engineering.components):\n self.assertEqual(component.connected, True, msg=i)\n self.assertEqual(component.capacity, 1 + (0.01 * i), msg=i)\n self.assertEqual(component.temperature, 222200 + i, msg=i)\n self.assertEqual(component.coolant_hab_one, bool(i % 2), msg=i)\n self.assertEqual(component.coolant_hab_two, True, msg=i)\n self.assertEqual(component.coolant_ayse, False, msg=i)\n\n for i, coolant in enumerate(ps.engineering.coolant_loops):\n self.assertEqual(coolant.coolant_temp, 555500 + i, msg=i)\n self.assertEqual(coolant.primary_pump_on, bool(i % 2), msg=i)\n self.assertEqual(coolant.secondary_pump_on, True, msg=i)\n\n for i, radiator in enumerate(ps.engineering.radiators):\n pass\n self.assertEqual(radiator.attached_to_coolant_loop, i % 4, msg=i)\n self.assertEqual(radiator.functioning, bool(i % 2), msg=i)", "def calc_vertical_velocity(self):\n # Note: again we make use of the automatically added 'grid' attribute, and the stored coef_w attribute.\n # Here we arbitrarily set the vertical velocity to increase in the\n # y-direction (not very realistic).\n return self.coef_w[0] * self.grid.y[None, :] + self.coef_w[1]", "def return_vec(self) :\r\n y_vec = np.concatenate((self.x_vec,self.v_vec))\r\n return y_vec", "def vectorize(self):\n return vectorize(self)", "def compute_w(self):\n self.pinvX = np.linalg.pinv(self.X)\n return np.dot(self.pinvX, self.y)", "def y(self):\n return self[:, 1]", "def data_vector(self) -> np.ndarray:\r\n return np.dot(\r\n self.linear_obj_list[0].mapping_matrix.T, self.w_tilde.dirty_image\r\n )", "def vec_y(self):\t\t\r\n if self.oy != 0:\r\n ov = self.oy\r\n lv = self.self.ly + self.oy\r\n else:\r\n ov = self.dy / 2\r\n lv = self.ly\r\n\r\n yv = \"\"\r\n for num in np.arange(ov, lv, self.dy):\r\n yv += str(num) + \" \"\r\n\r\n return yv", "def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in 
loss. #\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW", "def vector(self):\n return self.__vector", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u", "def vectorized_value(self, x):\n self.vectorizer.vector[:] = x\n arrays = self.vectorizer.unpack()\n self.loss = self.handler.value(arrays).item()\n self.losses.append(self.loss)\n self._n += 1\n self._maybe_update_pbar()\n self._check_loss_target()\n self._maybe_call_callback()\n return self.loss", "def w(self) -> np.ndarray:\n return self.array[:, 0] if self.scalar_vector else self.array[:, 3]", "def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)", "def get_value(self, x):\n return np.dot(self.w, x)", "def y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\ty = np.array([\t3*(y2-y1)/(x2-x1) - 3*initial_slope , \t\\\n\t\t\t\t\t3*(y3-y2)/(x3-x2) - 3*(y2-y1)/(x2-x1), \\\n\t\t\t\t\t3*final_slope - 3*(y3-y2)/(x3-x2)\t], \\\n\t\t\t\t\tfloat)\n\treturn(y)", "def squareY(self):\n \n ## square the y portion element-wise ##\n self.y = [yElement**2 for yElement in self.y]", "def y(self):\n return np.array([f.y for f in self])", "def _get_y_data(self):\n return self.y(self.xs)", "def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)", "def __call__(self):\n return self._vector", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4" ]
[ "0.6645866", "0.6599684", "0.65895844", "0.6283939", "0.61474097", "0.59581596", "0.59146947", "0.58367455", "0.5569749", "0.5507695", "0.5389976", "0.5309533", "0.5297916", "0.524368", "0.5234031", "0.5189916", "0.51827174", "0.5161803", "0.51593584", "0.51460093", "0.51297754", "0.5129212", "0.512901", "0.5100403", "0.50960606", "0.5082347", "0.5069119", "0.50633055", "0.5055908", "0.5051473" ]
0.88765943
0
Subtracts the main component dataframe from the original one.
def subtract_component(dataframe, component_y, vector_w):
    if dataframe.empty:
        raise TypeError('It is impossible to calculate eigen vector W '
                        'and component Y on the empty dataframe.')
    component_df = np.outer(component_y, vector_w)
    result_df = dataframe - component_df
    return result_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtract(self, other):\n return self.as_dataframe(subtract(self.data, other.data))", "def inverse_transform(self, df):\n return df", "def subtract(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n\n if trans_method == 'original':\n df = pd.concat(\n [self.first_values, (df - df.shift(1)).tail(df.shape[0] - 1)], axis=0\n )\n return df\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n df = df - df.shift(1)\n return df.tail(df_len)", "def background_subtract_data(data):\n bgsub_data = data.copy()\n bax_concs = data.columns.levels[0]\n lipo_concs = data.columns.levels[1]\n\n for bax_conc in data.columns.levels[0]:\n timecourses = data.xs(bax_conc, axis=1, level='Bax')\n bg = timecourses[0.]\n for lipo_conc in lipo_concs:\n bgsub_tc = timecourses[lipo_conc] - bg\n bgsub_data[(bax_conc, lipo_conc)] = bgsub_tc\n\n return bgsub_data", "def test_arithmetic_operations() -> None:\n\n # one two\n # 0 1\n # 2 3\n # 4 5\n df = pd.DataFrame(np.arange(6).reshape((3, 2)), columns=[\"one\", \"two\"])\n\n series = df.iloc[0] # first row == (0, 1)\n\n assert series.index.values.tolist() == [\"one\", \"two\"]\n assert series.values.tolist() == [0, 1]\n\n # Arithmetic operations between frames and series match the index of the\n # series (column names) on the columns of the frame, broadcasting over the\n # rows by default.\n\n df2 = df.sub(series) # axis=1\n\n # one two\n # 0 0\n # 2 2\n # 4 4\n assert df2.values.flatten().tolist() == [0, 0, 2, 2, 4, 4]\n\n # If you want to match on rows, use axis=0. 
This will match the index of the\n # series (row indices) on the rows of the frame, broadcasting over the\n # columns by default.\n series = df.loc[:, \"one\"]\n\n df2 = df.sub(series, axis=0)\n # one two\n # 0 1\n # 0 1\n # 0 1\n assert df2.values.flatten().tolist() == [0, 1, 0, 1, 0, 1]", "def subtract(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas:\n\t\t\tresult -= data\n\t\treturn result", "def subtract(self,ctSub):\n\n # First confirm eligible for subtraction\n if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):\n raise Exception(\"Can't subtract because not meshed the same\")\n\n ctResult = copy.deepcopy(ctSub)# copy the class\n\n \n # Original method\n # ctResult.u = self.u - ctSub.u\n # ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')\n\n # New method\n ctResult.u_mesh = self.u_mesh - ctSub.u_mesh\n ctResult.v_mesh = self.v_mesh - ctSub.v_mesh\n ctResult.w_mesh = self.w_mesh - ctSub.w_mesh\n ctResult.u_cubed = self.u_cubed - ctSub.u_cubed\n\n\n return ctResult", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n tile_len = len(self.tile_values_lag_1.index)\n df_len = df.shape[0]\n sdf = pd.DataFrame(\n np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))\n )\n if trans_method == 'original':\n sdf = sdf.tail(df_len)\n else:\n sdf = sdf.head(df_len)\n sdf.index = df.index\n sdf.columns = df.columns\n return df + sdf", "def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))", "def clean(self, ref):\n # NOTE: This currently only works on the top-most frame\n f1 = self.frames[0]\n f2 = ref.frames[0]\n f1.subtract(f2)", "def reverse_transform(self):\n self.reaction_df['dG0'] = self.reaction_df['dG0_prime']\n\n for i, rxn in self.iterreactions():\n aq_cond = self.reaction_df.loc[i, ['pH', 'I', 'T']]\n self.reaction_df.at[i, 'dG0'] -= rxn.get_transform_ddG0(*aq_cond)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def dst(df):\n pass", "def df_semi_minus(df1, df2, left, right=None):\n if right is None:\n right = left\n\n df2 = df2[right].copy()\n df2['_flag_'] = 1\n joined = pd.merge(df1, df2, left_on=left, right_on=right, how='left', suffixes=('', '_y'))\n joined = joined[joined['_flag_'].isna()]\n return joined.drop([col for col in joined.columns if col.endswith('_y')] + ['_flag_'], axis=1)", "def inverse_transform(self, df):\n if self.log:\n df = pd.DataFrame(np.exp(df))\n if self.squared:\n df = df ** 0.5\n df = df - self.shift_amount\n return df", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def clean_copy(self, df):\n assert self.is_appropriate_data_instance(df)\n # 
Polars doesn't need explicit copying due to copy on write semantics\n if isinstance(df, pl.LazyFrame):\n df = df.collect()\n return df", "def dataframe_diff(xxa,xxb):\n\n xa=pd.DataFrame(xxa)\n xb=pd.DataFrame(xxb)\n merged = xa.merge(xb, indicator=True, how='outer')\n\n diff=merged[merged['_merge'] != 'both']\n\n return diff", "def return_subtraction_df(\n df_1: pd.DataFrame,\n df_2: pd.DataFrame,\n index_col=\"yearmon\"\n) -> pd.DataFrame:\n df_1 = df_1.set_index(index_col).copy()\n df_2 = df_2.set_index(index_col).copy()\n\n overlapping_index_values = sorted(list(set(df_1.index.intersection(df_2.index))))\n num_cols = df_1.select_dtypes(include=np.number).columns.to_list()\n\n df_1_num_values = df_1.loc[overlapping_index_values, num_cols].to_numpy()\n df_2_num_values = df_2.loc[overlapping_index_values, num_cols].to_numpy()\n df_diff_values = df_1_num_values - df_2_num_values\n df_diff = pd.DataFrame(\n df_diff_values,\n columns=num_cols,\n index=sorted(overlapping_index_values)\n )\n return df_diff", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = df * self.center\n return df", "def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def __sub__(self, other):\n if not hasattr(other, \"dtype\") or self.dtype != other.dtype:\n raise TypeError(\"Can only calculate distance between two DFs. of \"\n \"the same type.\")\n return abs(np.dot(self.df, other.df)/(self.norm*other.norm)-1.)", "def __sub__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() - other.get_values())\n return tmp", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df", "def make_and_append_negative_data(self):\n negative_df = self.get_negative_data()\n self.df = pd.concat((self.df, negative_df))" ]
[ "0.7236939", "0.62537533", "0.5925698", "0.5848495", "0.5840385", "0.58321095", "0.5831804", "0.58191985", "0.58087337", "0.57987154", "0.5797149", "0.5768833", "0.57661474", "0.57661474", "0.57661474", "0.57560253", "0.5725365", "0.5725322", "0.57107365", "0.57061017", "0.56789744", "0.56261927", "0.562289", "0.55695415", "0.5563374", "0.5560815", "0.556068", "0.556068", "0.556068", "0.5559624" ]
0.6353868
1
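The `subtract_component` function in the record above removes a rank-1 term — the outer product of a component score vector and a weight (loading) vector — from the original dataframe, which is the deflation step of an iterative PCA/NIPALS-style decomposition. A minimal usage sketch follows; the toy shapes and values are made up for illustration, and `subtract_component` is assumed to be in scope as defined in the record.

```python
import numpy as np
import pandas as pd

# Toy data: 4 samples x 3 features (arbitrary values, illustration only).
df = pd.DataFrame(np.arange(12, dtype=float).reshape(4, 3),
                  columns=["f1", "f2", "f3"])

# Hypothetical rank-1 component: one score per row, one loading per column,
# so np.outer(component_y, vector_w) has the same shape as df.
component_y = np.array([0.5, 1.0, -0.2, 0.3])  # shape (4,)
vector_w = np.array([0.7, 0.1, 0.7])           # shape (3,)

deflated = subtract_component(df, component_y, vector_w)
print(deflated.shape)  # (4, 3): same shape, rank-1 part removed
print(np.allclose(df - np.outer(component_y, vector_w), deflated))  # True
```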
Function to check whether the latest IERS tables are present. Else downloads it.
def checkIERS(warn_update=14*u.day): try: currentTime = Time.now() table = iers.IERS_Auto.open() index_of_last_observation = ''.join(table['PolPMFlag_A']).index('IP') time_of_last_observation = Time(table['MJD'][index_of_last_observation],format='mjd') time_since_last_update = Time.now() - time_of_last_observation if int(currentTime.mjd)*u.day not in iers.IERS_Auto.open()['MJD']: print("IERS tables are outdated! Downloading latest table...") download_IERS_A() if warn_update < time_since_last_update: print("IERS tables are outdated! Downloading latest table...") download_IERS_A() if int(currentTime.mjd)*u.day in iers.IERS_Auto.open()['MJD']: print("Latest IERS tables are present. Proceeding...") except: print("Could not download latest IERS tables.\n Rise and Set time will be error prone.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_downloaded(self):\n return self._system.file_exists(self._tar_name)", "def check_downloaded(self):\n for o in self.order_lst:\n for item in o.get_items():\n mdata = item.get_metadata()\n if 'downloaded' in mdata.keys():\n if str(mdata['downloaded']) == 'True':\n return True\n \n return False", "def has_scn_download(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one()\n ses.close()\n logger.debug(\"Closed the database session.\")\n return query_result.Downloaded", "def run_downloader(self):\n \"\"\"calls to the file downloader\"\"\"\n try:\n html = self.get_page(self.url)\n soup = self.get_soup(html)\n if soup is not None: # If we have soup -\n self.get_links(soup)\n self.get_files()\n else:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected',\n e)\n return False\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected', e)\n\n return False\n return True", "def check_fetch_lovecraft():\n url = 'https://dl.dropboxusercontent.com/u/15378192/lovecraft_fiction.zip'\n partial_path = get_dataset_dir(\"lovecraft\")\n full_path = os.path.join(partial_path, \"lovecraft_fiction.zip\")\n if not os.path.exists(partial_path):\n os.makedirs(partial_path)\n if not os.path.exists(full_path):\n download(url, full_path, progress_update_percentage=1)\n return full_path", "def input_data_is_downloaded(year: int, day: int, session: str) -> bool:\n cache_file = _join_path(year, day, session, file_type=\"input_file\")\n cache_file = Path(cache_file)\n return cache_file.exists()", "def is_file_downloading(self, data_url):\n\n # Sometimes it takes too long to load the list\n self.parent.wait_for_element_displayed(DOM.DownloadManager.download_list[0],\n DOM.DownloadManager.download_list[1], 60)\n return self.get_download_status(data_url) == \"downloading\"", "def _check_url_file (url, path_download, outfile) :\n if \"http://\" in url.lower () :\n dest = outfile if outfile != None else _get_file_url (url, path_download)\n down = False\n nyet = dest + \".notyet\"\n \n if os.path.exists (dest) and not os.path.exists (nyet) :\n try :\n fLOG(\"trying to connect\", url)\n f1 = urllib.urlopen (url)\n down = _first_more_recent (f1, dest)\n newdate = down\n f1.close ()\n except IOError :\n fLOG(\"unable to connect Internet, working offline for url\", url)\n down = False\n else : \n down = True\n newdate = False\n \n if down :\n if newdate : fLOG (\" downloading (updated) \", url)\n else : fLOG (\" downloading \", url)\n \n if len (url) > 4 and url [-4].lower () in [\".txt\", \".csv\", \".tsv\", \".log\"] :\n fLOG (\"creating text file \", dest)\n format = \"w\"\n else : \n fLOG (\"creating binary file \", dest)\n format = \"wb\"\n \n if os.path.exists (nyet) :\n size = os.stat (dest).st_size\n fLOG (\"resume downloading (stop at\", size, \") from \", url)\n request = urllib.request.Request(url) \n request.add_header(\"Range\", \"bytes=%d-\" % size)\n fu = urllib.request.urlopen (request) \n f = open (dest, format.replace (\"w\", \"a\"))\n else :\n fLOG (\"downloading \", url)\n request = urllib.request.Request(url) \n fu = urllib.request.urlopen (url)\n f = 
open (dest, format)\n \n open (nyet, \"w\").close ()\n c = fu.read (2**21)\n size = 0\n while len (c) > 0 :\n size += len (c)\n fLOG(\" size\", size)\n f.write (c)\n f.flush ()\n c = fu.read (2**21)\n fLOG (\"end downloading\")\n f.close ()\n fu.close ()\n os.remove (nyet)\n \n url = dest\n return url", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def can_download():\n downloading = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='downloading'\")\n numdownload = len(downloading)\n used = get_space_used()\n avail = get_space_available()\n \n can_dl = (numdownload < config.download.numdownloads) and \\\n (avail > config.download.min_free_space) and \\\n (used < config.download.space_to_use)\n return can_dl", "def is_downloaded(self) -> bool:\n if not self.download_path:\n return False\n return Path(self.download_path).exists()", "def downloaded(self) -> bool:\n return not (self.data is None)", "def ferry_data_download(URL):\n explanation = 'File exists'\n file_downloaded = True\n # Request if the thredds server is working, add .html to URL\n req = requests.get(URL + '.html')\n if req.status_code == 200:\n \"\"\"File exists and is good for download, so write file\"\"\"\n print('File is ok')\n explanation = 'Good URL, File downloaded'\n file_downloaded = True\n ferry = xr.open_dataset(URL)\n else:\n print('File not found or unavailable')\n explanation = ' File not found or unavailable'\n file_downloaded = False\n ferry = np.nan\n return (ferry, file_downloaded, explanation)", "def _download_table_kernel(self, sql, fp, header={}, overwrite=False):\n\t\tfn = ntpath.basename(fp)\n\n\t\tif not os.path.isfile(fp) or overwrite:\n\t\t\tprint((\"[hscobj] querying table {} from HSC\".format(fn)))\n\n\t\t\thscsspquery.hscSspQuery_retry(n_trials=20, sql=sql, filename_out=fp, release_version=self.data_release)\n\n\t\t\tif os.path.isfile(fp) and (os.stat(fp).st_size > 0):\n\t\t\t\tprint(\"[hscobj] successful\")\n\n\t\t\t\tif len(header) > 0:\n\t\t\t\t\t_add_header_columns_to_table(fp, header)\n\n\t\t\t\tstatus = True\n\n\t\t\telse: \n\t\t\t\tprint((\"[hscobj] querying table {} from HSC failed\".format(fn)))\n\t\t\t\tif os.path.isfile(fp):\n\t\t\t\t\tos.remove(fp)\n\t\t\t\tstatus = False\n\n\t\telse:\n\t\t\tprint((\"[hscobj] skip querying table {} from HSC as file exists\".format(fn)))\n\t\t\tstatus = True\n\n\t\treturn status", "def check_fetch_frey():\n url = 'http://www.cs.nyu.edu/~roweis/data/frey_rawface.mat'\n partial_path = get_dataset_dir(\"frey\")\n full_path = os.path.join(partial_path, \"frey_rawface.mat\")\n if not os.path.exists(partial_path):\n os.makedirs(partial_path)\n if not os.path.exists(full_path):\n download(url, full_path, progress_update_percentage=1)\n return full_path", "def is_downloadable(url):\n h = requests.head(url, allow_redirects=True)\n header = h.headers\n content_type = header.get('content-type')\n if 'text' in content_type.lower():\n return False\n if 'html' in content_type.lower():\n return False\n return True", "def download_agent_if_missing(filename):\n if file_missing(filename):\n print filename+'is missing, downloading it first'\n download(filename)", "def download_needed(self, response, outfile, quiet=True):\r\n try:\r\n remote_date = datetime.strptime(response.headers['Last-Modified'],\r\n 
'%a, %d %b %Y %X %Z')\r\n if isfile(outfile):\r\n local_date = datetime.fromtimestamp(os.path.getmtime(outfile))\r\n if remote_date <= local_date:\r\n if not quiet:\r\n print(\r\n os.path.basename(outfile) +\r\n ': Skipping, found more recently modified local '\r\n 'copy (use --force to force download)')\r\n return False\r\n except:\r\n pass\r\n return True", "def is_downloadable(self):\n return True", "def is_downloadable(self):\n return False", "def check_if_downloaded( url, debug_print = True ):\n\t# Get pdf filename\n\tfilename = basename( url )\n\tfileno, ext_pdf = splitext( filename )\n\tfor file in listdir( getcwd() ):\n\t\tif fileno in file:\n\t\t\tif debug_print:\n\t\t\t\tprint 'Skipping %s' % ( filename )\n\t\t\treturn True\n\treturn False", "def CheckTables():\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n tables = [row[0] for row in cursor.fetchall()]\n if not tables or not \"master\" in tables:\n cursor.execute('''CREATE TABLE master (\n hit TEXT,\n logfile TEXT,\n count INTEGER,\n date TEXT)''')\n con.commit()\n if not tables or not \"rotate\" in tables:\n cursor.execute('''CREATE TABLE rotate (\n logfile TEXT,\n hash TEXT,\n date TEXT)''')\n con.commit()", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def refreshInstallersNeeded(self):\n for archive in dirs['installers'].list():\n apath = dirs['installers'].join(archive)\n if not apath.isfile() or not archive.cext in ('.7z','.zip','.rar'):\n continue\n installer = self.data.get(archive)\n if not installer or (installer.size,installer.modified) != (apath.size,apath.mtime):\n return True\n return False", "def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0", "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def download_updates_if_available(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n latest_version = self.get_latest_version()\n\n print('Checking version... ')\n print('\\tCurrent version: ', current_version)\n print('\\tLatest version: ', latest_version)\n\n if not latest_version:\n return False\n\n if (not current_version) or (latest_version > current_version):\n print('Updating...')\n if not self.path_exists(self._module):\n os.mkdir(self._module)\n\n # Check if there's a botched download already. 
If next directory already exists remove it and tree.\n if self.path_exists(self.get_module_and_path('next')):\n self.rmtree(self.get_module_and_path('next')) # Remove the 'next' directory and contents.\n\n # Create the next directory and download the source files.\n os.mkdir(self.get_module_and_path('next'))\n self.download_all_files(self._github_repo + '/contents/' + self._main_dir, latest_version)\n\n # Last step is to write the .version file only if we have completed the download\n with open(self.get_module_and_path('next/.version'), 'w') as versionfile:\n versionfile.write(latest_version)\n versionfile.close()\n\n return True\n return False", "def is_downloadable(self):\n raise NotImplementedError('Implement this method.')", "def tables_exist():\n\n tables_in_db = False\n tables_exist_query = 'SHOW TABLES'\n my_cursor.execute(tables_exist_query)\n my_tables = my_cursor.fetchall()\n\n if len(my_tables) == 4:\n tables_in_db = True\n\n return tables_in_db", "def has_table(self, name: str) -> bool:\n try:\n self.execute(\"select * from {table} limit 1\", name)\n return True\n except sqlite3.OperationalError:\n return False" ]
[ "0.6312253", "0.6298091", "0.5929028", "0.5889317", "0.5832926", "0.56521076", "0.5616137", "0.55603564", "0.5538858", "0.5523581", "0.5517897", "0.5510933", "0.55036175", "0.5465765", "0.5457709", "0.5451811", "0.5449185", "0.54367334", "0.54341847", "0.542871", "0.5426154", "0.5422011", "0.54162526", "0.540372", "0.5395645", "0.5381298", "0.53784925", "0.5377543", "0.53565234", "0.53519624" ]
0.70178634
0
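The `checkIERS` record above decides whether to refresh the Earth-orientation tables by locating the last measured entry in the `IERS_Auto` table (the first 'IP'-flagged polar-motion row) and comparing its age against `warn_update`. Below is a condensed sketch of that staleness test, reusing only the astropy calls that appear in the snippet; the download step itself (`download_IERS_A` in the record) is left out of the sketch.

```python
import astropy.units as u
from astropy.time import Time
from astropy.utils import iers

def iers_table_age(warn_update=14 * u.day):
    # Sketch of the staleness test used above; assumes network access so
    # IERS_Auto.open() can fetch or refresh the table when astropy allows it.
    table = iers.IERS_Auto.open()
    # First predicted ('IP') polar-motion entry marks the end of measured
    # data, located exactly as in the record above.
    idx = "".join(table["PolPMFlag_A"]).index("IP")
    last_observation = Time(table["MJD"][idx], format="mjd")
    age = Time.now() - last_observation
    return age, warn_update < age  # (age as TimeDelta, needs-update flag)
```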
Commands for making your life in AWS easier
def aws(ctx): # pylint: disable=unused-argument pass # pylint: disable=unnecessary-pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aws():\n pass", "def cli(ctx, region, profile):\n session = boto3.session.Session(profile_name=profile, region_name=region)\n ecs = session.client('ecs')\n ecr = session.client('ecr')\n ctx.obj = {\n 'region': region,\n 'profile': profile,\n 'ecs': ecs,\n 'ecr': ecr\n }\n pass", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n if args['env'] == 'S3' and args['aws_region'] != '':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n logger.info('Check to see whether s3 bucket exits...')\n try:\n s3.meta.client.head_bucket(Bucket=args['bucket_name'])\n logger.info(f\"S3 bucket {args['bucket_name']} exits...\")\n except Exception as e:\n logger.warn(f\"Bucket {args['bucket_name']} doesn't exist...\")\n logger.info('Creating bucket...')\n create_s3_bucket(s3_client, args['bucket_name'], args['aws_region'])\n\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n sas_jar_ver = config['APP']['sas_jar_ver']\n os.environ['SAS_JAR'] = \".\".join(sas_jar_ver.split('.')[:-1])\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n data_dir = config['DOCKER']['data_dir']\n path = config['DOCKER']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['DOCKER']['dict_dir']\n files = json.loads(config['DOCKER']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['DOCKER']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['DOCKER']['us_demographics_file'])\n dictionary_file = 
os.path.join(base_dir, dict_dir, config['DOCKER']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n path = config['S3']['s3_sas_key']\n dict_dir = config['S3']['s3_dict_key']\n csv_dir = config['S3']['s3_csv_key']\n sas_file_path = os.path.join(\"s3a://\", bucket, csv_dir, path)\n files = json.loads(config['S3']['input_files'])\n airport_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['airports_file'])\n demographic_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['us_demographics_file'])\n dictionary_file = os.path.join(\"s3a://\", bucket, config['S3']['dictionary_file'])\n output_dir = os.path.join(\"s3a://\", bucket, config['S3']['output_dir'])\n else:\n base_dir = config['LOCAL']['base_dir']\n data_dir = config['LOCAL']['data_dir']\n path = config['LOCAL']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['LOCAL']['dict_dir']\n files = json.loads(config['LOCAL']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['LOCAL']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['LOCAL']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['LOCAL']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('ETL parsing has started...')\n logger.info(\"Create output dir if it doesn't exist...\")\n if args['env'] != 'S3':\n pathlib.Path(output_dir).mkdir(exist_ok=True)\n else:\n # config.set('S3', 's3_bucket_name', args['bucket_name'])\n # s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['config_dir'], Body=)\n s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['output_dir'])\n logger.info('Created S3 bucket...')\n \n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info('Register UDFs...')\n \n spark.udf.register('SASDateConverter', sas_date_converter, Date())\n logger.info('Register sas_date_converter UDF...')\n\n # change_date_format_1 = F.udf(lambda x: datetime.strptime(x.strip(), '%Y%m%d'), Date())\n # change_date_format_2 = F.udf(lambda x: datetime.strptime(x.strip(), '%m%d%Y'), Date())\n dt = F.udf(change_date_format, Date())\n\n logger.info('Read and concatenate the raw SAS files...')\n dfs = []\n for file in files:\n try:\n df = spark.read.format('com.github.saurfang.sas.spark')\\\n .load(os.path.join(sas_file_path, file))\n dfs.append(df)\n except Exception as e:\n logger.info(f'File {file} is not available. 
Skipping...')\n logger.info(f'Read {len(files)} files successfully...')\n df = []\n if len(dfs) > 0:\n df = concat_df(*dfs)\n logger.info(f'Successfully concatenated {len(files)}...')\n if not isinstance(df, list):\n # SAS raw data table creation begins here\n cols = ['cicid', 'i94yr', 'i94mon', 'i94port', 'i94mode', 'visapost', \n 'entdepa', 'entdepd', 'entdepu', 'matflag', \n 'dtadfile', 'dtaddto']\n parquet_tables = ['i94_immigrations', 'i94_trips', 'i94_visitors', 'i94_flights']\n f_transforms = [i94_immigrations, i94_trips, i94_visitors, i94_flights]\n res_df = None\n for table, f_transform in zip(parquet_tables, f_transforms):\n if table == 'i94_immigrations':\n # only table not using spark sql\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=None, cols=cols,\n udf=dt, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n elif table == 'i94_flights':\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='csv',\n is_partition=False,\n is_overwrite=True,\n crate_date_df=False)\n else:\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n if table == 'i94_trips':\n table = 'i94_dates'\n create_and_write_df(res_df, table, i94_dates, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n # Reference data for airports and us city demographics begins here\n airport_df = spark.createDataFrame([], R([]))\n demographic_df = spark.createDataFrame([], R([]))\n logger.info('Read the airports reference file...')\n try:\n airport_df = spark.read.option('header', True) \\\n .csv(airport_file)\n except Exception as e:\n logger.error(f'File {airport_file} is not available. Skipping...')\n\n logger.info('Read the US demographics reference file...')\n try:\n demographic_df = spark.read.options(header='True', delimiter=';') \\\n .csv(demographic_file) \n except Exception as e:\n logger.error(f'File {demographic_file} is not available. 
Skipping...')\n if airport_df.count() > 0 and demographic_df.count() > 0: \n csv_tables = ['i94_airports', 'i94_us_states_demographic', \n 'i94_us_cities_demographic']\n f_transforms = [i94_airports, i94_us_states_demographic, i94_us_cities_demographic]\n csv_dfs = [airport_df, demographic_df, demographic_df]\n for table, f_transform, df in zip(csv_tables, f_transforms, csv_dfs):\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=dt, fmt='csv',\n is_partition=False,\n is_overwrite=True)\n\n # SAS reference data creation begins here\n ref_csv_tables = ['i94_countries', 'i94_port_state_mapping', 'i94_travel_mode', \n 'i94_state_mapping', 'i94_visa']\n table_pos_dict = {\n 'i94_countries': [2, 3, 'country', 'country_id'],\n 'i94_port_state_mapping': [3, 4, 'city', 'i94_port'],\n 'i94_travel_mode': [4, 5, 'mode', 'mode_id'],\n 'i94_state_mapping': [5, 6, 'state', 'state_id'],\n 'i94_visa': [6, 7, 'visa_purpose', 'visa_id']\n }\n logger.info('Read the SAS data dictionary reference file...') \n for table in ref_csv_tables:\n create_and_write_ref_df(dictionary_file, table, output_dir, spark, \n fmt='csv', start_pos=table_pos_dict[table][0], \n end_pos=table_pos_dict[table][1],\n col_name=table_pos_dict[table][2], \n index_name=table_pos_dict[table][3],\n is_partition=False,\n is_overwrite=True)\n\n logger.info('ETL parsing has completed...')\n logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))", "def cli(profile, region, clear):\n global SESSION, BUCKET_MANAGER, DOMAIN_MANAGER, CERT_MANAGER, \\\n DIST_MANAGER, EC2_MANAGER, ECS_MANAGER\n session_cfg = {}\n if profile:\n session_cfg['profile_name'] = profile\n\n if region:\n session_cfg['region_name'] = region\n\n if clear:\n util.clear_scr()\n\n# using **<variable> python expands it as a parameter=content\n SESSION = boto3.Session(**session_cfg)\n BUCKET_MANAGER = BucketManager(SESSION)\n DOMAIN_MANAGER = DomainManager(SESSION)\n CERT_MANAGER = CertificateManager(SESSION)\n DIST_MANAGER = DistributionManager(SESSION)\n EC2_MANAGER = EC2Manager(SESSION)\n ECS_MANAGER = ECSManager(SESSION)", "def main():\n P0(AwshCmd.__doc__)\n\n if len(sys.argv) >= 2:\n profile = sys.argv[1]\n else:\n profile = os.environ.get('AWS_DEFAULT_PROFILE','default')\n AWSDIR = os.environ['HOME']+'/.aws'\n\n if profile == '-':\n config = awshelpers.readconfig('config')\n credentials = awshelpers.readconfig('credentials')\n P('profiles:')\n for p in sorted(credentials.keys()):\n P(' %s'%p)\n sys.exit(0)\n\n historyFile = AWSDIR+'/%s.history'%profile\n open(historyFile,'a').close()\n\n if os.path.exists(historyFile):\n # gnu/libedit readline weirdness on macos. 
see\n # https://docs.python.org/2/library/readline.html\n if readline.__doc__.rfind('libedit') == -1:\n readline.read_history_file(historyFile)\n\n def writeHistory(historyFile = historyFile):\n readline.write_history_file(historyFile)\n atexit.register(writeHistory)\n\n cc = AwshCmd(profile)\n while True:\n try:\n cc.cmdloop()\n break\n except KeyboardInterrupt:\n P('^C')\n except Exception,e:\n P('')\n P('*'*72)\n P('Unexpected Error:')\n traceback.print_exc()\n P('')\n P('please file a bug report')\n P('and include the above error details:')\n P(' https://github.com/marhar/aws/issues/new')\n P('*'*72)", "def main(input_args):\n logging.basicConfig(level=logging.INFO)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--create-ami', action='store_true',\n default=False,\n help='Creates an AMI instead of deploying an EC2 instance')\n parser.add_argument(\n '--dry-run', action='store_true',\n default=False,\n help='Do not create resources')\n parser.add_argument(\n '--include-apps', action='append',\n default=[],\n help='Assume other apps have already been deployed')\n parser.add_argument(\n '--local-docker', action='store_true',\n default=False,\n help='Start apps using the docker daemon on the machine'\\\n' executing the script')\n parser.add_argument(\n '--skip-create-network', action='store_true',\n default=False,\n help='Assume network resources have already been provisioned')\n parser.add_argument(\n '--prefix', action='store',\n default=None,\n help='prefix used to tag the resources created'\\\n ' (defaults to config name)')\n parser.add_argument(\n '--config', action='store',\n default=os.path.join(os.getenv('HOME'), '.aws', APP_NAME),\n help='configuration file')\n\n args = parser.parse_args(input_args[1:])\n run_config(args.config,\n create_ami=args.create_ami,\n local_docker=args.local_docker,\n include_apps=args.include_apps,\n skip_create_network=args.skip_create_network,\n tag_prefix=args.prefix,\n dry_run=args.dry_run)", "def main():\n\n args = parser_args()\n exit_code = 0\n con = AWSConnect()\n\n con.delete_unused(args.noop)\n\n return exit_code", "def aws():\n env.hosts = 'ec2-54-187-201-203.us-west-2.compute.amazonaws.com'\n env.user = 'ubuntu'\n env.key_filename = '/Users/jenniferchen/Downloads/hs698v2.pem'\n env.virtualenv = {'dir': '/server', 'name': 'venv'}", "def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)", "def setup_aws():\n setup_queues()\n setup_buckets()\n setup_domains()", "def prepare_instance():\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get install -y python-pip python-setuptools\")\n sudo(\"pip install BeautifulSoup\")\n sudo(\"pip install --upgrade boto\")\n sudo(\"mv /usr/lib/pymodules/python2.6/boto /tmp\")", "def main():\n # Creating resources/clients for all needed infrastructure: EC2, IAM, Redshift\n ec2 = create_client('ec2', boto3.resource)\n iam = create_client('iam', 
boto3.client)\n redshift = create_client('redshift', boto3.client)\n \n # Create needed IAM / ARN roles for Redshift\n create_iam_role(iam)\n arn_role = create_arn_role(iam)\n \n # Create cluster and await its completion\n create_redshift_cluster(redshift, arn_role)\n cluster_props = query_redshift_status(redshift)\n \n # Get endpoint into to allow querying\n info = get_redshift_endpoint_info(redshift, cluster_props)\n print(info)\n # TODO: Save info to aws.cfg\n \n # Update security groups to ACTUALLY allow querying\n update_cluster_security_group(ec2, cluster_props)\n \n # Test connection to see that everything (hopefully) went well\n test_connection()\n \n # End of main\n return", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n parser.add_argument('--tables', default='[]', type=json.loads, help='Enter list of tables to check')\n parser.add_argument('--table-col', default='{}', type=json.loads, help='Enter list of tables to check')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n raise\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n\n if args['env'] == 'S3':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n\n\n tables = args['tables']\n table_col_dict = args['table_col']\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['dq_log_file']\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n output_dir = 
config['S3']['s3_output_key']\n output_dir = os.path.join(\"s3a//\", bucket, output_dir)\n else:\n base_dir = config['LOCAL']['base_dir']\n # log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n # log_file = config['LOCAL']['log_file']\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['dq_log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['dq_log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('Data quality check has started...')\n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info(\"Check whether table exists...\")\n valid_tables = []\n if args['env'] == 'S3':\n for table in tables:\n res = s3_client.list_objects(Bucket=bucket, Prefix=os.path.join(output_dir, table))\n if 'Contents' in res:\n valid_tables.append(table)\n else:\n logger.error(f'Table {table} is invalid...')\n else:\n for table in tables:\n try:\n if os.path.isdir(os.path.join(output_dir, table)):\n valid_tables.append(table)\n except Exception as e:\n logger.error(f'Table {table} is invalid...')\n logger.error(e)\n # assume the table names are the same in the\n # list and dict\n if len(table_col_dict) > 0:\n valid_table_cols = {table: table_col_dict[table] for table in valid_tables}\n else:\n valid_table_cols = {}\n\n logger.info('Checking for empty Dataframes...')\n if len(valid_tables) > 0:\n for table in tables:\n try:\n df = spark.read.parquet(os.path.join(output_dir, table), header=True)\n logger.info(f'Table {table} being checked is a parquet table')\n except:\n df = spark.read.csv(os.path.join(output_dir, table), header=True)\n logger.info(f'Table {table} being checked is a csv table...')\n if check_empty_table(spark, df) == 0:\n logger.error(f'Table {table} has empty rows...')\n else:\n logger.info(f'Table {table} has at least 1 record...')\n else:\n logger.info('No tables to check...')\n\n logger.info('Checking for null columns in tables...')\n if len(valid_table_cols) > 0:\n for table, col_list in table_col_dict.items():\n try:\n df = spark.read.parquet(os.path.join(output_dir, table), header=True)\n logger.info(f'Table {table} being checked is a parquet table')\n except:\n df = spark.read.csv(os.path.join(output_dir, table), header=True)\n logger.info(f'Table {table} being checked is a csv table...')\n if len(check_null_columns(spark, df, col_list)) != 0 and check_null_columns(spark, df, col_list)[0] == 'failed':\n logger.error('The null column check failed possibly due to invalid column selection...')\n elif len(check_null_columns(spark, df, col_list)) > 0: \n logger.info(f'Columns with nulls {col_list}')\n logger.error(f'Table {table} has columns with null values...')\n else:\n logger.info(f'Table {table} has no null values in the primary key(s)...')\n else:\n logger.info('No table columns to check...')\n \n logger.info('Data quality check has completed...')\n logger.info('Time taken to complete job {} 
minutes'.format((time.time() - t0) / 60))", "def main():\n\n # import aws_ecs_services.arguments as arguments\n from .arguments import get_cli_arguments\n\n # args = arguments.get_cli_arguments()\n args = get_cli_arguments()\n\n by_service_dns = False\n by_service_name = False\n by_task_name = False\n list_clusters = False\n only_cluster_instances = False\n only_ec2_instances = False\n list_running_services = False\n list_running_tasks = False\n list_services = False\n list_projects = False\n use_config = False\n\n debug = args.debug\n if debug:\n logger.setLevel(logging.DEBUG)\n logger.debug(\"Show DEBUG information.\")\n stream_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(f\"%(lineno)s: {logging.BASIC_FORMAT}\")\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.propagate = False\n else:\n logger.setLevel(logging.INFO)\n\n # If a configuration file and a project are given,the configruation file is used.\n # Otherwise the cli ooptions are considerd.\n project = args.project\n # Variable replacement in config file uses '{service}'.\n service = args.service\n config = args.config\n if (\n os.path.exists(config)\n and project\n or args.subcommand\n in (\"list-configured-projects\", \"list-configured-services\")\n ):\n logger.info(f\"Loading config from: '{config}'.\")\n if not os.path.exists(config):\n logger.error(f\"No config file: '{config}'.\")\n return 1\n use_config = True\n\n if use_config:\n data = None\n try:\n with open(config, \"r\") as config_file:\n data = json.load(config_file)\n except (ValueError) as e:\n logger.error(\n f\"Check the JSON sytanx in the config file '{config}': '{str(e)}'\"\n )\n return 1\n logger.debug(f\"Data: {data}\")\n if not data or not isinstance(data, dict):\n logger.error(f\"Could not load configuration: '{data}'.\")\n return 1\n\n if use_config:\n region = data.get(\"region\", args.region)\n else:\n region = args.region\n\n if use_config:\n projects = data.get(\"projects\", {})\n if args.subcommand not in (\"list-configured-projects\"):\n if project not in projects:\n logger.error(\n f\"Missing configuration for project: '{project}'. Choose from {list(projects.keys())}.\"\n )\n return 1\n project_config = projects.get(project, None)\n if not project_config:\n logger.error(\n f\"Missing configuration for project: '{project}'. 
Choose from {list(projects.keys())}.\"\n )\n return 1\n region = project_config.get(\"region\", region)\n cluster_name = project_config.get(\"cluster\", \"\")\n # Variable replacement in config file uses '{cluster}'.\n cluster = cluster_name\n cluster_ = cluster\n\n # Get service-specific configuration.\n services = project_config.get(\"services\", {})\n service_config = None\n if services:\n service_config = services.get(service, None)\n logger.debug(f\"Service config: {service_config}\")\n if service_config:\n cluster_ = service_config.get(\"cluster\", cluster_name)\n\n cluster_name = replace_config(cluster_, \"cluster\", locals())\n else:\n cluster_name = args.cluster\n\n logger.info(f\"Working in: {region}\")\n\n session = boto3.session.Session()\n ecs_client = session.client(\"ecs\", region)\n ec2_client = session.client(\"ec2\", region)\n ssm_client = session.client(\"ssm\", region)\n\n if args.subcommand == \"by-service-dns\":\n by_service_dns = True\n if use_config:\n service_dns = project_config.get(\"dns\", \"\")\n service_dns_ = service_dns\n if service_config:\n service_dns_ = service_config.get(\"dns\", service_dns)\n service_dns = replace_config(service_dns_, \"service_dns\", locals())\n else:\n service_dns = args.dns\n if not service_dns:\n logger.error(f\"DNS name missing.\")\n return 1\n\n output_info = args.output\n elif args.subcommand == \"by-service-name\":\n by_service_name = True\n if use_config:\n service_name = project_config.get(\"name\", \"\")\n service_name_ = service_name\n if service_config:\n service_name_ = service_config.get(\"name\", service_name)\n service_name = replace_config(\n service_name_, \"service_name\", locals()\n )\n service_name = service_name if service_name else service\n else:\n service_name = args.name\n elif args.subcommand == \"by-task-name\":\n by_task_name = True\n if use_config:\n task_name = project_config.get(\"name\", \"\")\n task_name_ = task_name\n if service_config:\n task_name_ = service_config.get(\"name\", task_name)\n task_name = replace_config(task_name_, \"task_name\", locals())\n task_name = task_name if task_name else service\n else:\n task_name = args.name\n elif args.subcommand == \"list-ec2-instances\":\n only_ec2_instances = True\n elif args.subcommand == \"list-clusters\":\n list_clusters = True\n elif args.subcommand == \"list-instances\":\n only_cluster_instances = True\n elif args.subcommand == \"list-services\":\n list_running_services = True\n service_name = None\n elif args.subcommand == \"list-tasks\":\n list_running_tasks = True\n task_name = None\n elif args.subcommand == \"list-configured-services\":\n list_services = True\n service_name = None\n elif args.subcommand == \"list-configured-projects\":\n list_projects = True\n service_name = None\n\n if list_projects:\n if not use_config:\n logger.error(\"Only available when using a configuration file.\")\n return 1\n if not projects:\n logger.error(\n \"Could not load projects from configuration file: '{config}'.\"\n )\n return 1\n print(f\"Found in {config}.\")\n print(*list(projects.keys()), sep=\"\\n\")\n return\n\n # No 'cluster' necessary for 'list-clusters'.\n if not list_clusters and not only_ec2_instances and not cluster_name:\n logger.error(f\"Cluster name missing.\")\n return 1\n\n if list_services:\n if not use_config:\n logger.error(\"Only available when using a configuration file.\")\n return 1\n if not services:\n logger.error(\n \"Could not load services from configuration file: '{config}'.\"\n )\n return 1\n print(f\"Found in {config}.\")\n 
print(*services, sep=\"\\n\")\n return\n elif only_ec2_instances:\n instances = get_instances_form_ec2(client=ec2_client)\n print(json.dumps(instances))\n return\n elif list_clusters:\n clusters = get_clusters(client=ecs_client)\n print(\"\\n\".join(clusters))\n return\n elif only_cluster_instances:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n print(\" \".join(instance_ids))\n return\n elif by_service_name or list_running_services:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n instance_id = get_instance_id_by_service_name(\n instance_ids=instance_ids,\n service=service_name,\n list_services=list_running_services,\n client=ssm_client,\n region=region,\n )\n\n return\n elif by_task_name or list_running_tasks:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_tasks_information(\n task=task_name,\n list_tasks=list_running_tasks,\n cluster=cluster_name,\n client=ecs_client,\n )\n print(instance_ids)\n\n return\n elif by_service_dns:\n logger.info(f\"Checking cluster: {cluster_name}\")\n service_ip = get_host_ip(host_name=service_dns)\n logger.info(f\"IP of {service_dns} is {service_ip}\")\n logger.debug(f\"Output: {output_info}.\")\n if output_info == \"service\":\n print(service_ip)\n return\n else:\n logger.debug(f\"Get instance IDs for cluster:' {cluster_name}'.\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n logger.debug(instance_ids)\n logger.debug(\"Get instance details.\")\n (\n instance_private_ip,\n instance_private_dns,\n instance_id,\n ) = get_instance_info_by_service_dns(\n instance_ids=instance_ids,\n service_ip=service_ip,\n client=ec2_client,\n )\n if output_info == \"ip\":\n print(instance_private_ip)\n return\n elif output_info == \"id\":\n print(instance_id)\n return\n elif output_info == \"all\":\n print(instance_private_ip, instance_id, instance_private_dns)\n return\n logger.error(f\"Not the expected result - nothing accomplished.\")\n return 1", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will 
use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def cli():\n parser=argparse.ArgumentParser(\n description = 'Rotate through a given AWS account for per application keys. Keys are temporarily loaded into environment variables. Asks for a SSO cookie value.')\n parser.add_argument('role', help = 'Role to harvest session keys as')\n parser.add_argument(\n '-c', '--command', help = 'Custom command to run.', default = None)\n parser.add_argument('-a', '--application',\n help = 'Provide a specific application', default = None)\n parser.add_argument(\n '-l', '--list', help = 'Provide a list of applications. Lists should be one Application#,Application Name per line', default = None)\n parser.add_argument(\n '-p', '--awspx', help = 'Run awspx across all applications. Install from https://github.com/FSecureLABS/awspx', action=argparse.BooleanOptionalAction, default = False)\n parser.add_argument(\n '-s', '--scoutsuite', help = 'Run ScoutSuite across all applications. Install from https://github.com/nccgroup/ScoutSuite', action=argparse.BooleanOptionalAction, default = False)\n args=parser.parse_args()\n\n print(\"Please provide an SSO cookie value. 
Obtain from the dev console on a web browser, probably named something like x-amz-sso_authn\")\n token=input()\n\n return args.role, args.list, args.application, args.command, token, args.awspx, args.scoutsuite", "def dcos_aws() -> None:", "def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")", "def main():\n\n #01. 
Importing AWS parameters\n config = configparser.ConfigParser()\n config.read_file(open('dwh.cfg'))\n\n KEY = config.get('AWS','KEY')\n SECRET = config.get('AWS','SECRET')\n\n DB_CLUSTER_TYPE = config.get(\"CLUSTER\",\"DB_CLUSTER_TYPE\")\n DB_NUM_NODES = config.get(\"CLUSTER\",\"DB_NUM_NODES\")\n DB_NODE_TYPE = config.get(\"CLUSTER\",\"DB_NODE_TYPE\")\n\n DB_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\",\"DB_CLUSTER_IDENTIFIER\")\n DB_NAME = config.get(\"CLUSTER\",\"DB_NAME\")\n DB_USER = config.get(\"CLUSTER\",\"DB_USER\")\n DB_PASSWORD = config.get(\"CLUSTER\",\"DB_PASSWORD\")\n DB_PORT = config.get(\"CLUSTER\",\"DB_PORT\")\n\n DB_IAM_ROLE_NAME = config.get(\"CLUSTER\", \"DB_IAM_ROLE_NAME\")\n \n print(\"Creating clients for AWS Services\")\n\n #02. Creating clients for AWS Services\n ec2 = boto3.resource (\n 'ec2',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n \n s3 = boto3.resource('s3',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n iam = boto3.client (\n 'iam',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n redshift = boto3.client (\n 'redshift',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n #03. Creating IAM role\n try:\n sparkifyRole = iam.create_role (\n Path='/',\n RoleName=DB_IAM_ROLE_NAME,\n Description='Allows Redshift clusters to call AWS Services on your behalf.',\n AssumeRolePolicyDocument=json.dumps ({\n 'Statement': [{\n 'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}\n }],\n 'Version': '2012-10-17'\n })\n )\n\n except Exception as e:\n print(e)\n\n #04. Ataching policy for IAM role\n iam.attach_role_policy (\n RoleName=DB_IAM_ROLE_NAME,\n PolicyArn='arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'\n )['ResponseMetadata']['HTTPStatusCode']\n \n roleArn = iam.get_role(RoleName=DB_IAM_ROLE_NAME)['Role']['Arn']\n print(roleArn)\n\n #05. Creating redshift cluster\n try:\n print('Creating cluster...')\n response = redshift.create_cluster( \n\n #Hardware parameters\n ClusterType=DB_CLUSTER_TYPE,\n NodeType=DB_NODE_TYPE,\n NumberOfNodes=int(DB_NUM_NODES),\n\n #Identifiers & credentials parameters\n DBName=DB_NAME,\n ClusterIdentifier=DB_CLUSTER_IDENTIFIER,\n MasterUsername=DB_USER,\n MasterUserPassword=DB_PASSWORD, \n\n #Role parameters\n IamRoles=[roleArn]\n )\n print ('Cluster will be created on redshift console.')\n \n except Exception as e:\n print(e)\n \n #06. 
Open an incoming TCP port to access the cluster endpoint\n myClusterProps = redshift.describe_clusters(ClusterIdentifier=DB_CLUSTER_IDENTIFIER)['Clusters'][0]\n\n try:\n print('Opening an incoming TCP port to access the cluster endpoint...')\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DB_PORT),\n ToPort=int(DB_PORT)\n )\n print('TCP port opened.')\n except Exception as e:\n print(e)", "def cli(ctx):", "def cli(ctx):", "def cli(ctx):\n #TODO", "def main(arguments):\n\n # set up logging\n # logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\n # pull the setup data from hiera based on the node identifier given\n # hiera will return nil for unset variables that were queried, set some safe defaults\n metadata = metadata_get(arguments['<name>'])\n if 'region' not in metadata or metadata['region'] == 'nil':\n metadata['region'] = 'us-east-1'\n\n # handle arguments from docopt\n if arguments['check']:\n # check comes first since we don't need actually valid metadata to print what we found\n metadata_print(metadata)\n\n elif arguments['status']:\n # this status may (eventually) print all running instances from any provider\n # not entirely sure how to handle multiple regions/datacenters yet\n # status has only an optional filter, so if we get here without a name print all\n if arguments['<name>'] is None or metadata['hostname'] == 'nil':\n metadata['fqdn'] = '*'\n resource = boto3.resource('ec2', region_name=metadata['region'])\n ec2_status(resource, metadata)\n\n elif metadata['provider'] == 'aws':\n # make connection to ec2 and then perform actions\n resource = boto3.resource('ec2', region_name=metadata['region'])\n\n if arguments['start']:\n ec2_start(resource, metadata)\n elif arguments['stop']:\n ec2_stop(resource, metadata)\n elif arguments['toggle']:\n # we either start or stop to go to inverse of the current state\n count = ec2_status(resource, metadata, return_count=True)\n if count == 0:\n ec2_start(resource, metadata)\n else:\n ec2_stop(resource, metadata)\n\n elif metadata['provider'] == 'do':\n print(\"Digitalocean not yet supported\")\n\n else:\n # not really sure\n print(\"Unsupported metadata:provider from hiera: {0}\".format(metadata['provider']))\n sys.exit(1)", "def main():\n args = _parse_arguments()\n urlgetter = _default_urlgetter(cache_dir=args.cache)\n offers = Offers(\n start=args.start,\n urlgetter=urlgetter,\n )\n argsv = vars(args)\n service = argsv.get('service', 's3')\n region = argsv.get('region', 'us-east-1')\n offer = offers.get_offer(service)\n terms = offer.get_terms(region)\n print(terms)", "def init():\n\n @click.group()\n def aws():\n \"\"\"Manage treadmill on AWS\"\"\"\n pass\n\n @aws.command(name='init')\n def init():\n \"\"\"Initialise ansible files for AWS deployment\"\"\"\n pass\n # destination_dir = os.getcwd() + '/deploy'\n # try:\n # os.makedirs(destination_dir)\n # except OSError as e:\n # if e.errno == errno.EEXIST:\n # print('''AWS \"deploy\" directory already exists in this folder\n # \\n''', destination_dir)\n # copy_tree(deploy_path_join('../deploy'), destination_dir)\n\n @aws.command(name='cell')\n @click.option('--create', required=False, is_flag=True,\n help='Create a new treadmill cell on AWS',)\n @click.option('--destroy', required=False, is_flag=True,\n help='Destroy treadmill cell on AWS',)\n @click.option('--playbook', 
help='Playbok file',)\n @click.option('--inventory',\n 'controller.inventory',\n help='Inventory file',)\n @click.option('--key-file',\n default='key.pem',\n help='AWS ssh pem file',)\n @click.option('--aws-config',\n 'config/aws.yml',\n help='AWS config file',)\n @click.option('--with-freeipa/--no-freeipa',\n default=False,\n help='Create Cell with freeIPA',)\n def cell(create, destroy, playbook,\n inventory, key_file,\n aws_config, with_freeipa):\n \"\"\"Manage treadmill cell on AWS\"\"\"\n pass\n # playbook_args = [\n # 'ansible-playbook',\n # '-i',\n # inventory,\n # '-e',\n # 'aws_config={}'.format(aws_config) +\n # ' freeipa={}'.format(with_freeipa),\n # ]\n # if create:\n # playbook_args.extend([\n # playbook or deploy_path_join('cell.yml'),\n # '--key-file',\n # key_file,\n # ])\n # elif destroy:\n # playbook_args.append(\n # playbook or deploy_path_join('destroy-cell.yml')\n # )\n # else:\n # return\n\n # playbook_cli = PlaybookCLI(playbook_args)\n # playbook_cli.parse()\n # playbook_cli.run()\n\n @aws.command(name='node')\n @click.option('--create',\n required=False,\n is_flag=True,\n help='Create a new treadmill node',)\n @click.option('--playbook',\n 'node.yml',\n help='Playbok file',)\n @click.option('--inventory',\n 'controller.inventory',\n help='Inventory file',)\n @click.option('--key-file',\n default='key.pem',\n help='AWS ssh pem file',)\n @click.option('--aws-config',\n 'config/aws.yml',\n help='AWS config file',)\n def node(create, playbook, inventory, key_file, aws_config):\n \"\"\"Manage treadmill node\"\"\"\n pass\n # if create:\n # playbook_cli = PlaybookCLI([\n # 'ansible-playbook',\n # '-i',\n # inventory,\n # playbook,\n # '--key-file',\n # key_file,\n # '-e',\n # 'aws_config={}'.format(aws_config),\n # ])\n # playbook_cli.parse()\n # playbook_cli.run()\n\n del cell\n del node\n\n return aws", "def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). 
Output was:\\n{text_output}\") from e\n\n return json_obj", "def ssm_run_command():\n try:\n table_name = CONTENT_TABLE_NAME\n ssm_client = boto3.client('ssm', config=MSAM_BOTO3_CONFIG)\n db_resource = boto3.resource('dynamodb', config=MSAM_BOTO3_CONFIG)\n db_table = db_resource.Table(table_name)\n instance_ids = {}\n items = []\n # get all the managed instances from the DB with tag MSAM-NodeType\n response = db_table.query(\n IndexName=\"ServiceRegionIndex\",\n KeyConditionExpression=Key(\"service\").eq(\"ssm-managed-instance\"),\n FilterExpression=\"contains(#data, :tagname)\",\n ExpressionAttributeNames={\"#data\": \"data\"},\n ExpressionAttributeValues={\":tagname\": \"MSAM-NodeType\"}\n )\n if \"Items\" in response:\n items = response[\"Items\"]\n while \"LastEvaluatedKey\" in response:\n response = db_table.query(\n IndexName=\"ServiceRegionIndex\",\n KeyConditionExpression=Key(\"service\").eq(\"ssm-managed-instance\"),\n FilterExpression=\"contains(#data, :tagname)\",\n ExpressionAttributeNames={\"#data\": \"data\"},\n ExpressionAttributeValues={\":tagname\": \"MSAM-NodeType\"},\n ExclusiveStartKey=response['LastEvaluatedKey']\n )\n if \"Items\" in response:\n items.append(response[\"Items\"])\n\n for item in items:\n data = json.loads(item['data'])\n if \"MSAM-NodeType\" in data[\"Tags\"]:\n instance_ids[data['Id']] = data['Tags']['MSAM-NodeType']\n\n # get all the SSM documents applicable to MSAM, filtering by MSAM-NodeType tag\n # When we support more than just ElementalLive, add to the list of values for MSAM-NodeType during filtering\n document_list = ssm_client.list_documents(\n Filters=[\n {\n 'Key': 'tag:MSAM-NodeType',\n 'Values': [\n 'ElementalLive',\n ]\n },\n {\n 'Key': 'Owner',\n 'Values': [\n 'Self'\n ]\n }\n ]\n )\n document_ids = document_list['DocumentIdentifiers']\n while \"NextToken\" in document_list:\n document_list = ssm_client.list_documents(\n Filters=[\n {\n 'Key': 'tag:MSAM-NodeType',\n 'Values': [\n 'ElementalLive',\n ]\n },\n {\n 'Key': 'Owner',\n 'Values': [\n 'Self'\n ]\n }\n ],\n NextToken=document_list[\"NextToken\"]\n )\n document_ids.append(document_list['DocumentIdentifiers'])\n\n document_names = {}\n for document in document_ids:\n if \"Tags\" in document:\n for tag in document[\"Tags\"]:\n if tag['Key'] == \"MSAM-NodeType\":\n document_names[document[\"Name\"]] = tag['Value']\n\n # loop over all instances and run applicable commands based on node type\n for id, id_type in instance_ids.items():\n for name, doc_type in document_names.items():\n if id_type in doc_type:\n # maybe eventually doc type could be comma-delimited string if doc applies to more than one type?\n print(\"running command: %s on %s \" % (name, id))\n try:\n response = ssm_client.send_command(\n InstanceIds=[\n id,\n ],\n DocumentName=name,\n TimeoutSeconds=600,\n Parameters={\n },\n MaxConcurrency='50',\n MaxErrors='0',\n CloudWatchOutputConfig={\n 'CloudWatchLogGroupName': SSM_LOG_GROUP_NAME,\n 'CloudWatchOutputEnabled': True\n }\n )\n print(response)\n except ClientError as error:\n print(error)\n if error.response['Error']['Code'] == \"InvalidInstanceId\":\n continue\n except ClientError as error:\n print(error)", "def main():\n\n # get AWS credentials\n aws_credentials = read_aws_credentials()\n access_key_id = aws_credentials['access_key_id']\n secret_access_key = aws_credentials['secret_access_key']\n aws_region = aws_credentials['region']\n\n # build Docker image\n docker_client = docker.from_env()\n image, build_log = docker_client.images.build(\n path='.', 
tag=LOCAL_REPOSITORY, rm=True)\n\n # get AWS ECR login token\n ecr_client = boto3.client(\n 'ecr', aws_access_key_id=access_key_id, \n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecr_credentials = (\n ecr_client\n .get_authorization_token()\n ['authorizationData'][0])\n\n ecr_username = 'AWS'\n\n ecr_password = (\n base64.b64decode(ecr_credentials['authorizationToken'])\n .replace(b'AWS:', b'')\n .decode('utf-8'))\n\n ecr_url = ecr_credentials['proxyEndpoint']\n\n # get Docker to login/authenticate with ECR\n docker_client.login(\n username=ecr_username, password=ecr_password, registry=ecr_url)\n\n # tag image for AWS ECR\n ecr_repo_name = '{}/{}'.format(\n ecr_url.replace('https://', ''), LOCAL_REPOSITORY)\n\n image.tag(ecr_repo_name, tag='latest')\n\n # push image to AWS ECR\n push_log = docker_client.images.push(ecr_repo_name, tag='latest')\n\n # force new deployment of ECS service\n ecs_client = boto3.client(\n 'ecs', aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecs_client.update_service(\n cluster=ECS_CLUSTER, service=ECS_SERVICE, forceNewDeployment=True)\n\n return None", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def ec2_list(ctx):\n\n from opstools.aws import ec2_list as this_ec2_list\n this_ec2_list.main()" ]
[ "0.78308004", "0.6709078", "0.6430246", "0.64011174", "0.63430303", "0.63402027", "0.6281673", "0.6235944", "0.6224367", "0.6212776", "0.62037164", "0.6196544", "0.6187916", "0.61663157", "0.6166255", "0.61566585", "0.61485034", "0.61453444", "0.6092449", "0.608155", "0.608155", "0.606476", "0.6026055", "0.5997103", "0.5996426", "0.5942293", "0.59257567", "0.5918091", "0.5900359", "0.58543515" ]
0.7529648
1
Given a bucket location for load balancer logs, read and parse the latest logs. Currently only supports application loadbalancers
def lb_logs(ctx, lb, last, search):
    search_items = check_search_argument(search)
    from opstools.aws import lb_logs as this_ec2_list
    this_ec2_list.main(lb, last, search_items)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bucket_logging(Bucket=None):\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def collect_k8s_logs(cfg: ElasticBlastConfig):\n dry_run = cfg.cluster.dry_run\n k8s_ctx = cfg.appstate.k8s_ctx\n if not k8s_ctx:\n raise RuntimeError(f'kubernetes context is missing for {cfg.cluster.name}')\n # TODO use named constants for labels and containers\n # also modify corresponding YAML templates and their substitution\n get_logs(k8s_ctx, 'app=setup', [K8S_JOB_GET_BLASTDB, K8S_JOB_IMPORT_QUERY_BATCHES, K8S_JOB_SUBMIT_JOBS], dry_run)\n get_logs(k8s_ctx, 'app=blast', [K8S_JOB_BLAST, K8S_JOB_RESULTS_EXPORT], dry_run)", "def read_s3_file(date):\n \"\"\" history from S3 \"\"\"\n bucket = os.getenv(\"SPOTIFY_BUCKET_NAME\")\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\")\n s3 = boto3.resource('s3')\n try:\n s3.Object(bucket, \"%s/%s.json\" % (path, date)).load()\n except botocore.exceptions.ClientError as e:\n logger.info(\"No existing history file found for %s, %s\" %\n (date, e.response['Error']['Code']))\n if e.response['Error']['Code'] == '404':\n return []\n else:\n logger.warning(\"Unexpected error code returned!\")\n return []\n else:\n logger.info(\"Reading history file for %s\" % date)\n content_object = s3.Object(bucket, \"%s/%s.json\" % (path, date))\n file_content = content_object.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n return json_content", "def clb_access_logging_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n if elb.describe_load_balancer_attributes(LoadBalancerName=clbName)[\"LoadBalancerAttributes\"][\"AccessLog\"][\"Enabled\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled. 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def getLogs():", "def getLogs():", "def get_latest_data(bucket, dir):\n # get all the scraped json files in the directory in the bucket\n files = client.list_objects_v2(Bucket=BUCKET,\n Prefix=DIR)['Contents']\n # read the data from the object\n str_file = client.get_object(\n Bucket=BUCKET, Key=files[-1]['Key'])['Body'].read().decode('UTF-8')\n data = json.loads(str_file)\n return data", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def download_cloudtrail_logs(target_dir, bucket, cloudtrail_prefix, org_ids,\n account_ids, regions, from_date, to_date, parallelism):\n prefixes = _s3_key_prefixes(cloudtrail_prefix, org_ids, account_ids, regions, from_date, to_date)\n _s3_download_recursive(bucket, prefixes, target_dir, parallelism)", "def get_rolling_log_history():\n current_tag = get_current_tag()\n return get_log_history(current_tag)", "def read_pod_logs(self, pod: Pod) -> Generator[str, None, None]:\n\n # The timestamps returned from the Kubernetes API are in nanoseconds, and appear to never duplicate\n # across lines so we can use the timestamp plus the line content to deduplicate log lines across\n # multiple runs\n last_line = \"\"\n # We use a variable here instead of looping on self.pod_is_running so that we can get one more read\n # in the loop before breaking out\n pod_is_running = True\n\n try:\n while pod_is_running:\n pod_is_running = self.pod_is_running(pod)\n if not pod_is_running:\n self.log.info(\"pod stopped, pulling logs one more time\")\n\n for line in self._read_pod_log_chunk(pod, last_line):\n timestamp, log_line = line.split(b\" \", 1)\n yield log_line\n last_line = line\n\n time.sleep(POD_LOGS_POLL_INTERVAL_SECONDS)\n except BaseHTTPError as e:\n raise AirflowException(\n 'There was an error reading the kubernetes API: {}'.format(e)\n )", "def endpoint_log(self, endpoint_name=None, since=None):\n if endpoint_name is None:\n url = '/v1.1/endpoint/log'\n else:\n url = '/v1.1/endpoints/%s/log' % endpoint_name\n if since is not None:\n url += '?since=%f' % float(since)\n _, body = self.request(url, 'GET')\n return body", "def logs_bucket(self, logs_bucket):\n\n self._logs_bucket = logs_bucket", "def list_bucket(self, bucket):\n\n self.response.write(\"Listbucket result:\\n\")\n\n # Production apps should set page_size to a practical value.\n page_size = 1\n stats = cloudstorage.listbucket(bucket + \"/foo\", max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write(\"\\n\")\n\n if count != page_size or count == 0:\n break\n stats = cloudstorage.listbucket(\n bucket + \"/foo\", max_keys=page_size, marker=stat.filename\n )", "def access_log_configs(self) -> Sequence['outputs.GetLoadBalancersBalancerAccessLogConfigResult']:\n 
return pulumi.get(self, \"access_log_configs\")", "def list_bucket(self, bucket):\n self.response.write('Listbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,\n marker=stat.filename)", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. \")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def fetch_error_log(self):\n content = []\n\n def get_lines_at_tail(log_file, max_count):\n \"\"\"Fetch last n lines from a big file.\"\"\"\n if not os.path.exists(log_file):\n return []\n\n file_size = os.path.getsize(log_file)\n # Assume that in average a line has 512 characters at most\n block_size = max_count * 512 if max_count > 0 else file_size\n\n with open(log_file, \"r\") as file_handle:\n if file_size > block_size > 0:\n max_seek_point = file_size // block_size\n file_handle.seek((max_seek_point - 1) * block_size)\n elif file_size:\n file_handle.seek(0, os.SEEK_SET)\n lines = file_handle.read().splitlines()\n while lines and not lines[-1]:\n lines.pop()\n return lines[-max_count:] if max_count > 0 else lines\n\n logging_paths = {self.errpath, self.outpath, self.logpath}\n if self.cfg.file_logger:\n file_log_path = os.path.join(self.runpath, self.cfg.file_logger)\n if file_log_path not in logging_paths:\n logging_paths.add(file_log_path)\n\n for path in logging_paths:\n lines = (\n get_lines_at_tail(path, self.cfg.error_logs_max_lines)\n if path\n else []\n )\n if lines:\n if content:\n content.append(\"\")\n content.append(\"Information from log file: {}\".format(path))\n content.extend([\" {}\".format(line) for line in lines])\n\n return content", "def read_last_line_in_data_log():\n timestamp = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'\n log_file_path += os.sep + timestamp\n file_name = log_file_path + os.sep + timestamp + \"_data.txt\"\n # file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data\n try:\n with open(file_name, 'rb') as f:\n f.seek(-2, os.SEEK_END)\n while f.read(1) != b'\\n':\n f.seek(-2, os.SEEK_CUR)\n content = f.readline().decode()\n except:\n with open(file_name, 'rb') as f:\n content = f.readlines()[-1].decode()\n return content", "def list_all_buckets(riak_host,riak_port):\n url='http://%s:%s/buckets?buckets=true' % (riak_host,riak_port)\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def read_linelog():", "def get_logs(self, job_id):\n\n # Get the logstream name\n response = self.batch_client.describe_jobs(jobs=[job_id])\n logstream = response[\"jobs\"][0][\"container\"][\"logStreamName\"]\n\n # Keep a list with the log messages\n logs = []\n\n # Get the logs\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\", logStreamName=logstream\n )\n\n # Add to the list\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n # Keep getting more pages\n while response[\"nextForwardToken\"] is not None:\n\n # Keep track of the last token used\n last_token = 
response[\"nextForwardToken\"]\n\n # Get the next page\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\",\n logStreamName=logstream,\n nextToken=last_token,\n )\n\n # If the token is the same, we're done\n if response[\"nextForwardToken\"] == last_token:\n response[\"nextForwardToken\"] = None\n else:\n # Otherwise keep adding to the logs\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n return logs", "def collect_backups(self, bucketname, prefix):\n backups = []\n \n bucket = self.conn.get_bucket(bucketname)\n\n logger.info(\"Scanning for backups: s3://%s/%s\", bucketname, prefix)\n\n for entry in natsort([key.name for key in bucket.list(prefix)]):\n # Check for a time stamp in the directory entry's name.\n match = TIMESTAMP_PATTERN.search(entry)\n if match:\n # Make sure the entry matches the given include/exclude patterns.\n if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):\n logger.debug(\"Excluded %r (it matched the exclude list).\", entry)\n elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):\n logger.debug(\"Excluded %r (it didn't match the include list).\", entry)\n else:\n backups.append(S3Backup(\n pathname=entry,\n timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),\n ))\n else:\n logger.debug(\"Failed to match time stamp in filename: %s\", entry)\n if backups:\n logger.info(\"Found %i timestamped backups in %s.\", len(backups), bucket)\n return sorted(backups)", "def read_bucket_objects(bucket_name):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.all():\n response = obj.get()\n body = response['Body'].read().decode('utf-8')\n print(body)", "def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]", "def list_replays(bucket_url):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n\n table = []\n bucket = bucket_dict(bucket_url)\n try:\n resp = client(\"s3\").list_objects_v2(Bucket=bucket.get('bucket_name'), Delimiter='/', Prefix='analysis/')\n if resp['KeyCount'] == 0:\n logger.error(f\"No replays available in S3. Please run a replay with replay analysis to access replays \"\n f\"from the command line.\")\n exit(-1)\n\n except Exception as e:\n logger.error(f\"Unable to access replays in S3. Please confirm bucket. {e}\")\n exit(-1)\n\n s3 = boto3.resource('s3')\n print(f\"Listed below are all the replay reports located in the S3 bucket: {bucket_url}.\\n\")\n\n for x in resp['CommonPrefixes']:\n try:\n s3.Object(bucket.get('bucket_name'), f'{x.get(\"Prefix\")}out/info.json').load()\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\": # if info.json does not exist in folder, do not add to list\n continue\n else:\n logger.error(f\"Unable to access replay. 
{e}\")\n\n content_object = s3.Object(bucket.get('bucket_name'), f'{x.get(\"Prefix\")}out/info.json')\n file_content = content_object.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n\n table.append([json_content['replay_id'],\n json_content['id'],\n json_content['start_time'],\n json_content['end_time'],\n json_content['replay_tag']])\n # use tabulate lib to format output\n print(tabulate(table, headers=[\"Replay\", \"Cluster ID\", \"Start Time\", \"End Time\", \"Replay Tag\"]))", "def get_bucket_lifecycle_configuration(Bucket=None):\n pass", "def get_logs(job_key):\n job = Job.fetch(job_key, connection=conn)\n if job.is_finished:\n logs = job.result\n elif job.is_failed:\n logs = job.exc_info\n else:\n logs = \"Task is still running\"\n return str(logs), 200", "def get_lines_from_logs(logs):\n for log in logs:\n if log.endswith('gz'):\n with gzip.open(log, mode='rt') as fin:\n for line in fin:\n yield line\n else:\n with open(log) as fin:\n for line in fin:\n yield line" ]
[ "0.6314314", "0.5689416", "0.5672334", "0.5559864", "0.55531245", "0.54379284", "0.54379284", "0.5270909", "0.5237804", "0.52304417", "0.521594", "0.5178806", "0.51596767", "0.51460856", "0.5141342", "0.51357716", "0.51021546", "0.5082686", "0.5015959", "0.49990225", "0.49807477", "0.4979189", "0.4973089", "0.496922", "0.49674165", "0.4957362", "0.49498752", "0.49262306", "0.4921808", "0.49135286" ]
0.62359434
1
Print a report of what is using a security group
def sg_report(ctx, security_group_id, all_sgs):
    from opstools.aws import sg_report as this_sg_report
    this_sg_report.main(security_group_id, all_sgs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_secgroups(self, name=None):", "def show_security_group(self, security_group, **_params):\r\n return self.get(self.security_group_path % (security_group),\r\n params=_params)", "def show_signatories_for_group(oid):\n signatories = signatories_for_group(oid)\n group = group_by_oid(oid)\n\n if not signatories and not group:\n print(red('CalLink OID {} not found.'.format(oid)))\n return\n elif not group:\n print(yellow(('CalLink OID {} has signatories but is currently not '\n 'active on CalLink.').format(oid)))\n\n if group:\n accounts = ' '.join(group['accounts']) if group['accounts'] else 'n/a'\n subtitle = 'Group accounts: {}'.format(accounts)\n title = '{} (OID: {} - Email: {})'.format(group['name'], oid, group['email'])\n else:\n subtitle = None\n title = '<unknown group> ({})'.format(oid)\n\n print_title(title, subtitle=subtitle)\n\n if signatories:\n columns = [(attrs['name'], uid) for uid, attrs in signatories.items()]\n headers = ('Signatory', 'UID')\n print(tabulate(columns, headers=headers))\n else:\n print('No signatories found.')", "def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))", "def show_groups_by_student_signat(uid):\n name = directory.name_by_calnet_uid(uid)\n\n if not name:\n print(red('CalNet UID {} not found.').format(uid))\n return\n\n title = '{} ({})'.format(name, uid)\n print_title(title)\n\n groups = groups_by_student_signat(uid)\n\n if groups:\n columns = [(attrs['name'], ' '.join(attrs['accounts']) or 'n/a', oid)\n for oid, attrs in groups.items()]\n headers = ('Group', 'Accounts', 'OID')\n print(tabulate(columns, headers=headers))\n else:\n print('Not a signatory of any student group.')", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def show_signatories_by_group_name(name):\n print('Searching for groups... 
', end='', flush=True)\n\n groups = list_groups(name=name)\n\n if not groups:\n print()\n print(red('No student groups found.'))\n return\n\n plural_case = 'entry' if len(groups) == 1 else 'entries'\n\n print('Found {} {}.'.format(len(groups), plural_case))\n\n print('Searching for signatories...')\n\n for (oid, attrs) in groups.items():\n print()\n show_signatories_for_group(oid)", "def get_security_group_short_name(self):\n return self.config['security_group']", "def security_groups(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"security_groups\")", "def test_break_security_group_usual_case_specify_sg():", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def __repr__(self):\n return str(self.group)", "def group_describe(self, group):\n mapped = self.map_vects(datanorm)\n mappednp= np.array(mapped)\n \n groups= mappednp[:,0]\n data['Group'] = pd.Series(groups, index=data.index)\n print(data[data['Group']==group].describe())", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def showStat(self):\n print \">>[Stat Information]:\"\n if self.gid != DEFALUT_GROUP_ID:\n print \"Gid = %u\" % self.gid\n print \"[Queries] Arp = %u, Original_to_controller= %u, Current_to_controller = %u\" % (self.query_arp, self.query_control_origin, self.query_control_current)\n print \"TP = %u, TN = %u, FP = %u\" % (self.tp, self.tn, self.fp)\n print \"[Flow] local_switch = %u, within the group = %u,across groups = %u\" % (self.flow_local, self.flow_within_group, self.flow_cross_group)\n print \"[Traffic] local_switch = %u byte, within the group = %u byte,across groups = %u byte\" % (self.byte_local, self.byte_within_group, self.byte_cross_group)", "def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)", "def security_group_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_group_arns\")", "def log_group_arn(self) -> str:\n ...", "def desc_groups(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"].replace(\",\", \" \").split()\n print(sgid)\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_security_groups(GroupIds=sgid)\n attachment = MessageAttachmentsClass()\n for i in range(len(response[\"SecurityGroups\"])):\n d = response[\"SecurityGroups\"][i][\"Description\"]\n attachment.title = d\n message.message_text = \"Description of Security groups are:\"\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Delete Security Group\"\n button.value = \"Delete Security Group\"\n button.name = \"Delete Security Group\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"delete-security-group\",\n \"data\": 
{\"Security-group-ID\":response[\"SecurityGroups\"][i][\"Description\"], \"Region\": region}}\n attachment.attach_button(button)\n\n return message.to_json()", "def test_aws_service_api_security_groups_get(self):\n pass", "async def display_group(ctx, owner: str, group_name: str=None, option: str=None):\n\n groups = bg_bot.manager.get_groups(owner, group_name)\n\n if len(groups) == 0:\n await ctx.send(\"No groups exist that match the input criteria.\")\n else:\n embed = discord.Embed(title=\"Open Groups\")\n for group in groups:\n if group.comp:\n open_spots = [group.comp[role]['number'] - len(group.comp[role]['players']) for role in group.comp]\n availability = f'Open Spots\\nTanks: {open_spots[0]}, Healers: {open_spots[1]}, DPS: {open_spots[2]}'\n else:\n availability = f'No specified comp, {group._max - group._total} spots left'\n \n embed.add_field(name=f'{group.name} by {group.owner}: {group.rating} {group.group_type}', value=availability)\n\n await ctx.send(embed=embed)", "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def cute_output(insights_request):\n json_report = insights_request.get_insights()\n\n if not json_report:\n print('Error ocurred, unable to print!!!')\n else:\n for groups in json_report:\n print('GROUP: ' + groups['display_name'])\n for systems in groups['systems']:\n print('\\n\\t\\t Host name: ' + systems['hostname'])\n print('\\n\\t\\t Product: ' + systems['product'])\n print('\\n\\t\\t Type: ' + systems['type'])\n print('\\n\\t\\t Registered at Insights: ' + systems['created_at'])\n print('\\n\\t\\t Last checked at Insights: ' + systems['last_check_in'] + '\\n\\n')", "def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None", "def check_security_group(self):\n return True", "def mysummary(self):\n return self.sprintf(\"IGMPv3 Group Record %IGMPv3gr.type% %IGMPv3gr.maddr%\")", "def cli(ctx, group_id):\n return ctx.gi.groups.show_group(group_id)", "def print_group_message(group, contact, message):\n print(f\"{group}: {contact}: {message}\")" ]
[ "0.643156", "0.64158314", "0.6374457", "0.6211249", "0.614532", "0.6136983", "0.61095774", "0.5930601", "0.5903047", "0.5879476", "0.58454436", "0.58454436", "0.57944185", "0.57904917", "0.5784019", "0.5784019", "0.57687044", "0.5731467", "0.57308704", "0.5686011", "0.56849176", "0.5675036", "0.56690896", "0.56677294", "0.5663509", "0.5638313", "0.5615224", "0.5615131", "0.5573781", "0.55547154" ]
0.66926736
0
Checks [search] against a regex for the correct format
def check_search_argument(search):
    if search != '' and not re.match(r"^(([\w.:\/\-)+\=([\w.:\/\-])+\s?)+", search):
        print("The search items must match the format 'field=string'")
        sys.exit(0)
    search_items = search.split(' ')
    return search_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_search_regex(self):\n # search via regex emails.\n test = self.data.search(regex='[-\\w\\d.+]+@[-\\w\\d.]+', all_names=True) # noqa\n # taking out the self.assertIn until I figure out the order of the\n # tests. See test_zeditor() for more information.\n\n # self.assertIn('[email protected]', test[0].title)\n\n # search via regex phone numbers.\n test_2 = self.data.search(regex='\\(?\\d{3}\\)?-?\\s?\\d{3}-\\d{4}',\n all_names=True)\n self.assertIn('(555) 555-3425', test_2[0].notes)", "def validate_string_search(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.search(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out", "def search_by_regex(self):\n print(\"*** Regex Search ***\\n\")\n print(\"Enter a regular expression (REGEX) to search NAMES and NOTES...\")\n print(\"DO NOT include either single (') or double (\\\") quotes\")\n while True:\n try:\n regex = input(\">>> \")\n results = self.regex_entry_search(regex)\n except:\n print(\"Couldn't parse regex. Please try again\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for regex \\\"{regex}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def search(self, regex):\n if isinstance(regex, str):\n regex = re.compile(regex, re.IGNORECASE)\n return regex.search(self.sequence)", "def regex_search(regex, *fields):\n for match_field in fields:\n if re.search(regex, match_field):\n return True\n return False", "def advanced_search(self, pattern):\n pass", "def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1", "def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def _verify_format(s, format):\n r = re.compile(format)\n if r.match(s) is not None:\n return True\n return False", "def __search(findwhat, content, ignorecase, regexp):\n\t\tfrom re import search, IGNORECASE\n\t\tif regexp:\n\t\t\tif ignorecase:\n\t\t\t\tflag = IGNORECASE\n\t\t\telse:\n\t\t\t\tflag = 0\n\t\t\tif search(findwhat, content, flag):\n\t\t\t\treturn True\n\t\telse:\n\t\t\tif ignorecase:\n\t\t\t\tcontent = content.lower()\n\t\t\t\tfindwhat = findwhat.lower()\n\t\t\t\t\n\t\t\tif content.find(findwhat) != -1:\n\t\t\t\treturn True\n\t\treturn False", "def address_regex(self) -> Any:", "def matches_rule(word):\n return re.search(pattern, word)", "def validate_regex(self, pattern, flags=0):\r\n try:\r\n re.compile(pattern, flags)\r\n return False\r\n except:\r\n errormsg(_(\"Invalid Regular Expression!\"))\r\n error(traceback.format_exc())\r\n return True", "def search(self, regexp):\n try:\n self.rematch = regexp.search(self.matchstring)\n except AttributeError:\n self.rematch = re.search(regexp, self.matchstring)\n return bool(self.rematch)", "def build_regex_search(search_string):\n\n sspat = None\n valid_flags = {\n 'i': re.IGNORECASE\n }\n if search_string:\n try:\n search_string, flag_letters = re.match(r'^(.+?)(?:/([a-z]+))?$', search_string).groups()\n flags = 0\n # if flags are given, OR together all the valid flags\n # see https://docs.python.org/3/library/re.html#re.compile\n if flag_letters:\n for letter in flag_letters:\n if letter in valid_flags:\n flags = flags | valid_flags[letter]\n sspat = re.compile(search_string, flags)\n except re.error:\n sspat = None\n\n return sspat", "def found(self, command, regex):\n result = 
self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False", "def found(self, command, regex):\n result = self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False", "def validate_lookup_date_format(search_query):\n try:\n datetime.datetime.strptime(search_query, '%d/%m/%Y')\n clear()\n return search_query\n\n except ValueError:\n clear()\n return False", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "async def match_regex(text, opts):\n\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n return regex.IGNORECASE\n\n if opts[\"matching_condition\"].lower() == \"search\":\n matched_regex = regex.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n matched_regex = regex.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n matched_regex = regex.match(opts[\"expression\"], text, is_case_sensitive())\n return matched_regex", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def check_match_pattern(self):\n text = self.ui.plainTextEdit.toPlainText()\n pattern = self.ui.textPattern.text()\n result = re.search(pattern, text)\n group = int(self.ui.spinGroup.text())\n if result:\n self.ui.textMatch.setText(result.group(group))", "def test_syntax_converter_expand_search_patterns_alone(self):\n spi_search = \"find t bob sam\"\n inv_search = \"title:bob and title:sam\"\n self._compare_searches(inv_search, spi_search)", "def checked_regexp(regexp, value, label):\n if isinstance(regexp, (unicode, str)):\n match = re.match(regexp, value)\n else:\n match = regexp.match(value)\n if match is None:\n flash(label + \" Is Incorrectly Formatted\")\n return None\n else:\n return match", "def fix_search(search):\n ends = ['w', 'l', 'i', 'n', 'f', 'p', 'x', 's']\n if not search:\n return\n if isinstance(search, STRINGTYPE):\n return search\n if search.get('t'):\n return search\n newsearch = {}\n for srch, pat in search.items():\n if len(srch) == 1 and srch in ends:\n srch = 'm%s' % srch\n if isinstance(pat, dict):\n for k, v in list(pat.items()):\n if k != 'w':\n newsearch[srch + k] = pat_format(v)\n else:\n newsearch[srch] = pat_format(v)\n else:\n newsearch[srch] = pat_format(pat)\n return newsearch", "def test_regex_constraint(self):\n from petstore_api.model import apple\n\n # Test with valid regex pattern.\n inst = apple.Apple(\n cultivar=\"Akane\"\n )\n assert isinstance(inst, apple.Apple)\n\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"cHiLe\"\n )\n assert isinstance(inst, apple.Apple)\n\n # Test with invalid regex pattern.\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'cultivar'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"!@#%@$#Akane\"\n )\n\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'origin'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"!@#%@$#Chile\"\n )", "def 
validate_search_inputs(self):\r\n\r\n debug(\"validate\")\r\n fail = False\r\n msg = \"\"\r\n if self.m_regex_search_checkbox.GetValue():\r\n if self.m_searchfor_textbox.GetValue() == \"\" or self.validate_search_regex():\r\n msg = _(\"Please enter a valid search regex!\")\r\n fail = True\r\n elif self.m_searchfor_textbox.GetValue() == \"\":\r\n msg = _(\"Please enter a valid search!\")\r\n fail = True\r\n if not fail and self.m_fileregex_checkbox.GetValue():\r\n if self.m_filematch_textbox.GetValue().strip() == \"\" or self.validate_regex(self.m_filematch_textbox.Value):\r\n msg = \"Please enter a valid file regex!\"\r\n fail = True\r\n elif self.m_filematch_textbox.GetValue().strip() == \"\":\r\n msg = _(\"Please enter a valid file pattern!\")\r\n fail = True\r\n if not fail and self.m_dirregex_checkbox.GetValue():\r\n if self.validate_regex(self.m_exclude_textbox.Value):\r\n msg = _(\"Please enter a valid exlcude directory regex!\")\r\n fail = True\r\n if not fail and not exists(self.m_searchin_text.GetValue()):\r\n msg = _(\"Please enter a valid search path!\")\r\n fail = True\r\n if (\r\n not fail and\r\n self.m_logic_choice.GetStringSelection() != \"any\" and\r\n re.match(r\"[1-9]+[\\d]*\", self.m_size_text.GetValue()) is None\r\n ):\r\n msg = _(\"Please enter a valid size!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_modified_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a modified date!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_created_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a created date!\")\r\n fail = True\r\n if fail:\r\n errormsg(msg)\r\n return fail", "def match(self, s):\n self.matches = self.re.search(s)\n return self.matches", "def _validator_regex(self, field, value):\n try:\n re.compile(value)\n except re.error:\n self._error(field, \"{} is not a valid regex\".format(value))", "def upy_re_match(regex,value):\n reg = re.compile(regex)\n return reg.match(value)" ]
[ "0.69959843", "0.620321", "0.6156715", "0.60446566", "0.6004517", "0.59803575", "0.5927296", "0.5909053", "0.59069043", "0.5873332", "0.58515716", "0.5811227", "0.57970244", "0.57713956", "0.57709163", "0.5760355", "0.5760355", "0.5744164", "0.57201004", "0.57087517", "0.57053506", "0.56720406", "0.5671586", "0.56654805", "0.5660016", "0.56201434", "0.56117094", "0.5596796", "0.5576637", "0.55754954" ]
0.6678549
1
Creates a new thrift client. host host of server. port port of server. service the class the server implements framed should this client be framed? (for nonblocking clients) timeout timeout in ms
def __init__(self, host, port, service, framed=True, timeout=50000): self.host = host self.port = port self.service = service self.framed = framed self.timeout = timeout self.create()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_transport(self, host):\r\n from thrift.transport import TSocket, TTransport\r\n\r\n thrift_socket = TSocket.TSocket(host.name, host.port)\r\n\r\n if self._timeout is not None:\r\n thrift_socket.setTimeout(self._timeout)\r\n\r\n return TTransport.TFramedTransport(thrift_socket)", "def create_tcp(cls, port, host=LOCALHOST, *args, **kwargs):\n return ZeroRPCClientTransport(\"tcp://{}:{}\".format(host, port), *args, **kwargs)", "async def open(self) -> None:\n if self.client is not None:\n return # _refresh_thrift_client opened the transport\n\n logger.debug(f\"Opening Thrift transport to {self.host}:{self.port}\")\n self.client = await self._client_factory(**self.client_kwargs)", "def beta_create_Hetr_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):\n request_deserializers = {\n ('Hetr', 'BuildTransformer'): BuildRequest.FromString,\n ('Hetr', 'Computation'): ComputationRequest.FromString,\n ('Hetr', 'FeedInput'): FeedInputRequest.FromString,\n ('Hetr', 'GetResults'): GetResultsRequest.FromString,\n }\n response_serializers = {\n ('Hetr', 'BuildTransformer'): BuildReply.SerializeToString,\n ('Hetr', 'Computation'): ComputationReply.SerializeToString,\n ('Hetr', 'FeedInput'): FeedInputReply.SerializeToString,\n ('Hetr', 'GetResults'): GetResultsReply.SerializeToString,\n }\n method_implementations = {\n ('Hetr', 'BuildTransformer'): face_utilities.unary_unary_inline(servicer.BuildTransformer),\n ('Hetr', 'Computation'): face_utilities.unary_unary_inline(servicer.Computation),\n ('Hetr', 'FeedInput'): face_utilities.unary_unary_inline(servicer.FeedInput),\n ('Hetr', 'GetResults'): face_utilities.unary_unary_inline(servicer.GetResults),\n }\n server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)\n return beta_implementations.server(method_implementations, options=server_options)", "def async_io_factory(host=\"127.0.0.1\", port=Defaults.TLSPort, sslctx=None,\n server_hostname=None, framer=None, source_address=None,\n timeout=None, **kwargs):\n import asyncio\n from pymodbus.client.asynchronous.async_io import init_tls_client\n loop = kwargs.get(\"loop\") or asyncio.new_event_loop()\n proto_cls = kwargs.get(\"proto_cls\", None)\n if not loop.is_running():\n asyncio.set_event_loop(loop)\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n client = loop.run_until_complete(asyncio.gather(cor))[0]\n else:\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n future = asyncio.run_coroutine_threadsafe(cor, loop=loop)\n client = future.result()\n\n return loop, client", "def start_server(**params):\n\n def _grpc_server_async(options):\n call_command(\"grpcserver\", **options)\n\n port = 50000 + randint(0, 10000)\n params[\"port\"] = port\n # Start grpc server\n srv = threading.Thread(\n target=_grpc_server_async, args=[params]\n )\n srv.start()\n sleep(5)\n return \"localhost:%s\" % port", "def gen_server():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers = 10), \n options= [('grpc.max_send_message_length', 100 * 1024 * 1024),\n ('grpc.max_receive_message_length', 100 * 1024 * 1024)])\n with open('server.key', 'rb') as f:\n private_key = f.read()\n with open('server.crt', 'rb') as f:\n certificate_chain = f.read()\n server_credentials = 
grpc.ssl_server_credentials(((private_key, certificate_chain,),))\n server.add_secure_port('[::]:50051', server_credentials)\n return server", "def create_client(service_name: str, config_name: str = None, **client_args):\n session = get_session(config_name)\n return session.client(service_name, **client_args)", "def connect(host, port = DEFAULT_SERVER_PORT):\n return factory.connect(host, port, SlaveService)", "def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False):\n s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def create_server(\n handle_event: EventCallback,\n host: str = \"0.0.0.0\",\n port: int = 0,\n ssl_context: Optional[SSLContext] = None,\n) -> Server:\n return Server(handle_event, host, port, ssl_context)", "def create_socket(host, port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(300)\n while True:\n try:\n s.connect( (host, int(port)) )\n except:\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n time.sleep(1)\n else:\n break\n return ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1, keyfile=\"zagent_client.key\",\n certfile=\"zagent_client.pem\")", "def create_tcp_client_socket(address, port):\n\n sock = s.socket(s.AF_INET, s.SOCK_STREAM)\n\n\n return sock", "def create_client(self) -> None:\n pass", "def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client", "def serve(port, server='0.0.0.0'):\n server = make_server(lang_thrift.CodeExecutor, Dispatcher(), server, port)\n server.serve()", "async def create_turn_endpoint(protocol_factory, server_addr, username, password, lifetime=600):\n loop = asyncio.get_event_loop()\n _, inner_protocol = await loop.create_datagram_endpoint(\n lambda: TurnClientProtocol(server_addr,\n username=username,\n password=password,\n lifetime=lifetime),\n family=socket.AF_INET)\n\n protocol = protocol_factory()\n transport = TurnTransport(protocol, inner_protocol)\n await transport._connect()\n\n return transport, protocol", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def new(\n host: str = \"localhost\",\n port: int = 4110,\n user: str = \"pyserval\",\n passwd: str = \"pyserval\",\n ):\n connection = RestfulConnection(host=host, port=port, user=user, passwd=passwd)\n return LowLevelClient(connection=connection)", "def connect_thread(service=VoidService, config={}, remote_service=VoidService, remote_config={}):\n listener = socket.socket()\n listener.bind((\"localhost\", 0))\n listener.listen(1)\n remote_server = partial(_server, listener, remote_service, remote_config)\n spawn(remote_server)\n host, port = listener.getsockname()\n return connect(host, port, service=service, config=config)", "def coredns_tcp(kube_module: kubetest.client.TestClient\n ) -> kubetest.objects.Service:\n svc_list = get_services_by_proto(\"TCP\")\n svc = kubetest.objects.Service(svc_list[0])\n svc.create()\n kube_module.wait_until_created(svc, timeout=10)\n svc.wait_until_ready(timeout=10)\n yield svc\n svc.delete(options=None)\n svc.wait_until_deleted()", "def __init__(self, processor, 
server_address,\r\n inputProtocolFactory, outputProtocolFactory = None,\r\n server_class = BaseHTTPServer.HTTPServer):\r\n\r\n if outputProtocolFactory is None:\r\n outputProtocolFactory = inputProtocolFactory\r\n\r\n TServer.TServer.__init__(self, processor, None, None, None,\r\n inputProtocolFactory, outputProtocolFactory)\r\n\r\n thttpserver = self\r\n\r\n class RequestHander(BaseHTTPServer.BaseHTTPRequestHandler):\r\n def do_POST(self):\r\n # Don't care about the request path.\r\n itrans = TTransport.TFileObjectTransport(self.rfile)\r\n otrans = TTransport.TFileObjectTransport(self.wfile)\r\n itrans = TTransport.TBufferedTransport(itrans, int(self.headers['Content-Length']))\r\n otrans = TTransport.TMemoryBuffer()\r\n iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)\r\n oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)\r\n try:\r\n thttpserver.processor.process(iprot, oprot)\r\n except ResponseException, exn:\r\n exn.handler(self)\r\n else:\r\n self.send_response(200)\r\n self.send_header(\"content-type\", \"application/x-thrift\")\r\n self.end_headers()\r\n self.wfile.write(otrans.getvalue())\r\n\r\n self.httpd = server_class(server_address, RequestHander)", "def start_server(self):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n\n # The two services we added in the proto. You can find these functions in\n # jellybeanrobot_pb2_grpc.py.\n jellybeanrobot_pb2_grpc.add_JellyServicer_to_server(Robot(), server)\n\n # Start listening on a port.\n server.add_insecure_port(\"localhost:%d\" % self.port)\n print \"Listening on localhost:%d!\\n\" % self.port\n server.start()\n\n try:\n while True:\n time.sleep(3600) # one hour. \n except KeyboardInterrupt:\n server.stop(0)", "async def connect(\n address,\n device_id,\n local_key,\n protocol_version,\n enable_debug,\n listener=None,\n port=6668,\n timeout=5,\n):\n loop = asyncio.get_running_loop()\n on_connected = loop.create_future()\n _, protocol = await loop.create_connection(\n lambda: TuyaProtocol(\n device_id,\n local_key,\n protocol_version,\n enable_debug,\n on_connected,\n listener or EmptyListener(),\n ),\n address,\n port,\n )\n\n await asyncio.wait_for(on_connected, timeout=timeout)\n return protocol", "def __init__(\n self,\n server_address: str,\n compress: bool = False,\n call_timeout: Optional[Union[int, float, datetime.timedelta]] = None,\n wait_for_ready: bool = True,\n ):\n self._init_args = (server_address, compress)\n self._address = str(server_address)\n self._compress = compress\n self._client = py_client.PyClient(self._address)\n self._call_timeout = call_timeout if call_timeout else datetime.timedelta(0)\n if not isinstance(self._call_timeout, datetime.timedelta):\n self._call_timeout = datetime.timedelta(seconds=self._call_timeout)\n self._wait_for_ready = wait_for_ready\n self._async_client = _AsyncClient(self._client, self._wait_for_ready,\n self._call_timeout, self._compress)", "def __init__(self, servicer, device_proxy=None):\n self.servicer = servicer\n self.server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=PLUGIN_SERVICE_WORKER_COUNT))\n self.device_proxy = device_proxy", "def __init__(self):\n super(LoopbackTransport, self).__init__([_JSON_RPC_SERVER_PATH])", "def client():\n\n client = Client()\n return client", "def hello_svc_client():\n from clients.hello_svc import HelloServiceClient\n return HelloServiceClient()", "def buildProtocol(self, addr):\n return ClientConnection()" ]
[ "0.6973163", "0.5815138", "0.57890433", "0.5741834", "0.55016434", "0.54852235", "0.5437825", "0.5426382", "0.5389692", "0.53569806", "0.5336856", "0.53230494", "0.5321206", "0.5312297", "0.5288609", "0.5285945", "0.52812237", "0.52649456", "0.5262709", "0.52576977", "0.52479964", "0.52396494", "0.52156055", "0.51770455", "0.5168415", "0.5157355", "0.51377267", "0.5130742", "0.5122802", "0.51030076" ]
0.6733042
1
Constructor for a NN used as the comparer submodule
def __init__(self, input_size, hidden_sizes, num_labels=10, output_size=1, batchnorm_comparer_bool=False, dropout_comparer_bool=False): super(NeuralNetComparer, self).__init__() sizes = [2 * num_labels] + hidden_sizes + [output_size] self.layers_comparer = nn.ModuleList( [nn.Linear(in_f, out_f) for in_f, out_f in zip(sizes, sizes[1:])]) self.bns_comparer = nn.ModuleList( [nn.BatchNorm1d(out_f) for in_f, out_f in zip(sizes, sizes[1:])]) self.dps_comparer = nn.ModuleList( [nn.Dropout(p=0.5) for _ in range(len(self.layers_comparer))]) self.relus_comparer = nn.ModuleList( [nn.ReLU() for _ in range(len(self.layers_comparer))]) self.sigmoid = nn.Sigmoid() self.batchnorm_comparer_bool = batchnorm_comparer_bool self.dropout_comparer_bool = dropout_comparer_bool
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, network: Network):\n self.graph = network.graph", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n # set number of nodes in each input, hidden, output layer\n self.inodes = inputnodes\n self.hnodes = hiddennodes\n self.onodes = outputnodes\n\n #learning rate\n self.lr = learningrate", "def __init__(self, input_size,\n hidden_sizes_comparer,\n batchnorm_comparer_bool=False,\n dropout_comparer_bool=False):\n super(CNNCalssifierComparer, self).__init__()\n self.input_size = input_size\n self.classifier = CNNClassifier()\n self.comparer = NeuralNetComparer(\n input_size,\n hidden_sizes_comparer,\n batchnorm_comparer_bool=batchnorm_comparer_bool,\n dropout_comparer_bool=dropout_comparer_bool)", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def __init__(self):\n Algorithm.__init__(self)\n self.name = \"Otsus Threshold\"\n self.parent = \"Segmentation\"", "def __init__(self, poolIndex, cls):\n super(P1, self).__init__(poolIndex, \"abstractnode\", [\"edges\", \"map\"], [None for i in range(0, 0)], cls)", "def __init__(self, model_config):\n # Training Parameters\n self.__learning_rate = model_config[\"cnnLearningRate\"]\n\n # Network Parameters\n self.__num_classes = model_config[\"numClasses\"]\n self.__weight_decay = 1e-4\n self.__num_gpus = model_config[\"numGpus\"]\n self.__use_csnn = model_config[\"useCsnn\"]\n\n self.__csnn = Csnn(model_config)", "def __init__(self,\r\n n,\r\n sort,\r\n algo,\r\n comps,\r\n exs,\r\n predata,\r\n postdata,\r\n comp_eq,\r\n ex_eq,\r\n time):\r\n self.n = n\r\n self.sort = sort\r\n self.algo = algo\r\n self.comps = comps\r\n self.exs = exs\r\n self.predata = predata\r\n self.postdata = postdata\r\n self.comp_eq = comp_eq\r\n self.ex_eq = ex_eq\r\n self.time = time", "def __init__(self, nx):\n if not isinstance(nx, int):\n raise TypeError('nx must be an integer')\n if nx < 1:\n raise ValueError('nx 
must be a positive integer')\n\n \"\"\"\n W = The weights vector for the neuron. Upon instantiation\n using a random normal distribution.\n \"\"\"\n self.W = np.random.normal(0, 1, (1, nx))\n\n \"\"\"The bias for the neuron. Upon instantiation, it should be initialized to 0.\"\"\"\n self.b = 0\n\n \"\"\"The activated output of the neuron (prediction).\n Upon instantiation, it should be initialized to 0.\"\"\"\n self.A = 0", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def __init__(self, graph, args):\n self.args = args\n self.seeding = args.seed\n self.graph = graph\n self.nodes = [node for node in graph.nodes()]\n self.rounds = args.rounds\n self.labels = {node: node for node in self.nodes}\n self.label_count = len(set(self.labels.values()))\n self.flag = True\n self.weight_setup(args.weighting)", "def __init__(self):\n self.sum_of_node_inputs = 0\n self.output = 0\n self.delta = 0\n self.dp = 0\n self.onehot_label = 0", "def __init__(self):\n self.tree = nx.Graph() \n self.orig_dist_matrix = pd.DataFrame()\n self.work_dist_matrix = pd.DataFrame() \n self.cluster_map = {} \n self.class_map = {}", "def __init__(self, **kwargs):\n\n super(KNN, self).__init__()\n self.nneighbors = kwargs.pop('n_neighbors', 5)\n self.clf = neighbors.KNeighborsClassifier(n_neighbors=self.nneighbors, **kwargs)", "def __init__(self, nodeLabels: tuple):\n super().__init__(DEFAULT_MODEL)\n pass", "def __init__(self, \n n_neurons = \"micro\", # else: \"brunel\" or arrays\n C_ab = \"micro\", # else: \"brunel\" or arrays\n area = net.area, # simulation size\n neuron_model = net.neuron_model, # \"iaf_psc_delta\" or \"iaf_psc_exp\"\n connection_rule = net.connection_rule, # \"fixed_total_number\" or \"fixed_indegree\"\n j02 = net.j02, \n weight_rel_sd = net.weight_rel_sd, \n delay_rel_sd = net.delay_rel_sd, \n g = net.g, \n rate_ext = net.rate_ext):\n ###################################################\n ### \tNetwork parameters\t\t### \n ###################################################\n\n # area of network in mm^2; scales numbers of neurons\n # use 1 for the full-size network (77,169 neurons)\n self.area = area\n \n self.layers = net.layers #np.array([\"L23\", \"L4\", \"L5\", \"L6\"])\n self.types = net.types #np.array([\"e\", \"i\"]) \n self.populations = np.array([layer + typus for layer in self.layers for typus in self.types])\n self.n_populations = len(self.populations)\n self.n_layers = len(self.layers)\n self.n_types = len(self.types)\n \n # Neuron numbers\n if n_neurons == \"micro\":\n self.n_neurons = np.int_(net.full_scale_n_neurons * self.area)\n elif n_neurons == \"brunel\":\n # Provide an array of equal number of neurons in each exc./inh. population\n gamma = 0.25\n inh_factor = 1. / (gamma + 1.)\n exc_factor = 1. - inh_factor \n n_total_micro = np.sum(net.full_scale_n_neurons * self.area)\n N_exc = n_total_micro/self.n_populations * exc_factor\n N_inh = n_total_micro/self.n_populations * inh_factor\n self.n_neurons = np.tile([N_exc, N_inh], self.n_layers).astype(int)\n else:\n if type(n_neurons) == np.ndarray:\n if n_neurons.shape == (self.n_populations, ):\n self.n_neurons = np.int_(n_neurons)\n else:\n raise Exception(\"'n_neurons' has wrong shape. 
\"+\n \"Expects (%i,)\"%self.n_populations)\n else: \n raise Exception(\"'n_neurons' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n self.n_total = np.sum(self.n_neurons)\n\n \n # Synapse numbers\n # Connection probabilities: conn_probs[post, pre] = conn_probs[target, source]\n conn_probs = net.conn_probs\n # Scale synapse numbers of the C_ab\n if net.scale_C_linearly:\n n_outer_full = np.outer(net.full_scale_n_neurons, net.full_scale_n_neurons)\n C_full_scale = np.log(1. - conn_probs) / np.log(1. - 1. / n_outer_full)\n C_scaled = np.int_(C_full_scale * self.area)\n else:\n n_outer = np.outer(self.n_neurons, self.n_neurons)\n C_scaled = np.int_(np.log(1. - conn_probs) / np.log(1. - 1. / n_outer))\n\n self.connection_rule = connection_rule\n if self.connection_rule == \"fixed_total_number\":\n C_ab_micro = C_scaled # total number, do not divide! \n elif self.connection_rule == \"fixed_indegree\":\n C_ab_micro = (C_scaled.T / (net.full_scale_n_neurons * self.area)).T\n else:\n raise Exception(\"Unexpected connection type. Use 'fixed_total_number' for microcircuit \" + \n \"model or 'fixed_indegree' for Brunel's model!\")\n\n if C_ab == \"micro\":\n self.C_ab = C_ab_micro # shall not be integer at this point!\n elif C_ab == \"brunel\":\n C_e = np.mean(C_ab_micro) # mean for microcircuit (= 501 in full scale)\n C_i = gamma * C_e\n self.C_ab = np.tile([C_e, C_i], (self.n_populations, self.n_layers)).astype(int) \n else:\n if type(C_ab) == np.ndarray:\n if C_ab.shape == (self.n_populations, self.n_populations):\n self.C_ab = np.int_(C_ab)\n else:\n raise Exception(\"'C_ab' has wrong shape. \"+\n \"Expects (%i, %i)\"%(self.n_populations, self.n_populations))\n else: \n raise Exception(\"'C_ab' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n\n\n ###################################################\n ### Single-neuron parameters\t\t### \n ###################################################\n self.neuron_model = neuron_model\n self.Vm0_mean = net.Vm0_mean # mean of initial membrane potential (mV)\n self.Vm0_std = net.Vm0_std # std of initial membrane potential (mV)\n self.model_params = net.model_params\n if not self.neuron_model==\"iaf_psc_delta\":\n self.model_params[\"tau_syn_ex\"] = net.tau_syn_ex # excitatory synaptic time constant (ms)\n self.model_params[\"tau_syn_in\"] = net.tau_syn_in # inhibitory synaptic time constant (ms)\n self.tau_syn_ex = net.tau_syn_ex # ms\n self.tau_syn_in = net.tau_syn_in # ms\n self.tau_syn = np.tile([self.tau_syn_ex, self.tau_syn_in], (self.n_populations, self.n_layers))\n # Rescaling for model calculations: these values are not used in the simulation!\n self.tau_m = self.model_params[\"tau_m\"] # ms\n self.t_ref = self.model_params[\"t_ref\"] # ms\n self.E_L = self.model_params[\"E_L\"] # mV\n self.V_r = self.model_params[\"V_reset\"] - self.E_L # mV\n self.theta = self.model_params[\"V_th\"] - self.E_L # mV\n self.C_m = self.model_params[\"C_m\"] # pF\n\n\n ######################################################\n # Synaptic weights. Depend on neuron_model! 
##\n ######################################################\n self.g = g\n self.j02 = j02\n\n g_all = np.tile([1., -self.g], (self.n_populations, self.n_layers))\n L23e_index = np.where(self.populations == \"L23e\")[0][0]\n L4e_index = np.where(self.populations == \"L4e\")[0][0]\n g_all[L23e_index, L4e_index] *= self.j02\n \n self.J = net.PSP_e # mv; mean PSP, used as reference PSP\n self.J_ab = self.J * g_all\n self.weight_rel_sd = weight_rel_sd # Standard deviation of weight relative to mean weight\n # Transformation from peak PSP to PSC\n delta_tau = self.tau_syn - self.tau_m\n ratio_tau = self.tau_m / self.tau_syn\n PSC_over_PSP = self.C_m * delta_tau / (self.tau_m * self.tau_syn * \\\n (ratio_tau**(self.tau_m / delta_tau) - ratio_tau**(self.tau_syn / delta_tau))) \n # Actual weights have to be adapted: from peak PSP to PSC (and back...)\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * PSC_over_PSP # neuron populations\n elif self.neuron_model==\"iaf_psc_delta\":\n self.weights = self.J_ab * PSC_over_PSP * (self.tau_syn_ex) / self.C_m\n # This might be an overkill / doing things twice...\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * np.exp(1) / (self.tau_syn_ex) / self.C_m\n else:\n raise Exception(\"Neuron model should be iaf_psc_ - {delta, exp, alpha}!\")\n\n\n ###################################################\n ### Delays and dicts ### \n ###################################################\n # mean dendritic delays for excitatory and inhibitory transmission (ms)\n self.delay_e = net.delay_e # ms, excitatory synapses\n self.delay_i = net.delay_i # ms, inhibitory synapses\n\n self.delays = np.tile([self.delay_e, self.delay_i], (self.n_populations, self.n_layers)) # adapt...\n self.delay_rel_sd = delay_rel_sd \n \n # Synapse dictionaries\n # default connection dictionary\n self.conn_dict = {\"rule\": connection_rule}\n # weight distribution of connections between populations\n self.weight_dict_exc = net.weight_dict_exc\n self.weight_dict_inh = net.weight_dict_inh\n # delay distribution of connections between populations\n self.delay_dict = net.delay_dict\n # default synapse dictionary\n self.syn_dict = net.syn_dict\n \n \n ###################################################\n ### External stimuli ## \n ###################################################\n # rate of background Poisson input at each external input synapse (spikes/s) \n self.rate_ext = rate_ext # Hz \n self.J_ext = net.PSP_ext # external synaptic weight\n self.delay_ext = self.delay_e # ms; mean delay of external input\n self.dc_amplitude = net.dc_amplitude # constant bg amplitude\n self.C_aext = net.C_aext # in-degrees for background input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * np.exp(1) / self.tau_syn_ex / self.C_m\n\n # optional additional thalamic input (Poisson)\n self.n_th = net.n_th # size of thalamic population\n self.th_start = net.th_start # onset of thalamic input (ms)\n self.th_duration = net.th_duration # duration of thalamic input (ms)\n self.th_rate = net.th_rate # rate of thalamic neurons (spikes/s)\n self.J_th = net.PSP_th # mean EPSP 
amplitude (mV) for thalamic input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * np.exp(1) / self.tau_syn_ex / self.C_m\n\n \n # connection probabilities for thalamic input\n conn_probs_th = net.conn_probs_th\n if net.scale_C_linearly:\n if not self.n_th == 0:\n C_th_full_scale = np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * net.full_scale_n_neurons))\n self.C_th_scaled = np.int_(C_th_full_scale * self.area)\n else:\n if not self.n_th == 0:\n self.C_th_scaled = np.int_(np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * self.n_neurons_micro)))\n if self.n_th == 0:\n self.C_th_scaled = None\n \n # mean delay of thalamic input (ms)\n self.delay_th = net.delay_th\n # standard deviation relative to mean delay of thalamic input\n self.delay_th_rel_sd = net.delay_th_rel_sd\n\n\n ######################################################\n # Predefine matrices for mean field ##\n ######################################################\n if self.neuron_model==\"iaf_psc_delta\":\n self.J_mu = self.weights\n self.J_sd = self.weights\n self.J_mu_ext = self.weight_ext \n self.J_sd_ext = self.weight_ext\n elif self.neuron_model==\"iaf_psc_exp\":\n self.J_mu = self.weights * self.tau_syn / self.C_m\n self.J_sd = self.weights * np.sqrt(self.tau_syn / 2.) / self.C_m\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex / self.C_m\n self.J_sd_ext = self.weight_ext * np.sqrt(self.tau_syn_ex / 2.) / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\":\n self.J_mu = self.weights * self.tau_syn**2 / self.C_m\n self.J_sd = self.weights * self.tau_syn**(3./2.) / (self.C_m * 2.)\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex**2 / self.C_m\n self.J_sd_ext = self.weight_ext * self.tau_syn_ex**(3./2.) 
/ (self.C_m * 2.)\n self.mat_mu = self.tau_m * 1e-3 * self.J_mu * self.C_ab\n self.mu_ext = self.tau_m * 1e-3 * self.J_mu_ext * self.C_aext * self.rate_ext\n self.mat_var = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd**2 * self.C_ab\n self.var_ext = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd_ext**2 * self.C_aext * self.rate_ext", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = cupy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = cupy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: 1 / (1 + cupy.exp(x) ** (-1))", "def __init__(self):\n self.name = \"GomokuAssignment3\"\n self.version = 1.0\n self.NN = 10", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n super().__init__(input_nodes, hidden_nodes, hidden_layers, output_nodes)", "def __init__(self, n_neighbors=2):\n self.n_neighbors = n_neighbors", "def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)", "def __init__(self, adjacency, directed=False, node_weights=None,\n silence_level=0):\n # Call constructor of parent class Network\n Network.__init__(self, adjacency=adjacency, directed=directed,\n node_weights=node_weights,\n silence_level=silence_level)", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n self.optimizer = optim\n self._kwargs = kwargs", "def __init__(self, temperature: float, crops_for_assign: tuple, nmb_crops: tuple, sinkhorn_iterations: int, epsilon: float, gpus: int, num_nodes: int):\n super().__init__()\n self.temperature = temperature\n self.crops_for_assign = crops_for_assign\n self.softmax = nn.Softmax(dim=1)\n self.sinkhorn_iterations = sinkhorn_iterations\n self.epsilon = epsilon\n self.nmb_crops = nmb_crops\n self.gpus = gpus\n self.num_nodes = num_nodes\n if self.gpus * self.num_nodes > 1:\n self.assignment_fn = self.distributed_sinkhorn\n else:\n self.assignment_fn = self.sinkhorn", "def __init__(self, n_in, n_out, ensemble_size, non_linearity='leaky_relu'):\n\n\t\tsuper().__init__()\n\n\t\tweights = torch.zeros(ensemble_size, n_in, n_out).float()\n\t\tbiases = torch.zeros(ensemble_size, 1, n_out).float()\n\n\t\tfor weight in weights:\n\t\t\tif non_linearity == 'swish':\n\t\t\t\tnn.init.xavier_uniform_(weight)\n\t\t\telif non_linearity == 
'relu':\n\t\t\t\tnn.init.kaiming_normal_(weight)\n\t\t\telif non_linearity == 'leaky_relu':\n\t\t\t\tnn.init.kaiming_normal_(weight)\n\t\t\telif non_linearity == 'tanh':\n\t\t\t\tnn.init.xavier_uniform_(weight)\n\t\t\telif non_linearity == 'linear':\n\t\t\t\tnn.init.xavier_normal_(weight)\n\n\t\tself.weights = nn.Parameter(weights)\n\t\tself.biases = nn.Parameter(biases)\n\n\t\tif non_linearity == 'swish':\n\t\t\tself.non_linearity = swish\n\t\telif non_linearity == 'relu':\n\t\t\tself.non_linearity = F.relu\n\t\telif non_linearity == 'leaky_relu':\n\t\t\tself.non_linearity = F.leaky_relu\n\t\telif non_linearity == 'tanh':\n\t\t\tself.non_linearity = torch.tanh\n\t\telif non_linearity == 'linear':\n\t\t\tself.non_linearity = linear", "def __init__(self, colour, san, nags=[], comment=\"\", variations=[]):\n self.colour = colour\n self.san = san\n self.nags = self.nodes_to_nags(nags)\n self.comment = comment\n self.variations = variations", "def __init__(self, input_size,\n hidden_sizes_comparer,\n num_labels=10,\n batchnorm_comparer_bool=False,\n dropout_comparer_bool=False):\n super(CNNCalssifierComparerAuxLoss, self).__init__()\n self.input_size = input_size\n self.classifier = CNNClassifier()\n self.comparer = NeuralNetComparer(\n input_size,\n hidden_sizes_comparer,\n batchnorm_comparer_bool=batchnorm_comparer_bool,\n dropout_comparer_bool=dropout_comparer_bool)\n\n self.num_labels = num_labels", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n \n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = numpy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: scipy.special.expit(x)\n\n pass", "def __init__(self, n_neighbors=6, dim=2, seed=None, n_jobs=1):\n self.n_neighbors = n_neighbors\n self.dim = dim\n self.seed = seed\n self.n_jobs = n_jobs" ]
[ "0.6710942", "0.67074245", "0.6682953", "0.6604958", "0.64907384", "0.6483975", "0.6477833", "0.64563036", "0.6446919", "0.6438356", "0.64310414", "0.6428357", "0.6427498", "0.63945305", "0.6375418", "0.63718176", "0.6370687", "0.63609296", "0.6339547", "0.63324785", "0.63302886", "0.6329091", "0.6323305", "0.6323175", "0.6319932", "0.6319625", "0.63133425", "0.6309261", "0.62952465", "0.62925744" ]
0.6730846
0
Constructor for a CNN with two sub-modules: 1. Classifier 2. Comparer
def __init__(self, input_size, hidden_sizes_comparer, batchnorm_comparer_bool=False, dropout_comparer_bool=False): super(CNNCalssifierComparer, self).__init__() self.input_size = input_size self.classifier = CNNClassifier() self.comparer = NeuralNetComparer( input_size, hidden_sizes_comparer, batchnorm_comparer_bool=batchnorm_comparer_bool, dropout_comparer_bool=dropout_comparer_bool)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])", "def __init__(self, nfeat, nhid, nclass, dropout, alpha):\n super(GCN, self).__init__()\n self.dropout = dropout\n\n self.conv1 = GraphConvolutionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, not_final=True)\n \n self.add_module('conv1', self.conv1)\n\n self.conv2 = GraphConvolutionLayer(nhid, nclass, dropout=dropout, alpha=alpha, not_final=False)", "def __init__(self, input_size,\n hidden_sizes_comparer,\n num_labels=10,\n batchnorm_comparer_bool=False,\n dropout_comparer_bool=False):\n super(CNNCalssifierComparerAuxLoss, self).__init__()\n self.input_size = input_size\n self.classifier = CNNClassifier()\n self.comparer = NeuralNetComparer(\n input_size,\n hidden_sizes_comparer,\n batchnorm_comparer_bool=batchnorm_comparer_bool,\n dropout_comparer_bool=dropout_comparer_bool)\n\n self.num_labels = num_labels", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = 
nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, **config):\n super(Classifier, self).__init__()\n self.input_dim_drug = config['hidden_dim_drug']\n self.input_dim_protein = config['hidden_dim_protein']\n self.hidden_dims = config['cls_hidden_dims']\n self.visual_attention=config['visual_attention']\n dims = [self.input_dim_drug + self.input_dim_protein] + self.hidden_dims + [2]\n if config['attention']:\n if config['concatenation']:\n dims[0]+=config['cnn_target_filters'][-1]\n else:\n dims[0]=self.input_dim_drug+config['cnn_target_filters'][-1]\n self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(len(self.hidden_dims)+1)])\n self.dropout = nn.Dropout(0.25)\n self._initialize()", "def __init__(self, input_size, hidden_sizes,\n num_labels=10, output_size=1,\n batchnorm_comparer_bool=False,\n dropout_comparer_bool=False):\n super(NeuralNetComparer, self).__init__()\n sizes = [2 * num_labels] + hidden_sizes + [output_size]\n self.layers_comparer = nn.ModuleList(\n [nn.Linear(in_f, out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.bns_comparer = nn.ModuleList(\n [nn.BatchNorm1d(out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.dps_comparer = nn.ModuleList(\n [nn.Dropout(p=0.5) for _ in range(len(self.layers_comparer))])\n self.relus_comparer = nn.ModuleList(\n [nn.ReLU() for _ in range(len(self.layers_comparer))])\n self.sigmoid = nn.Sigmoid()\n\n self.batchnorm_comparer_bool = batchnorm_comparer_bool\n self.dropout_comparer_bool = dropout_comparer_bool", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def __init__(self, type, embedding_size: tuple): \n super(AbstractClassifier, self).__init__()\n \n # CNN TODO: shape checks\n self.layer1 = nn.Sequential(\n nn.Conv1d(1, 20, kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv1d(20, 50, kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.dropout = nn.Dropout()\n self.fc1 = nn.Linear(50*44, 1024)\n self.fc2 = nn.Linear(1024, 2)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # 
image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, channels, num_classes):\n super(AuxiliaryHead, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n # image size = 2 x 2\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),\n nn.Conv2d(channels, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True),\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, input_size,\n hidden_sizes_classifier,\n hidden_sizes_comparer,\n batchnorm_classifer_bool=False,\n dropout_classifier_bool=False,\n batchnorm_comparer_bool=False,\n dropout_comparer_bool=False):\n super(NeuralNetCalssifierComparer, self).__init__()\n self.input_size = input_size\n self.classifier = NeuralNetCalssifier(\n input_size,\n hidden_sizes_classifier,\n batchnorm_classifer_bool=batchnorm_classifer_bool,\n dropout_classifier_bool=dropout_classifier_bool)\n self.comparer = NeuralNetComparer(\n input_size,\n hidden_sizes_comparer,\n batchnorm_comparer_bool=batchnorm_comparer_bool,\n dropout_comparer_bool=dropout_comparer_bool)", "def __init__(self, num_classes=200):\n nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = vgg16(pretrained=True).features\n self.features_conv5_1 = nn.Sequential(*list(self.features.children())[:-5])\n self.features_conv5_2 = nn.Sequential(*list(self.features.children())[-5:-3])\n self.features_conv5_3 = nn.Sequential(*list(self.features.children())[-3:-1])\n self.bilinear_proj = nn.Sequential(nn.Conv2d(512, 8192, kernel_size=1, bias=False),\n nn.BatchNorm2d(8192),\n nn.ReLU(inplace=True))\n # Linear classifier.\n self.fc = torch.nn.Linear(8192 * 3, num_classes)", "def __init__(\n self,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)\n self.include_top = include_top\n self.pooling = pooling\n self.weights = weights\n self.backend = backend\n self.layers = layers\n self.classes = classes\n\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\n ' as true, `classes` should be 1000')\n self.block1_conv1 = []\n self.block1_conv2 = []\n self.block1_pool = []\n\n self.block2_conv1 = []\n self.block2_conv2 = []\n self.block2_pool = []\n\n self.block3_conv1 = []\n self.block3_conv2 = []\n self.block3_conv3 = []\n self.block3_conv4 = []\n self.block3_pool = []\n\n self.block4_conv1 = []\n self.block4_conv2 = []\n self.block4_conv3 = []\n self.block4_conv4 = []\n self.block4_pool = []\n\n self.block5_conv1 = []\n self.block5_conv2 = []\n self.block5_conv3 = []\n self.block5_conv4 = []\n self.block5_pool = []\n\n for i in xrange(FLAGS.num_replica):\n # Block 1\n self.block1_conv1.append(layers.Conv2D(64, (3, 3),\n activation='relu',\n padding='same',\n name='block1_conv1'))\n self.block1_conv2.append(layers.Conv2D(64, (3, 
3),\n activation='relu',\n padding='same',\n name='block1_conv2'))\n self.block1_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))\n\n # Block 2\n self.block2_conv1.append(layers.Conv2D(128, (3, 3),\n activation='relu',\n padding='same',\n name='block2_conv1'))\n self.block2_conv2.append(layers.Conv2D(128, (3, 3),\n activation='relu',\n padding='same',\n name='block2_conv2'))\n self.block2_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))\n\n # Block 3\n self.block3_conv1.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv1'))\n self.block3_conv2.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv2'))\n self.block3_conv3.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv3'))\n self.block3_conv4.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv4'))\n self.block3_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))\n\n # Block 4\n self.block4_conv1.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv1'))\n self.block4_conv2.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv2'))\n self.block4_conv3.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv3'))\n self.block4_conv4.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv4'))\n self.block4_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))\n\n # Block 5\n self.block5_conv1.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv1'))\n self.block5_conv2.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv2'))\n self.block5_conv3.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv3'))\n self.block5_conv4.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv4'))\n self.block5_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))\n\n if include_top:\n # Classification block\n self.flatten = layers.Flatten(name='flatten')\n self.fc1 = layers.Dense(4096, activation='relu', name='fc1')\n self.fc2 = layers.Dense(4096, activation='relu', name='fc2')\n self.predict = layers.Dense(classes, activation='softmax', name='predictions')\n else:\n if pooling == 'avg':\n self.pool = layers.GlobalAveragePooling2D()\n elif pooling == 'max':\n self.pool = layers.GlobalMaxPooling2D()", "def __init__(self, num_gpus):\n\n super(Critic, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # nodes = IMG_CHANNELS * IMG_SIZE * IMG_SIZE\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, 
bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n # scratched sigmoid activation function\n )", "def __init__(self, input_dim, hidden_dim_1, hidden_dim_2):\n super(BinaryGraphClassifier, self).__init__()\n\n # Define the graph convolutional layers\n self.conv_1 = DenseSAGEConv(in_feats=input_dim, out_feats=hidden_dim_1)\n self.conv_2 = DenseSAGEConv(in_feats=hidden_dim_1, out_feats=hidden_dim_2)\n\n # Define the fully connected layers\n self.fc_1 = nn.Linear(hidden_dim_2, hidden_dim_2)\n self.fc_2 = nn.Linear(hidden_dim_2, 1)\n\n # Drop out layers\n self.conv_dropout_1 = nn.Dropout(p=0.4)\n self.conv_dropout_2 = nn.Dropout(p=0.4)\n self.fc_dropout = nn.Dropout(p=0.4)\n\n # The output activation function\n self.output_func = nn.Sigmoid()", "def __init__(self, *units):\n super().__init__()\n self.convs = torch.nn.ModuleList([\n torch.nn.Conv2d(in_, out, 3, 1, 1)\n for in_, out in zip(units[:-1], units[1:])\n ])", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.multiclass\n self.model = sklearn.multiclass.OneVsRestClassifier", "def __init__(self, **kwargs):\n super(CIFAR10Classifier, self).__init__() #pylint: 
disable=super-with-arguments\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)\n\n self.scheduler = None\n self.optimizer = None\n self.args = kwargs\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n self.preds = []\n self.target = []\n self.example_input_array = torch.rand((1, 3, 64, 64))", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def __init__(self):\n super(enc_clf, self).__init__()\n\n self.fc1 = nn.Linear(784, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 10)", "def __init__(self, hparams):\n super(ThreeLayerClassifier, self).__init__()\n self.hparams = hparams\n self.layer_1 = torch.nn.Linear(self.hparams[\"input_size\"], 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, self.hparams[\"targets\"])", "def setup_class(cls):\n name1 = \"Conv2D.Conv2D-op369.0.0.1\"\n tensor1 = np.array([[[-1.2808e-03, 7.7629e-03, 1.9241e-02],\n [-1.3931e-02, 8.9359e-04, -1.1520e-02],\n [-6.3248e-03, 1.8749e-03, 1.0132e-02]],\n [[-2.5520e-03, -6.0005e-03, -5.1918e-03],\n [-2.7866e-03, 2.5487e-04, 8.4782e-04],\n [-4.6310e-03, -8.9111e-03, -8.1778e-05]],\n [[1.3914e-03, 6.0844e-04, 1.0643e-03],\n [-2.0966e-02, -1.2865e-03, -1.8692e-03],\n [-1.6647e-02, 1.0233e-03, -4.1313e-03]]], np.float32)\n info1 = d.TensorInfo(node_name=\"Default/network-WithLossCell/_backbone-AlexNet/conv1-Conv2d/Conv2D-op369\",\n slot=1, iteration=2, rank_id=0, root_graph_id=0, is_output=False)\n\n name2 = \"Parameter.fc2.bias.0.0.2\"\n tensor2 = np.array([-5.0167350e-06, 1.2509107e-05, -4.3148934e-06, 8.1415592e-06,\n 2.1177532e-07, 2.9952851e-06], np.float32)\n info2 = d.TensorInfo(node_name=\"Default/network-WithLossCell/_backbone-AlexNet/fc3-Dense/\"\n \"Parameter[6]_11/fc2.bias\",\n slot=0, iteration=2, rank_id=0, root_graph_id=0, is_output=True)\n\n tensor3 = np.array([2.9060817e-07, -5.1009415e-06, -2.8662325e-06, 2.6036503e-06,\n -5.1546101e-07, 6.0798648e-06], np.float32)\n info3 = d.TensorInfo(node_name=\"Default/network-WithLossCell/_backbone-AlexNet/fc3-Dense/\"\n \"Parameter[6]_11/fc2.bias\",\n slot=0, iteration=3, rank_id=0, root_graph_id=0, is_output=True)\n\n name3 = \"CudnnUniformReal.CudnnUniformReal-op391.0.0.3\"\n tensor4 = np.array([-32.0, -4096.0], np.float32)\n info4 = d.TensorInfo(node_name=\"Default/CudnnUniformReal-op391\",\n slot=0, iteration=2, rank_id=0, root_graph_id=0, is_output=False)\n\n name4 = \"Cast.Cast-op4.0.0.1\"\n tensor_all_zero = np.array([[[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]], np.float32)\n info5 = d.TensorInfo(node_name=\"Default/network-WithLossCell/_backbone-AlexNet/Cast-op4\",\n slot=0, iteration=0, rank_id=0, root_graph_id=0, is_output=True)\n\n name5 = \"Cast.Cast-op40.0.0.1\"\n tensor_all_one = np.array([[[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]], np.float32)\n info6 = d.TensorInfo(node_name=\"Default/network-WithLossCell/_backbone-AlexNet/Cast-op40\",\n slot=0, iteration=0, rank_id=0, root_graph_id=0, is_output=True)\n\n tensor_info = [info1, info2, info3, info4, info5, info6]\n tensor_name = [name1, name2, name2, name3, name4, name5]\n tensor_list = [tensor1, tensor2, tensor3, tensor4, tensor_all_zero, tensor_all_one]\n cls.temp_dir = build_dump_structure(tensor_name, tensor_list, \"Test\", tensor_info)", 
"def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])" ]
[ "0.703168", "0.6759595", "0.6644531", "0.66385365", "0.6632358", "0.6617524", "0.65919757", "0.6583789", "0.6558685", "0.65006775", "0.649134", "0.6481717", "0.6467541", "0.6467387", "0.6456215", "0.6450546", "0.64445513", "0.64377886", "0.6429936", "0.6427293", "0.6427293", "0.6427293", "0.63968295", "0.6361502", "0.63594544", "0.6294823", "0.62924194", "0.62796634", "0.6274539", "0.6270892" ]
0.70681906
0
add a dummy client contact
def add_dummy_contact(index, client, user, client_manager = None): if client_manager == None: client_manager = ClientManager(user.user) return client_manager.add_client_contact( client = client, name = 'name_%i' % index, email = 'email%[email protected]' % index )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_contact():\n return 'add contact'", "def create_empty_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n try:\n io_client.create_contact(\n urns=[\"tel:+52\" + tel],\n groups=[\"cc9543a2-33ca-43cd-a3b7-4839b694605a\"])\n return jsonify({\"creado\": \"Si\"}), 201\n except:\n pass\n return jsonify({\"creado\": \"No\"}), 404", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )", "def __init__(self, contacts_client):\n self.contacts_client = contacts_client", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def setUp(self):\n self.new_contact = Contact(\"zoo\", \"vier\", 254719702373, \"[email protected]\")", "def do_adduser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tself.cl.add_contact()\n\t\telse:\n\t\t\tprint(\"To add contacts you need to open or create a book.\")", "def test_client_address_create(self):\n pass", "def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. 
\\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()", "def test_new_empty_invoice_address(self):\r\n self.original = self.env[\"res.partner\"].create({\r\n \"is_company\": False,\r\n \"type\": 'invoice',\r\n \"lastname\": \"\",\r\n \"firstname\": \"\"})", "def create_contact(contact, party_type, party):\n\tcontact = contact\t.split(\" \")\n\n\tcontact = frappe.get_doc({\n\t\t\"doctype\":\"Contact\",\n\t\t\"first_name\":contact[0],\n\t\t\"last_name\": len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def create_clients(client_name): # Crear nuevo Cliente\n global clients\n\n if client_name not in clients:\n clients.append(client_name)\n else:\n print('The client name is alredy in the client\\'s list')", "def add_contact_to_SIM_Card(self, index):\n click_textview_by_id('account_type')\n click_textview_by_text('SIM')\n #entertext_edittext_by_index(0, 'hacker'+str(index))\n #entertext_edittext_by_index(1, '918801970004')\n self.ime.IME_input_english(1, SC.PRIVATE_CONTACT_NAME)\n self.ime.IME_input_number(1, \"918801970004\", 'n')\n return", "def create(ctx, name, company, mail, age):\n client = Client(name,company,mail,age)\n client_service = ClientService(ctx.obj['clients_table']) \n client_service.create_client(client)", "def test_get_contact(self):\n pass", "def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def test_add_contact_duplicate(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n with pytest.raises(BusinessException) as exception:\n org.add_contact(TestContactInfo.contact2)\n assert exception.value.code == Error.DATA_ALREADY_EXISTS.name", "def setUp(self):\n # Below creating the new contact object to test.\n self.new_contact = Contact(\n \"James\", \"Muriuki\", \"0712345678\", \"[email protected]\")", "def force_contact(self, *args, **kwargs) -> Any:\n pass", "def create_contact_on_google(self, info):\n\n\t\twith open('client.pickle') as pickle_file:\n\t\t\tclient = pickle.load(pickle_file)\n\n\t\t#create contact in google\n\t\tnew_contact = gdata.contacts.data.ContactEntry()\n\n\t\t# Set the contact's name.\n\t\tnew_contact.name = gdata.data.Name( given_name=gdata.data.GivenName(text=info['name']), family_name=gdata.data.FamilyName(text=info['name']),\n\t\t\tfull_name=gdata.data.FullName(text=info['name']))\n\n\t\tnew_contact.content = atom.data.Content(text='Notes')\n\n\t\t# Set the contact's email addresses.\n\t\tnew_contact.email.append(gdata.data.Email(address=info['email'], primary='true', rel=gdata.data.WORK_REL, display_name=info['name']))\n\n\t\t# Set the contact's phone 
numbers.\n\t\tnew_contact.phone_number.append(gdata.data.PhoneNumber(text=info['phone'], rel=gdata.data.WORK_REL, primay='true'))\n\n\t\tcontact_entry = client.CreateContact(new_contact)\n\t\twebnotes.errprint(\"Contact's ID: %s\" % contact_entry.id.text)\n\n\t\twebnotes.conn.set_value(\"Contact\",self.name,\"contct_id\", contact_entry.id.text)", "def test_new_contact_association(self):\n node = self.create_xml_patient({'Mobile_Number': '12223334444',\n 'Pin_Code': '4444'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertTrue(patient.contact is not None)\n self.assertEqual(patient.contact.phone, '+12223334444')\n self.assertEqual(patient.contact.pin, '4444')", "def create_dummy_client(index, user, client_manager = None, language = None, currency = None):\r\n \r\n if client_manager == None:\r\n client_manager = ClientManager(user)\r\n \r\n if currency is None:\r\n currency = create_dummy_currency(index)\r\n \r\n if language is None:\r\n language = create_dummy_language(index)\r\n \r\n return client_manager.add_client(\r\n name = 'client_%i' %index,\r\n address = 'address_%i' % index,\r\n email = 'corp_email_%[email protected]' % index,\r\n default_currency_id = currency.key().id(),\r\n default_language_id = language.key().id(),\r\n )", "def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])", "def add_client(name):\n return create_client(name)", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def test_client_create(self):\n pass", "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass" ]
[ "0.66256905", "0.6618168", "0.65759665", "0.6517715", "0.62706566", "0.6233464", "0.6205126", "0.6143171", "0.6122024", "0.6082095", "0.602202", "0.60178286", "0.59804857", "0.5960017", "0.59527576", "0.5950094", "0.5937796", "0.59137475", "0.58988756", "0.58725286", "0.5861971", "0.5854007", "0.5838641", "0.5825351", "0.5797936", "0.57815975", "0.57773364", "0.5767598", "0.57639235", "0.57507527" ]
0.8227372
0
Retrieve an event with a provided event ID.
def retrieve(cls, event_id): return Event(Requester.get(cls.endpoint + '/' + event_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_event(self, eventid):\n return self.s.query(Event).get(eventid)", "def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()", "def get_event(event_id):\n try:\n return Event.objects.get(id=event_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no event with id={}.'.format(event_id))", "def get_event(self, event_id):\n if not event_id:\n return None\n\n return self.service.events().get(calendarId=self.calendar_id, eventId=event_id).execute()", "def getEventById(self, eventid):\n\n e_id = EventId()\n e_id.setHashed(eventid)\n event = Event.getById(e_id)\n return event.getAsDict()", "def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)", "def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event = self.session.query(Event).filter_by(id=id).scalar()\n if not event:\n raise exc.NotFound(\"No such Event {} found\".format(id))\n\n json = event.to_dict(base_uri=self.href_prefix, expand=expand)\n\n self.success(json)", "def select_event(self, event_id):\n with self.conn:\n self.c.execute(\n \"\"\"SELECT * FROM {table} WHERE {event} = ?\"\"\".format(\n table=TABLE, event=EVENT\n ),\n (event_id,),\n )\n return self.c.fetchone()", "def query_event_by_id():\n try:\n event_id = request.args['event_id']\n response = requests.put(app.config['EVENTS_ENDPOINT'] + event_id)\n if response.status_code == 200:\n return render_template(\n 'search_results.html',\n auth=is_organizer(get_user()),\n events=parse_events(response.json()),\n app_config=app.config\n )\n else:\n return 'Unable to retrieve events', 500\n except BadRequestKeyError as error:\n return f'Error: {error}.', 400", "def get_event(self, event_id):\n mask = \"\"\"mask[\n acknowledgedFlag,\n attachments,\n impactedResources,\n statusCode,\n updates,\n notificationOccurrenceEventType]\n \"\"\"\n return self.client.call('Notification_Occurrence_Event', 'getObject', id=event_id, mask=mask)", "def get_one_event(cls, event_id):\n try:\n event = events_coll.find_one({\"_id\": ObjectId(event_id)})\n return cls(**event)\n except Exception as e:\n print(e)", "def event(self, event_id):\r\n return e.Event(self, event_id)", "def get_one(self, message_id):\r\n event_filter = storage.EventFilter(message_id=message_id)\r\n events = [event for event\r\n in pecan.request.storage_conn.get_events(event_filter)]\r\n if not events:\r\n raise EntityNotFound(_(\"Event\"), message_id)\r\n\r\n if len(events) > 1:\r\n LOG.error(_(\"More than one event with \"\r\n \"id %s returned from storage driver\") % message_id)\r\n\r\n event = events[0]\r\n\r\n return Event(message_id=event.message_id,\r\n event_type=event.event_type,\r\n generated=event.generated,\r\n traits=event.traits)", "def get_event(self, param):\n\n if param is None:\n return None\n if isinstance(param, str):\n url = self.build_url(\n self._endpoints.get('get_event').format(id=self.calendar_id,\n ide=param))\n params = None\n by_id = True\n else:\n url = self.build_url(\n self._endpoints.get('get_events').format(id=self.calendar_id))\n params = {'$top': 1}\n params.update(param.as_params())\n by_id = False\n\n response = self.con.get(url, params=params,\n headers={'Prefer': 'outlook.timezone=\"UTC\"'})\n if not response:\n return None\n\n if by_id:\n event = response.json()\n else:\n event = response.json().get('value', [])\n if event:\n event = event[0]\n else:\n return None\n return self.event_constructor(parent=self,\n 
**{self._cloud_data_key: event})", "async def get_event(\n self,\n event_id: str,\n redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,\n get_prev_content: bool = False,\n allow_rejected: bool = False,\n allow_none: bool = False,\n check_room_id: Optional[str] = None,\n ) -> Optional[EventBase]:\n if not isinstance(event_id, str):\n raise TypeError(\"Invalid event event_id %r\" % (event_id,))\n\n events = await self.get_events_as_list(\n [event_id],\n redact_behaviour=redact_behaviour,\n get_prev_content=get_prev_content,\n allow_rejected=allow_rejected,\n )\n\n event = events[0] if events else None\n\n if event is not None and check_room_id is not None:\n if event.room_id != check_room_id:\n event = None\n\n if event is None and not allow_none:\n raise NotFoundError(\"Could not find event %s\" % (event_id,))\n\n return event", "def get_event(username, event_id=None, maxResults=None):\n token = \"tokens/\" + username + \".pkl\"\n credentials = pickle.load(open(token, \"rb\"))\n service = build('calendar', 'v3', credentials=credentials)\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n if event_id and maxResults:\n raise ValueError(\"event_id and maxResults cannot be set at the same time. Choose one.\")\n\n if event_id:\n return service.events().get(calendarId=CALENDAR_ID, eventId=event_id).execute()\n\n if maxResults:\n events_result = service.events().list(calendarId=CALENDAR_ID, timeMin=now,\n maxResults=maxResults, singleEvents=True,\n orderBy='startTime').execute()\n return events_result.get('items', [])", "def get(self, case_number, event_id):\n return self._connection.get(\n u\"{}/{}\".format(self._uri_prefix.format(case_number), event_id)\n )", "def get_one(self, id):\n rpc_ilog = objects.event_log.get_by_uuid(\n pecan.request.context, id)\n\n return EventLog.convert_with_links(rpc_ilog)", "def get(self, eventId):\n event = EventDao().get_by_id(event_id=eventId)\n event_dict = event.to_dict_view()\n return event_dict", "def event(self, id):\r\n return Event(self, id)", "def get_events(events_id):\n # Filter events matching events_id and select the first one found\n events = Events.query.filter_by(id=events_id).first()\n # If no events matches album_id, respond HTTP 404\n if events is None:\n abort(404)\n # Serialize the album as a JSON object and return it\n schema = EventsSchema()\n return jsonify(schema.dump(events))", "def get_event(id_client, id_person, id_event):\n id_client = validate_id_client(id_client)\n\n # noinspection PyUnusedLocal\n def get_action_on_namespace(id_current_client):\n id_current_person = validate_id_person(id_person)\n\n id_current_event = validate_id_event(id_event)\n\n return Evento.get_by_id_for_person(id_current_event, id_current_person, u\"Event\")\n\n return on_client_namespace(id_client, get_action_on_namespace)", "def get_event(self, uuid):\n return Event.deserialize(self._get_single('events', {'uuid': uuid}))", "def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event_type = (\n self.session.query(EventType).filter_by(id=id).scalar()\n )\n if not event_type:\n raise exc.NotFound(\"No such EventType {} found\".format(id))\n\n json = event_type.to_dict(self.href_prefix)\n json[\"limit\"] = limit\n json[\"offset\"] = offset\n\n # We will perform expansion of events here b/c we want to apply\n # limits and offsets\n events = []\n for event in (\n event_type.get_latest_events().limit(limit).offset(offset)\n .from_self().order_by(Event.timestamp).all()\n ):\n if \"events\" in 
expand:\n events.append(\n event.to_dict(\n base_uri=self.href_prefix, expand=set(expand)\n )\n )\n else:\n events.append({\n \"id\": event.id, \"href\": event.href(self.href_prefix)\n })\n json[\"events\"] = events\n\n self.success(json)", "def view_event(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n context = {'event': event }\n return render_to_response('event_view.html',\n context,\n context_instance=RequestContext(request))", "def GetEventIdentifier(self):\n return self._event_identifier", "def get(self, eventId, uid):\n raise NotImplementedError", "def get_event(self):\r\n return self.events[0]", "def detail(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n user = request.user\n return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})", "def event_id(self):\n return self._event_id" ]
[ "0.8653071", "0.8415355", "0.8306787", "0.8270487", "0.7743675", "0.74382395", "0.7345693", "0.72627985", "0.72339475", "0.7229438", "0.7221798", "0.71465296", "0.7099162", "0.68521637", "0.6657219", "0.66425383", "0.663405", "0.6582111", "0.64599323", "0.6412161", "0.63824356", "0.63661623", "0.62889874", "0.623873", "0.6205085", "0.6163212", "0.6159161", "0.6145498", "0.61177796", "0.6109988" ]
0.85686564
1
Multiply a sparse Tensor by a vector along a particular dimension.
def sparse_dim_multiply( A: Tensor, x: Tensor, dim: int ) -> Tensor: idx = A._indices()[dim] vals = A._values() vals *= x[idx] return A
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matvec(self, x):\n return self * x", "def dot(x, y, sparse=False):\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res", "def dot(x,y,sparse=False):\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res", "def apply_scalar(vector, scalar):\n new_coordinates = []\n index = 0\n while index < vector.dimension:\n new_value = vector.coordinates[index] * scalar\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def mul_dense(x, y): # pragma: no cover\n return x * y", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def multiplyByVector(matrix:[[int]], vector: [int]):\n # assuming vector and result are transposed\n _validate(matrix, vector)\n if len(matrix[0]) != len(vector):\n raise InvalidArgumentError(f\"cannot multiply vector which length is {len(vector)} by matrix that has a {len(matrix[0])} columns\")\n result = [0 for _ in range(len(matrix))] # initialize empty array\n for matrix_row_idx, _ in enumerate(matrix):\n for matrix_column_idx, v_value in enumerate(vector):\n result[matrix_row_idx] ^= (v_value * matrix[matrix_row_idx][matrix_column_idx])\n return result", "def multiply(matrix, vector):\n result = []\n for row in matrix:\n assert len(row) == len(vector)\n result.append(sum([a*b for (a, b) in zip(row, vector)]))\n return Vector3D.from_list(result)", "def sparseVectorDotProduct(v1, v2):\n ans = 0\n for index, val in v1.items():\n ans += val * v2[index]\n return ans", "def prod(tensor, axis=None):\n raise NotImplementedError", "def _time_distributed_multiply(self, x, w):\n # dimension of vector\n n_dim = K.ndim(x)\n embedding_size = K.int_shape(x)[-1]\n timesteps = K.int_shape(x)[1]\n if timesteps is None:\n timesteps = K.shape(x)[1]\n\n # collapse time dimension and batch dimension together\n x = K.reshape(x, (-1, embedding_size))\n # reshape to (?, 1, embedding_size)\n x = K.expand_dims(x, axis=1)\n # reshape weights to (1, mp_dim, embedding_size)\n w = K.expand_dims(w, axis=0)\n # element-wise multiply\n x = x * w\n # reshape to original shape\n if n_dim == 3:\n x = K.reshape(x, K.stack([-1, timesteps, self.mp_dim, embedding_size]))\n x.set_shape([None, None, None, embedding_size])\n elif n_dim == 2:\n x = K.reshape(x, K.stack([-1, self.mp_dim, embedding_size]))\n x.set_shape([None, None, embedding_size])\n return x", "def csr_mulvec_wrap(fn):\n\n @functools.wraps(fn)\n def csr_mul_vector(A, x):\n if A.nnz > 50000 and _NUM_THREAD_WORKERS > 1:\n return par_dot_csr_matvec(A, x)\n else:\n y = fn(A, x)\n if isinstance(x, qarray):\n y = qarray(y)\n return y\n\n return csr_mul_vector", "def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))", "def __mul__(self, tensor):\n return self.mul(tensor)", "def dense(x, size, name, weight_init=xavier):\n w = tf.get_variable(name + \"/w\", [x.get_shape()[1], size], initializer=weight_init)\n b = tf.get_variable(name + \"/b\", [size], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x, w) + b", "def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):\n with tf.variable_scope(\n name, default_name=\"embedding\", values=[x], reuse=reuse):\n embedding_var = tf.get_variable(\"kernel\", [vocab_size, dense_size])\n emb_x = tf.gather(embedding_var, x)\n if multiplier != 1.0:\n emb_x *= multiplier\n return 
emb_x", "def scalar_multiply(s: float, v: Vector) -> Vector:\n return [s * v_item for v_item in v]", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)\n return sum(v1[k]*v2[k] for k in v1 and v2)\n # END_YOUR_CODE", "def incrementSparseVector(v1, scale, v2):\n for index in v2:\n v1[index] += v2[index] * scale", "def sparse_matmul(A: SparseTensor, B: SparseTensor, out: torch.Tensor) -> torch.Tensor:\n if A.nnz() == 0 or B.nnz() == 0:\n return out\n\n if A.is_cuda:\n return _sparse_matmul_cuda(A, B, out)\n else:\n return _sparse_matmul_cpu(A, B, out)", "def matmul_any_tensor_dense_tensor(a,\n b,\n a_is_sparse = True,\n transpose_a = False):\n if a_is_sparse:\n _check_type('a', a, tf.SparseTensor)\n return tf.sparse.sparse_dense_matmul(\n b, a, adjoint_a=False, adjoint_b=not transpose_a)\n else:\n return tf.transpose(\n a=tf.matmul(a, tf.transpose(a=b), transpose_a=transpose_a))", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_ANSWER (our solution is 3 lines of code, but don't worry if you deviate from this)\n return sum([v1[v1_key] * v2.get(v1_key, 0) for v1_key in v1])\n # END_YOUR_ANSWER", "def dot(x, y):\n if isinstance(x, tf.SparseTensor) and isinstance(y, tf.SparseTensor):\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x,y)\n return res", "def scalar_vector_mult(alpha, v):\n return [alpha*x for x in v]", "def _sparse_matmul_cuda(A: SparseTensor, B: SparseTensor, out: torch.Tensor):\n from falkon.sparse.sparse_helpers import spspmm, csr2dense\n\n if not A.is_csr:\n raise ValueError(\"A must be CSR matrix\")\n if not B.is_csr:\n raise ValueError(\"B must be CSR matrix\")\n\n # 2. MatMul\n out_indexptr, out_index, out_data = spspmm(\n A.indexptr, A.index, A.data, B.indexptr, B.index, B.data, A.shape[1])\n # 3. Convert to dense\n out = csr2dense(out_indexptr, out_index, out_data, out)\n return out", "def tensorProduct(self, a):\n \n assert (type(a) == SparseMatrix), 'Incompatible Matrices'\n elements = []\n dimension = self.Dimension * a.Dimension\n for me1 in self.Elements:\n for mea in a.Elements:\n row = me1.i*a.Dimension + mea.i\n col = me1.j*a.Dimension + mea.j\n value = complex(me1.val * mea.val)\n elements.append(MatrixElement(int(row), int(col), complex(value)))\n return SparseMatrix(dimension, elements)", "def spmv (n, A, x):\n y = dense_vector (n)\n for (i, A_i) in A.items ():\n s = 0\n for (j, a_ij) in A_i.items ():\n s += a_ij * x[j]\n y[i] = s\n return y", "def scalarMultiplication(self, factor):\n components = self.components() * factor\n return Vector.initializeFromComponents(components)", "def __call__(self, x):\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias" ]
[ "0.60994023", "0.60374564", "0.6016153", "0.60002637", "0.5900316", "0.59003067", "0.59003067", "0.58817714", "0.5870412", "0.5835138", "0.5826516", "0.5803307", "0.57906675", "0.5764829", "0.5762314", "0.57615113", "0.5732349", "0.57092464", "0.57058054", "0.56932294", "0.56802565", "0.56504416", "0.56397444", "0.56076956", "0.5596073", "0.5584929", "0.5584808", "0.5553639", "0.55511487", "0.55299747" ]
0.7774255
0
Add identity matrix to a sparse Tensor.
def sparse_add_identity( A: Tensor ) -> Tensor: idx1, idx2 = A._indices() vals = A._values() vals[idx1 == idx2] += 1 return A
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)", "def simple_sparse_add():\n examples = [\n benchmark.Example(\n inputs=[\n tf.SparseTensor(indices=[[0, 0], [0, 1]],\n values=[12, 34],\n dense_shape=[2, 2]),\n [[-3, 0], [-5, 0]],\n ],\n output=tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],\n values=[9, 34, -5],\n dense_shape=[2, 2]),\n ),\n ]\n constants = []\n description = 'Add sparse tensor with dense tensor'\n target_program = 'tf.sparse.add(in1, tf.sparse.from_dense(in2))'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_sparse_add')", "def SimpleSparseTensorFrom(x):\n x_ix = []\n x_val = []\n for batch_i, batch in enumerate(x):\n for time, val in enumerate(batch):\n x_ix.append([batch_i, time])\n x_val.append(val)\n x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]\n x_ix = tf.constant(x_ix, tf.int64)\n x_val = tf.constant(x_val, tf.int32)\n x_shape = tf.constant(x_shape, tf.int64)\n\n #return tf.SparseTensor(x_ix, x_val, x_shape)\n return ([x_ix, x_val, x_shape])", "def add_sparse(self, key, element):\n self.add(self._sparse2seq(key), element)", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def sparse_mx_to_torch_sparse_tensor(self, adj_mat_sparse):\n \n sparse_mx = adj_mat_sparse.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n \n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \n # sparse_mx = sparse_mx.astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def dense_to_sparse(self, tensor: tf.Tensor) -> tf.Tensor:\n tensor_shape = tensor.shape\n expand_dims = len(tensor_shape) == 3\n\n tensor = tf.gather_nd(tf.reshape(tensor, (-1, 1)), self.observations_index)\n if expand_dims:\n tensor = tf.expand_dims(tensor, axis=-1)\n return tensor", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def identity_matrix():\r\n return numpy.identity(4)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)\n )\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def 
sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = 
torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(\r\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(\r\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(\r\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)", "def identity(input_tensor):\n input_tensor = tf.convert_to_tensor(input_tensor)\n return tf.identity(input_tensor)" ]
[ "0.6873758", "0.6549222", "0.63008565", "0.62816167", "0.62674975", "0.6254118", "0.6229", "0.6197494", "0.618009", "0.6088036", "0.60854536", "0.60821396", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.60820395", "0.6075321", "0.6066828", "0.6066828", "0.6066828", "0.60644114" ]
0.760876
0
place the button image
def boutton(self,img1,x,y): self.button.append(self.creat_image(img1,x,y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_image(self, img, img_pos):\n image = tk.Label(self.top, image=img)\n image.grid(row=img_pos[0], column=img_pos[1],\n columnspan=img_pos[2], rowspan=img_pos[3])", "def placeImage(self, img, x=0, y=0):\n if img.getSize() == self.getSize() and img.getWidth() == self.__width:\n # Same dimensions\n self._c = img._c\n\n elif x == 0 and self.__height == img.getHeight():\n # Same height, just overwrite a block\n p_start = y * self.__height\n p_end = y*self.__height + img.getSize()\n self._c[p_start:p_end] = img._c\n\n else:\n # Different dimensions\n for dx in range(min(img.getWidth(), self.getWidth() - x)):\n self.writeCol(x+dx, img.getCol(dx), y)", "def on_image(self, image):", "def __init__(self):\r\n self.image1 = pygame.image.load(img_path1)\r\n self.image2 = pygame.image.load(img_path2)\r\n self.image3 = pygame.image.load(img_path3)\r\n self.image4 = pygame.image.load(img_path4)\r\n self.image = self.image1\r\n self.imageBk1 = pygame.image.load(img_bk1)\r\n self.imageBk1 = pygame.transform.scale(self.imageBk1, (900, 700))\r\n self.imageBk1_rect = self.imageBk1.get_rect()\r\n\r\n # the bird's position\r\n self.x = 0\r\n self.y = 0", "def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y", "def car(img, x, y):\n gameDisplay.blit(img, (x, y)) # blit display the image", "def car(img,x, y):\n gameDisplay.blit(img, (x, y)) # blit display the image", "def place_entity(entity, base, x, y):\n \n img = entity.copy().convert(\"RGBA\")\n\n # Get random angle for placement\n angle = random.randint(-ROTATION_RATE, ROTATION_RATE)\n img = img.rotate(angle, expand=1)\n\n # Placement\n base.paste(img, (x, y), img)", "def assemble_img_frame(self):\n\n self.p2_label_img = ttk.Label(self.p2_frame_img, text=self.lang.VP_IMG_LABEL,\n font=FONT_MSG)\n self.p2_label_img.grid(row=1, column=2, padx=5, pady=0)", "def change_image(self):\n image_lst = [\"images/hangman01.png\",\n \"images/hangman02.png\",\n \"images/hangman03.png\",\n \"images/hangman04.png\",\n \"images/hangman05.png\",\n \"images/hangman06.png\",\n \"images/hangman07.png\"]\n self.strikes = self.strikes % len(image_lst)\n self.photo = PhotoImage(file=image_lst[self.strikes])\n self.canvas.create_image(340, 240, image=self.photo)", "def newAvatarImage(self, imgPath, imgName): \n img = ocempgui.draw.Image.load_image(imgPath)\n if not self.images[imgName]: \n imgOcemp = guiobjects.OcempImageMapTransparent(img)\n imgOcemp.topleft = 528, 114\n self.window.add_child(imgOcemp)\n self.images[imgName] = imgOcemp\n else:\n self.images[imgName].picture = img", "def _drawimage(self, item, pos, image):\n x, y = pos\n self.cv.coords(item, (x * self.xscale, -y * self.yscale))\n self.cv.itemconfig(item, image=image)", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])", "def blitme(self):\r\n #draw the image to the screen at the position specifid by self.rect.\r\n self.screen.blit(self.image,self.rect)", "def aktualisiere(self):\n if(self.zeichnung == None):\n self.zeichnung = self.leinwand.create_image(self.x, self.y, \n image=self.grafik)\n delta_x = self.zielX - self.x\n 
delta_y = self.zielY - self.y\n self.leinwand.move(self.zeichnung, delta_x, delta_y)\n\n self.x = self.zielX\n self.y = self.zielY", "def mostrar(imagenObj,x,y):\n gameDisplay.blit(imagenObj,(x,y))", "def update(self):\n self.imagecounter +=1\n if self.imagecounter > 7:\n self.imagecounter = 0\n self.image = pygame.image.load(self.pictures[self.imagecounter])\n self.rect = self.image.get_rect()\n self.rect.left = self.x\n self.rect.top = self.y", "def __init__(self,jeu,can,c,x,y,nbr,img,bto):\r\n self.c,self.jeu,self.can=c,jeu,can\r\n self.X,self.Y,self.img,self.bto=x,y,img,bto\r\n self.grille(can,nbr,x,y,c)\r\n self.blabla(nbr,x-46,y-92,c)", "def __init__(self):\n self.image = pygame.image.load(\"cart.jpg\")\n # the bird's position\n self.x = (size[0]/2)-(self.image.get_width()/2)\n self.y = (size[1]-100)-self.image.get_height()\n self.myfont = pygame.font.Font(None, 72)", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def on_draw_over_image(self):", "def build_filler_images(self):", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def blanck_picture(img):\r\n\r\n blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)\r\n blank_image[0:img.shape[0], 0:img.shape[1]] = 0, 0, 0", "def main():\n fg = SimpleImage('image_contest/me.jpg')\n bg = SimpleImage('image_contest/house.png')\n bg.make_as_big_as(fg)\n combined_img = combine(bg, fg)\n combined_img.show()", "def blitme(self):\n\t\tself.screen.blit(self.image,self.rect)", "def draw_a50(self):\r\n\t\tpg.draw.rect(self.image, (100, 200, 100), self.rect)\r\n\t\r\n\t\t#self.display_surface.blit(self.image, self.rect)\r", "def blitme(self):\n\t\tself.screen.blit(self.image, self.rect)", "def blitme(self):\n\t\tself.screen.blit(self.image, self.rect)", "def draw_how_to(self):\n howto = pygame.image.load(res.IMG_HOW_TO)\n self.screen.blit(howto, (0, 0))" ]
[ "0.66279835", "0.642285", "0.63489205", "0.63303906", "0.6293574", "0.6274839", "0.62658745", "0.62628156", "0.6240533", "0.6208319", "0.6188061", "0.61310315", "0.61250305", "0.61130965", "0.6110246", "0.6009262", "0.59982324", "0.59761536", "0.596173", "0.595518", "0.59529346", "0.5946024", "0.593598", "0.5931047", "0.5927512", "0.5923403", "0.58967453", "0.58894295", "0.58894295", "0.5881183" ]
0.7225571
0
align the hitboxes on the player's grid
def aligne_grille(self,x,y,t): [xmin,ymin,xmax,ymax] = self.can.coords(self.hitbox[t]) tx,ty=xmax-xmin,ymax-ymin a,b=23,23 if tx==92 or ty==92 or tx==184 or ty==184: if tx==92 or tx==184:a,b=0,23 if ty==92 or ty==184:a,b=23,0 if 142<y<602 and 66<x<528: x=(x-66)//46*46+66+a y=(y-142)//46*46+142+b return x,y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounding_box(objects,color):\n\n for i in range(len(objects)):\n x, y, w, h, d = objects[i].get_attributes()\n print(x, y, w, h, d)\n corr = get_correction(d, a, hfov, x)\n cv2.rectangle(color, (x-corr, y), (x+w-corr, y+h), (0, 255, 0), 4)\n\n try:\n real_x, real_y = get_dimensions(d, w, h, hfov, vfov, 640, 480)\n real_x = round(real_x, 3)\n real_y = round(real_y, 3)\n\n except:\n real_x, real_y = 'ERROR'\n\n cv2.putText(color, 'depth = ' + str(d) + 'm', (30, i*60 + 30) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'width = ' + str(real_x)+ 'm', (30, i*60 + 45) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'height = ' + str(real_y)+ 'm', (30, i*60 + 60) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n\n if(i < len(objects)-1):\n ## distance between left and right object\n distance = round(distance_between_objects(objects[i], objects[i+1], hfov, 640), 3)\n if distance > l:\n textcolor = (0, 255, 0)\n else:\n textcolor = (0, 0, 255)\n\n cv2.putText(color, 'distance between objects = ' + str(distance) + 'm',\n (320, i*60 + 70) , cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 1)", "def draw(self, win, player, displayList, enemyHitboxList, mapWidth, mapHeight):\n cameraX = player.rect.left + (player.rect.width // 2) - (SCREEN_WIDTH // 2)\n cameraY = player.rect.top + (player.rect.height // 2) - (SCREEN_HEIGHT // 2)\n\n # On centre la camera tant que le joueurs n'atteind pas les bords\n if cameraX >= 0 and cameraX < mapWidth - SCREEN_WIDTH:\n self.x = cameraX\n\n if cameraY >= 0 and cameraY < mapHeight - SCREEN_HEIGHT:\n self.y = cameraY\n\n # Calcul de l'X du joueur en fonction s'il est en haut, bas ou entre les 2\n if cameraX >= 0 and cameraX < mapWidth - SCREEN_WIDTH:\n playerX = (SCREEN_WIDTH // 2) - (player.rect.width // 2)\n else:\n # Si le joueur est a droite\"\"\"\n if cameraX >= mapWidth - SCREEN_WIDTH:\n self.x = mapWidth - SCREEN_WIDTH\n playerX = player.rect.left - mapWidth + SCREEN_WIDTH\n # Si le joueur est a gauche\"\"\"\n else:\n self.x = 0\n playerX = player.rect.left\n\n\n # Calcul de l'Y du joueur en fonction s'il est a gauche, droite ou entre les 2\n if cameraY >= 0 and cameraY < mapHeight - SCREEN_HEIGHT:\n playerY = (SCREEN_HEIGHT // 2) - (player.rect.height // 2)\n else:\n # Si le joueur est en dessous\n if cameraY >= mapHeight - SCREEN_HEIGHT:\n self.y = mapHeight - SCREEN_HEIGHT\n playerY = player.rect.top - mapHeight + SCREEN_HEIGHT\n # Si le joueur est au dessus \n else:\n self.y = 0\n playerY = player.rect.top\n\n for element in displayList:\n element.draw(win,element.rect.left - self.x,element.rect.top - self.y)\n #for elem in enemyHitboxList:\n #pg.draw.rect(win, (200, 200, 200), pg.Rect(elem.left - self.x,elem.top - self.y, elem.width, elem.height))\n player.draw(win, playerX, playerY)", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy 
ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def autolabel(rects):", "def align(self):\n ...", "def __call__(self, loc, scores, anchors, img_size):\n anchors = bbox.loc2bbox(anchor)", "def change_loc_coords(self, field_size):\r\n self.top_left_corner = _get_center_writing(self.button) # sets new center\r\n font_size = int(field_size * 2) # resizes font\r\n self.font = pygame.font.SysFont(None, font_size) # updates font\r", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y))\r\n elif 
verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def box(self, x, y, w, h):\n\t\tpass", "def SimpleReferenceGrid(min_x,min_y,max_x,max_y,x_divisions,y_divisions,\n color=(0.5,1.0,0.5,1.0),xoff=-0.15,yoff=-0.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n hspc=(max_x-min_x)/x_divisions\n vspc=(max_y-min_y)/y_divisions\n\n for hval in numpy.arange(min_x,max_x+hspc/100.0,hspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%.1f\" % hval)\n shps.append(pshp)\n\n for vval in 
numpy.arange(min_y,max_y+vspc/100.0,vspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%.1f\" % vval)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def rectan_button(msg,x,y,w=100,h=100,ic=green,ac=green_bright,action=None,size=20,font='freesansbold.ttf'):\n mouse = pygame.mouse.get_pos() #pobiera pozycje myszki i zwraca x w mouse[0] i y w mouse[1]\n click = pygame.mouse.get_pressed() # click[0] lewy, click[1] srodkowy , click[2] prawy przycisk myszy \n \n #print(mouse)\n a = (x+w > mouse[0] and x < mouse[0] and y+h>mouse[1] and y < mouse[1]) #warunek na to , czy pozycja myszki jest w prostokacie przycisku\n if a: \n pygame.draw.rect(gameDisplay,ac,(x,y,w,h)) #rysuje jasniejszy prostokąt, wydaje sie ze podswietlony, gdy myszka na nim.\n \n if click[0]==1 and action!=None:\n #sleep zeby sie nie wcisnely 2 przyciski jak np. wychodzisz z opcji, a w miejscu przycisku 'back' w glownym menu jest 'start'\n time.sleep(0.1)\n action() \n else:\n pygame.draw.rect(gameDisplay,ic,(x,y,w,h)) #rysuje ciemny prostokat, jesli a nie jest prawdą\n \n\n # tutaj tworzy sie napis na srodku ekranu. 
\n # mozna dorzucic opcje wyboru \n textfont = pygame.font.Font('freesansbold.ttf',20)\n textsurf,textrect = text_objects(msg,textfont,black)\n textrect.center = ((x+(w/2)),(y+(h/2)))\n gameDisplay.blit(textsurf,textrect)", "def _align_toplevel_grid(self):\n\n # align origin with nearest multple of 128\n self.mins[0] -= self.mins[0] % 128\n self.mins[1] -= self.mins[1] % 128\n\n width = self.maxs[0] - self.mins[0]\n height = self.maxs[1] - self.mins[1]\n greatest_dim = max(width, height)\n nearest_pow_two = int(2 ** np.ceil(np.log2(greatest_dim)))\n width_adjustment = (nearest_pow_two - width)\n height_adjustment = (nearest_pow_two - height)\n\n self.maxs[0] += width_adjustment\n self.maxs[1] += height_adjustment", "def show_boxes(img, boundary_boxes, gt_boxes=None):\n\n for (x_tl, y_tl, x_br, y_br) in boundary_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 0, 255), 2)\n\n if gt_boxes is not None:\n for (x_tl, y_tl, x_br, y_br) in gt_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 255, 0), 2)\n\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def draw_bounding_boxes(display, bounding_boxes):\n\n bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT))\n bb_surface.set_colorkey((0, 0, 0))\n for bbox in bounding_boxes:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n # draw lines\n # base\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7])\n pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7])\n display.blit(bb_surface, (0, 0))", "def draw_bounds():\n\n pass", "def positioning(self):\n pass", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def _calibrate_box(self, bboxes, offsets):\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\n w = x2 - x1 + 1.0\n h = y2 - y1 + 1.0\n w = torch.unsqueeze(w, 1)\n h = torch.unsqueeze(h, 1)\n\n # this is what happening here:\n # tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)]\n # x1_true = x1 + tx1*w\n # y1_true = y1 + ty1*h\n # x2_true = x2 + tx2*w\n # y2_true = y2 + ty2*h\n # below is just more compact form of this\n\n # are offsets always such that\n # x1 < x2 and y1 < y2 ?\n\n translation = torch.cat([w, h, w, h], 1).float() * 
offsets\n bboxes += torch.round(translation).int()\n return bboxes", "def SimpleMeasuredGrid(min_x,min_y,max_x,max_y,x_spacing,y_spacing,\n color=(0.5,1.0,0.5,1.0),xoff=-0.14,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%d\" % int(hval+0.5))\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%d\" % int(vval+0.5))\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def interaction_box(self) -> None:\n assert(0 <= self.target.x_obj+self.d_x <= self.grid.width and 0 <=\n self.target.y_obj+self.d_y <= self.grid.height)\n x_beyond_target = self.target.x_obj + self.d_x\n y_beyond_target = self.target.y_obj + self.d_y\n beyond_target = self.grid.obj_list[ # Object on which we could push the box\n x_beyond_target, y_beyond_target]\n if isinstance(beyond_target, ob.Void): # Simply pushing the box\n self.grid.obj_list.swap_obj(beyond_target, self.target)\n self.grid.obj_list.swap_obj(beyond_target, self.moving_character)\n elif isinstance(beyond_target, ob.Hole):\n if beyond_target.depth == 1:\n # Destroying box and hole\n void1 = ob.Void(self.target.x_obj, self.target.y_obj)\n void2 = ob.Void(x_beyond_target, y_beyond_target)\n self.grid.obj_list[self.target] = void1\n self.grid.obj_list[beyond_target] = void2\n # Then moving character\n self.grid.obj_list.swap_obj(void1, self.moving_character)\n else:\n # Reducing depth of the hole\n beyond_target.reduce_depth()\n # Destructing the box\n void = ob.Void(self.target.x_obj, self.target.y_obj)\n self.grid.obj_list[self.target] = void", "def placement(self,event):\r\n x,y,ship=event.x,event.y,False\r\n [xmin,ymin,xmax,ymax] = self.can.coords(self.hitbox[self.select])\r\n k=2\r\n if self.select==1 or self.select==2 or self.select==0:k=1\r\n 
axe,a,b=1,0*k,-1*k\r\n if xmax-xmin == 46:axe,a,b=0*k,-1*k,0\r\n x,y=(x-20)//46,(y-96)//46\r\n if self.select!=-1:\r\n ship=self.game.j1.replace_ship(x+b,y+a,self.select,axe)\r\n if 0<=x<=11 and 0<=y<=11:\r\n self.game.j1.main_ship(x+b,y+a,self.select,axe)\r\n self.game.j1.affichage()", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n height = np.round(height, 3)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n fontsize=20)", "def draw_ocr_group_rects(orig, new_horz, new_verz):\n print(len(new_horz) + len(new_verz),'groups')\n for i,tbox in enumerate(new_horz):\n cv2.rectangle(orig, tbox.p1,tbox.p2, [0,0,200])\n\n for i,tbox in enumerate(new_verz):\n cv2.rectangle(orig, tbox.p1,tbox.p2, [0,180,0])", "def update_bbox(self): \r\n \r\n centX, centY = self.center\r\n\r\n brush_thickness = self.brush[0]\r\n\r\n margin = self.__size + brush_thickness + BOUNDARY_MARGIN\r\n\r\n self.bbox = [int(centX - margin), int(centY - margin),\r\n int(centX + margin), int(centY + margin)]", "def draw_boxes(indexes, frame, all_boxes):\n bbox = []\n mid_points = []\n\n for i in indexes:\n x = i[0]\n box = all_boxes[x]\n bbox.append(box)\n mid_points.append(mid_point(frame, box))\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n\n cv2.rectangle(frame, (x1,y1),(x2,y2),(255,0,0),2) \n\n return mid_points, bbox", "def _bounce(self):\n right = self.surface.get_width() - self.size\n left = self.size\n top = self.size\n bottom = self.surface.get_height() - self.size\n if self.pos.x > right: # right border\n self.pos.x = right\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n elif self.pos.x < left: # left border\n self.pos.x = left\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n if self.pos.y > bottom: # bottom border\n self.pos.y = bottom\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)\n elif self.pos.y < top: # top border\n self.pos.y = top\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)" ]
[ "0.6347145", "0.5834143", "0.5796904", "0.5795583", "0.56838274", "0.56507903", "0.5610431", "0.558297", "0.5550353", "0.5532743", "0.5494045", "0.5491575", "0.54865843", "0.5478262", "0.5457589", "0.5449843", "0.5440341", "0.54399365", "0.543528", "0.5425247", "0.5423976", "0.54167515", "0.541662", "0.5411574", "0.54075587", "0.5394967", "0.53694385", "0.5363606", "0.5357821", "0.53547424" ]
0.6969691
0
get rank size and rank id
def _get_rank_info(): rank_size = int(os.environ.get("RANK_SIZE", 1)) if rank_size > 1: rank_size = get_group_size() rank_id = get_rank() else: rank_size = 1 rank_id = 0 return rank_size, rank_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rank(self):\r\n return self.rank", "def get_rank(self) -> int:\r\n return self.rank", "def get_rank(self):\n return self.rank", "def rank():\n return 0", "def getRank(self):\r\n return self.rank", "def _get_local_rank_size(comm):\n this_node = platform.node()\n ranks_nodes = comm.allgather((comm.Get_rank(), this_node))\n node2rankssofar = collections.defaultdict(int)\n local_rank = None\n for (rank, node) in ranks_nodes:\n if rank == comm.Get_rank():\n local_rank = node2rankssofar[node]\n node2rankssofar[node] += 1\n assert local_rank is not None\n return local_rank, node2rankssofar[this_node]", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def getRank(self):\n return self.rank", "def get_rank(self):\n return self.__rank", "def __rank__(self) -> int:", "def get_rank(self):\n return int(self._rank)", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def getRank(self):\n return self._rank", "def get_rank() -> int:\n return collective.get_rank()", "def get_rank(self) -> int:\n return dist.get_rank()", "def rank(self):\n return self.lib.calculate_rank()", "def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0", "def getRank(self, steamid):\r\n if self.__contains__(steamid):\r\n return self.ranks.index(steamid) + 1\r\n return self.__len__()", "def get_rank(self):\n \n if self.rank == None:\n self.rank = self.main_ranker(self.string)\n \n return self.rank", "def size(self):\n return self.prev(self.rank).prev().rank + 1", "def calc_rank(id=13197473):\r\n player_url = urllib.parse.urlparse(\"http://osu.ppy.sh/pages/include/profile-general.php?u=player_id&m=0\".replace('player_id', str(id)))\r\n page = urlopen(player_url.geturl())\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n table_divs = soup.findAll('div', attrs={'class': 'profileStatLine'})\r\n\r\n import re\r\n pattern = '\\(#\\d*,*\\d+\\)'\r\n for div in table_divs:\r\n for childdiv in div.find_all('b'):\r\n result = re.search(pattern, str(childdiv.text))\r\n my_ranking = int(result.group(0).replace(',', '').replace(\"(#\", '').replace(\")\", ''))\r\n break\r\n break\r\n return my_ranking", "def rank():\n return int(os.environ['RANK'])", "def get_size(self) -> Tuple[int]:\r\n return self.files, self.ranks", "def bgsize_rankspernode(self):\n\t bg_size = int(math.ceil((self.mpi_procs * self.omp_threads)/ self.hw.cores_per_node))\n\t bg_size = max(bg_size, 32) # TODO hardcoded\n\t ranks_per_node = int(math.ceil(self.mpi_procs / bg_size))\n\n\t return bg_size, ranks_per_node", "def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank", "def rank(self) -> int:\n return self._rank" ]
[ "0.7020067", "0.6835348", "0.68337727", "0.681984", "0.6792252", "0.67085946", "0.668368", "0.6615326", "0.65932256", "0.65334713", "0.65328425", "0.65063375", "0.65063375", "0.65063375", "0.65063375", "0.65063375", "0.64363235", "0.6394922", "0.63880634", "0.63718396", "0.625561", "0.6194855", "0.618231", "0.61676824", "0.61334145", "0.6128162", "0.6122792", "0.6105688", "0.6100074", "0.60875803" ]
0.79825246
0
Creates model adapter from config
def get_model_adapter(config):
    if config['task'] == 'joint':
        return JointModelAdapter()
    elif config['task'] == 'keypoints':
        return KeypointsModelAdapter()
    elif config['task'] == 'headsegmentation':
        return HeadSegmentationModelAdapter()
    elif config['task'] == 'detect':
        return DetectionModelAdapter(config['model'])
    return ClassificationModelAdapter()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_config(cls, model_config: Union[dict, ModelConfig]) -> Type[AbstractModel]:\n\n if not (model_config and isinstance(model_config, (ModelConfig, dict))):\n msg = f\"Need a valid model config to create a text/tagger model in AutoModel. \" \\\n f\"Found model_config={model_config} of type({type(model_config)})\"\n raise ValueError(msg)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class(model_config)", "def __init__(self, vendor, generic_config, adapter_config):\n self.set_generic_config(**generic_config)\n self.em_adapter = construct_adapter(vendor, module_type='em', **adapter_config)", "def _from_config(cls, config, **kwargs):\n return cls(config, **kwargs)", "def create_model(config_obj: Union[ModelConfig, dict], random_seed: int = default_random_seed) -> BaseModel:\n if isinstance(config_obj, dict):\n config_obj = ModelConfig.from_dict(config_obj)\n model_type = get_from_registry(config_obj.model_type, model_type_registry)\n return model_type(config_obj, random_seed=random_seed)", "def __init__(self, config):\n super().__init__()\n self.model_list = []\n self.model_name_list = []\n for key in config[\"Models\"]:\n model_config = config[\"Models\"][key]\n freeze_params = False\n pretrained = None\n if \"freeze_params\" in model_config:\n freeze_params = model_config.pop(\"freeze_params\")\n if \"pretrained\" in model_config:\n pretrained = model_config.pop(\"pretrained\")\n model = BaseModel(model_config)\n if pretrained is not None:\n load_pretrained_params(model, pretrained)\n if freeze_params:\n for param in model.parameters():\n param.trainable = False\n self.model_list.append(self.add_sublayer(key, model))\n self.model_name_list.append(key)", "def __init__(self, name, config):\n super(Model, self).__init__()\n # set all config values as attributes on the model for ease of access\n self.config = config\n for key in config.keys():\n setattr(self, key, config[key])\n # override the name with the run number appended name\n self.name = name\n self.param_groups = []", "def from_config(cls, config):\n return cls(**config)", "def from_config(cls, config):\n return cls(**config)", "def from_config(cls,config):\n ## find labels in list\n label_list = load_label_list(config.label_list)\n use_cuda = True if torch.cuda.is_available() else False\n\n global_args = {\n \"fp16\" : False,\n \"classification_report\" : True,\n \"tensorboard_dir\" : config.tensorboard_dir,\n \"wandb_project\" : config.wandb_project,\n \"wandb_kwargs\" : {\n \"name\" : config.wandb_name,\n \"entity\" : config.wandb_entity,\n }\n }\n\n model = NERModel(\n config.model_name,\n config.model_type,\n use_cuda=use_cuda,\n labels=label_list,\n args=global_args,\n )\n return cls(model,config)", "def build_model(cfg, **kwargs):\n name = cfg.name\n return MODEL_REGISTRY.get(name)(cfg=cfg, **kwargs)", "def __init__(self, dsn, model=None, transactional=True, twophase=False,\n engine_options={}, session_options={},\n extension_options={}, **kw):\n\n self.dsn = dsn\n self.url = make_url(dsn)\n self.host = self.url.host\n self.port = self.url.port\n self.username = self.url.username\n self.password = self.url.password\n self.dbname = self.url.database\n self.twophase = twophase\n self.drivername = 
self.url.drivername\n self.transactional = transactional\n self.engine_options = engine_options\n if 'echo' in kw:\n self.engine_options.update(echo=kw['echo'])\n self.session_options = session_options\n self.extension_options = extension_options\n self._model = None\n self._createEngine()\n\n if model:\n\n if isinstance(model, Model):\n self._model = model\n\n elif isinstance(model, str):\n\n try:\n util = getUtility(IModelProvider, model)\n except ComponentLookupError:\n msg = \"No named utility '%s' providing IModelProvider\"\n raise ComponentLookupError(msg % model)\n\n self._model = util.getModel(self.metadata)\n\n elif callable(model):\n self._model = model(self.metadata)\n\n else:\n raise ValueError(\"The 'model' parameter passed to constructor \"\n \"must either be the name of a named utility \"\n \"implementing IModelProvider or an instance \"\n \"of z3c.sqlalchemy.model.Model.\")\n\n if not isinstance(self._model, Model):\n raise TypeError('_model is not an instance of model.Model')\n\n # mappers must be initialized at last since we need to acces\n # the 'model' from within the constructor of LazyMapperCollection\n self._mappers = LazyMapperCollection(self)", "def from_config(cls, config):\n if \"implementation\" in config and config[\"implementation\"] == 0:\n config[\"implementation\"] = 1\n return cls(**config)", "def from_config(cls, config: Dict[str, Any]) -> \"ClassyLoss\":\n raise NotImplementedError()", "def from_config(config: dict):\n pass", "def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def from_configuration(cls, **kwargs):\n return cls(**kwargs)", "def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)", "def from_config(cls, config: dict):\n timestamp = config.get('timestamp', None)\n return cls(config.get('id'),\n config.get('type'),\n config.get('data', dict()),\n config.get('origin', None),\n timestamp,\n config.get('object_type', None),\n config.get('object_id', None),\n config.get('object_key', None))", "def from_config_plan(cls,\n model_cfg: dict,\n plan_arch: dict,\n plan_anchors: dict,\n log_num_anchors: str = None,\n **kwargs,\n ):\n raise NotImplementedError", "def from_dict(cls, dikt) -> 'ModelDeploymentConfig':\n return util.deserialize_model(dikt, cls)", "def convert_from_config(config):\n\n if isinstance(config, str):\n yamlConfig = parse_yaml_config(config)\n else:\n yamlConfig = config\n\n model = None\n if 'OnnxModel' in yamlConfig:\n if __onnx_enabled__:\n model = onnx_to_hls(yamlConfig)\n else:\n raise Exception(\"ONNX not found. 
Please install ONNX.\")\n elif 'PytorchModel' in yamlConfig:\n if __pytorch_enabled__:\n model = pytorch_to_hls(yamlConfig)\n else:\n raise Exception(\"PyTorch not found. Please install PyTorch.\")\n else:\n model = keras_to_hls(yamlConfig)\n\n return model", "def models(config_path):\n autograder.setup_app(config_path)\n\n # Now that setup has occurred, we can import the models\n from autograder import models as m\n\n # Make sure that if we've used a different db setup in another module\n # we don't keep trying to write to that database\n m.db.session.remove()\n\n m.drop_all()\n m.create_all()\n return m", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def get_model(self) -> BaseLanguageModel:\n model = available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model", "def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter", "def config(self, user_config: ModelConfigDict):\n self._user_config = user_config\n self.config_obj = ModelConfig.from_dict(self._user_config)", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def _build_data_connector_from_config(\n self,\n name: str,\n config: Dict[str, Any],\n ) -> DataConnector:\n new_data_connector: DataConnector = instantiate_class_from_config(\n config=config,\n 
runtime_environment={\n \"name\": name,\n \"datasource_name\": self.name,\n \"execution_engine\": self.execution_engine,\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource.data_connector\"\n },\n )\n new_data_connector.data_context_root_directory = (\n self._data_context_root_directory # type: ignore[assignment]\n )\n\n self.data_connectors[name] = new_data_connector\n return new_data_connector" ]
[ "0.65065086", "0.6332431", "0.6174222", "0.602394", "0.5932728", "0.5869356", "0.58404166", "0.58404166", "0.5832524", "0.582974", "0.582962", "0.5828994", "0.57814497", "0.5775429", "0.57427996", "0.5738938", "0.5738938", "0.5731172", "0.5715343", "0.569226", "0.5625734", "0.5624008", "0.56060505", "0.5587136", "0.5574393", "0.55738926", "0.5570389", "0.5567938", "0.5567108", "0.5550685" ]
0.74726766
0
saves the vectorizer to disk using json
def save_vectorizer(self, vectorizer_filepath):
    with open(vectorizer_filepath, "w") as fp:
        json.dump(self._vectorizer.to_serializable(), fp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, dirname=None):\n self.genio.save(dirname)\n logging.info(\n f'Saved word vectorizations for {dirname}')", "def pickle_vectorizer(self, path='models/TFIDFVectorizer.pkl'):\n with open(path, 'wb') as f:\n pickle.dump(self.vectorizer, f)\n print(\"Pickled vectorizer at {}\".format(path))", "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def save(self, path):\n np.savez_compressed(path, **self.model_dict)", "def write_vector(vector, outfile):\r\n out_dir = os.path.dirname(outfile)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n vector = vector.copy()\r\n for k in vector:\r\n if isinstance(vector[k], np.ndarray):\r\n vector[k] = vector[k].round(4).tolist()\r\n with open(outfile, 'w') as f:\r\n json.dump(vector, f)\r\n f.write('\\n')\r\n\r\n print(\" ... wrote {}\".format(outfile))", "def save(self, tfidf_vectorizer_path):\n with open(tfidf_vectorizer_path, \"wb\") as fw:\n pickle.dump(self, fw)", "def write_vector(vector, outfile):\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n vector = vector.copy()\n for k in vector:\n if isinstance(vector[k], np.ndarray):\n vector[k] = vector[k].round(4).tolist()\n with open(outfile, 'w') as f:\n json.dump(vector, f, separators=(',', ': '), indent=4)\n f.write('\\n')\n\n print(\" ... 
wrote {}\".format(outfile))", "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"cost\": str(self.cost.__name__)}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "def Save(self, filename: str):\n data_object = {\n \"input_layer_count\" : self.input_layer_size,\n \"hidden_layer_count\" : self.hidden_layer_size,\n \"output_layer_count\" : self.output_layer_size,\n\n \"hidden_layer_biases\" : self.hidden_layer_biases.tolist(),\n \"output_layer_biases\" : self.output_layer_biases.tolist(),\n\n \"input_to_hidden_weights\" : self.input_to_hidden_weights.tolist(),\n \"hidden_to_output_weights\" : self.hidden_to_output_weights.tolist()\n }\n\n with open(filename, \"w\") as f:\n json.dump(data_object, f)", "def run(self, verbose=False):\n from utils import write_to_file # function to write json to file\n self.read_json()\n graph = self.parse_jsons()\n json = self.pipe_vl2vg(graph)\n return self.write_to_file(rawinput=json, filetype='json', output_path=self.output_path, engine_name=self.engine_name, algorithm_name=self.algorithm_name, suffix=self.file_suffix, verbose=verbose)", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def saveFile(self, filename=\"UQModelTest.json\"):\n sd = self.saveDict()\n with open(filename, \"w\") as f:\n json.dump(sd, f, indent=2)", "def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")", "def save_trained_model(self, filename):\n d = self.pack_npz()\n with open(filename, 'wb') as f:\n np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def save_data_file(self):\n with open(self.files['data'], 'w') as outfile:\n outfile.write(self.to_json())\n outfile.close()", "def _save(self):\n with open(self.file_path, 'w') as fid:\n 
json.dump(self.data, fid, indent=4, sort_keys=True)", "def save(self, filename):\n\t\tnp.savetxt(filename, self.V)\n\t\treturn", "def save(self, path: str):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(self.to_json())", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)", "def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)", "def save(self, path=\"word2vec_keras.tar.gz\"):\n tokenizer_path = os.path.join(tempfile.gettempdir(), \"tokenizer.pkl\")\n label_encoder_path = os.path.join(tempfile.gettempdir(), \"label_encoder.pkl\")\n params_path = os.path.join(tempfile.gettempdir(), \"params.pkl\")\n keras_path = os.path.join(tempfile.gettempdir(), \"model.h5\")\n w2v_path = os.path.join(tempfile.gettempdir(), \"model.w2v\")\n\n # Dump pickle\n pickle.dump(self.tokenizer, open(tokenizer_path, \"wb\"))\n pickle.dump(self.label_encoder, open(label_encoder_path, \"wb\"))\n pickle.dump(self.__attributes__(), open(params_path, \"wb\"))\n pickle.dump(self.w2v_model, open(w2v_path, \"wb\"))\n self.k_model.save(keras_path)\n # self.w2v_model.save(w2v_path)\n\n # Create Tar file\n tar = tarfile.open(path, \"w:gz\")\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n tar.add(name, arcname=os.path.basename(name))\n tar.close()\n\n # Remove temp file\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n os.remove(name)", "def save(self, filename):\n import json\n\n json = json.dumps(self.joint_limits)\n with open(filename, 'w') as f:\n f.write(json)", "def save(self, path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n np.save(os.path.join(path, 'V.npy'), self.V.cpu().numpy())\n\n if self.W is not None:\n np.save(os.path.join(path, 'W.npy'), self.W.cpu().numpy())\n\n if self.vb is not None:\n np.save(os.path.join(path, 'v_bias.npy'), self.vb.cpu().numpy())\n\n if self.wb is not None:\n np.save(os.path.join(path, 'w_bias.npy'), self.wb.cpu().numpy())\n\n if self.dictionary is not None:\n self.dictionary.save(os.path.join(path, 'dictionary'))" ]
[ "0.6963584", "0.680034", "0.6709311", "0.6637569", "0.6591434", "0.65723944", "0.65660405", "0.6541175", "0.6505122", "0.6481239", "0.6412874", "0.6401869", "0.63947797", "0.63763016", "0.6329883", "0.6322585", "0.6317829", "0.62863415", "0.6257502", "0.6255731", "0.623655", "0.6221323", "0.6173903", "0.6126636", "0.612619", "0.6125182", "0.61240315", "0.61185634", "0.61028457", "0.60543096" ]
0.821503
0
This function inserts geocode to soybean collection
def insert_geo_to_mongo(collection):
    if not collection:
        log_utils.log_msg_error(logger=logger, key='INSERTGEOCODE0001', msg='Collection is None')
        return None
    cursor = collection.find()
    count = 1
    for each in cursor:
        location = each['location_desc']
        id = each['_id']
        lat, lon = geo_utils.get_location_from_geopy(location)
        collection.update({'_id': id}, {"$set": {'geo_location': {'lat': lat, 'lon': lon}}}, True)
        print count
        count += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upsert_location(self, location):", "def insert_city(self, city_point):\n city = City(city_point)\n self.map.insert(city)", "def geocode(df, col):\r\n pass", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def sync_set_geocoding(provider, tracking_id, event_id):\n coordinates = get_coordinates_from_id(tracking_id=tracking_id, event_id=event_id)\n geocoding = None\n if coordinates:\n if not provider or provider == 'osm':\n geocoding = get_osm_geocoding(coordinates)\n if geocoding == None:\n geocoding = get_google_geocoding(coordinates)\n elif provider == 'google':\n geocoding = get_google_geocoding(coordinates)\n if geocoding == None:\n geocoding = get_osm_geocoding(coordinates)\n if geocoding:\n if tracking_id:\n json_entry = dict()\n json_entry['geocoding'] = str(geocoding)\n mongo.update_single_document(collection='TRACKING', document=json_entry, filter={'_id':ObjectId(tracking_id)})\n if event_id:\n json_entry = dict()\n json_entry['geocoding'] = str(geocoding)\n mongo.update_single_document(collection='EVENT', document=json_entry, filter={'_id': ObjectId(event_id)})", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def save_venue(data, form):\n venue = form.save()\n\n venue.city = City.objects.get(id=int(data.get('city_identifier')))\n venue.country = Country.objects.get(name='Canada')\n venue.location = Point((\n float(data.get('location_lng')),\n float(data.get('location_lat'))\n ))\n venue.save()", "def geocode(self, geocoder):\n for term in self.terms:\n # No need to geocode regions\n if not term.get('region'):\n geo = geocoder.geocode(term['string'])\n if geo:\n term['geo'] = geo\n if not self.region:\n # TODO: descobrir regiao do ponto\n self.region = \"???\"\n else:\n self.region = term['region']", "def add_location(cities, states, business):\n\tcities.add(business[CITY])\n\tstates.add(business[STATE])", "def add_city(g, code, name, country, continent, timezone, coordinates, population, region):\n port = Ports(code, name, country, continent, timezone, coordinates, population, region)\n g.city_dict[code] = port\n g.convert[name] = code \n return g", "def add_place(name, country, city, street):\n place = Place(name=name, country=country, city=city, street=street)\n session.add(place)\n session.commit()", "def create_place():\n\n q = \"\"\"\n INSERT INTO escuelasutf8 (nombre, direccion, localidad,\n wkb_geometry_4326,\n id_distrito, id_seccion)\n VALUES ('%s', '%s', '%s', '%s', '%s', '%s')\n RETURNING ogc_fid\n \"\"\" % (\n request.form['nombre'].replace(\"'\", \"''\"),\n request.form['direccion'].replace(\"'\", \"''\"),\n request.form['localidad'].replace(\"'\", \"''\"),\n request.form['wkb_geometry_4326'],\n request.form['distrito'],\n request.form['seccion']\n )\n r = db.query(q)\n return flask.Response(flask.json.dumps(r.next()),\n mimetype=\"application/json\")", "def insertWords(db, geo_id, words):\n def f( word):\n return {'geo_id' : geo_id, 'word': word}\n try:\n db.word2geo.insert(map( f, words))\n except:\n print \"error in \" + geo_id\n print map( f, words)", "def maxmind_geocode():\n reader = maxminddb.open_database('GeoLite2-City.mmdb')\n asn = maxminddb.open_database('GeoLite2-ASN.mmdb')\n\n unique_ips = 
session.query(UniqueVictims).all()\n\n for ip in unique_ips:\n try:\n current_ip = reader.get(ip.ip)\n asn_ip = asn.get(ip.ip)\n ip.lat = current_ip['location']['latitude']\n ip.long = current_ip['location']['longitude']\n if 'city' in current_ip:\n ip.city = current_ip['city']['names']['en']\n if 'country' in current_ip:\n ip.country = current_ip['country']['names']['en']\n if asn_ip:\n ip.isp = asn_ip['autonomous_system_organization']\n except TypeError:\n continue\n session.commit()", "def test_pos_operate_incr_with_geospatial_new_record(self):\n key = (\"test\", \"demo\", \"geospatial_key\")\n\n llist = [\n {\n \"op\": aerospike.OPERATOR_INCR,\n \"bin\": \"geospatial\",\n \"val\": aerospike.GeoJSON({\"type\": \"Point\", \"coordinates\": [42.34, 58.62]}),\n },\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"geospatial\"},\n ]\n\n (key, _, bins) = TestOperate.client_no_typechecks.operate(key, llist)\n\n assert bins[\"geospatial\"].unwrap() == {\"coordinates\": [42.34, 58.62], \"type\": \"Point\"}\n TestOperate.client_no_typechecks.remove(key)", "def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df", "def add_geo_shape(self, field, point, distance):\n from haystack.utils.geo import ensure_point, ensure_distance\n self.geo_shape = {\n 'field': field,\n 'point': ensure_point(point),\n 'distance': ensure_distance(distance),\n }", "def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")", "def insert_to_database(self, cur):\n if (not self.non_empty):\n self.logger.warning('No businesses in cbg {}.'.format(self.cbg))\n return None\n # If some businesses are present\n values = {}\n for b in self.json['businesses']:\n # Iterate through businesses \n for f in database_fields_level_1:\n # Get the level - 1 fields\n if (f == 'price'):\n try:\n values[f] = len(b[f])\n except KeyError:\n values[f] = -1\n except:\n logger.error('Error in price handling. 
', exc_info = True) \n else:\n values[f] = b[f]\n for f in database_fields_level_2:\n # Get the level - 2 fields\n values[f] = b['location'][f]\n for f in database_fields_level_3:\n # Get the level - 3 fields\n values[f] = b['coordinates'][f]\n # Format the insert statement\n values = {key: value for (key, value) in values.items() if value}\n (all_fields, all_values) = zip(*values.items())\n all_values = ', '.join([f'%({x})s' for x in all_fields])\n all_fields = ', '.join(all_fields)\n values = {key: to_str_if_list(value) for (key,value) in values.items()}\n local_insert_statement = insert_statement.format(\n all_fields = all_fields,\n all_values = all_values\n )\n self.logger.debug(local_insert_statement)\n # Execute the insert statement \n cur.execute(local_insert_statement, values)\n return None", "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def _geocode(self, phn, street, borough_code=None, zip=None):\n try:\n r = self._g[self.geofunction](house_number=phn, street=street, borough_code=borough_code, zip=zip)\n self.results.append(r)\n except GeosupportError as ge:\n if 'SIMILAR NAMES' in ge.result[\"Message\"]:\n list_of_street_names = ge.result['List of Street Names']\n r = [{\n 'street': s,\n 'borough_code': borough_code\n } for s in list_of_street_names]\n self.similiar_names.extend(r)", "def insert_fix(**kwargs):\n kwargs['fix_center'] = ut.mk_point(lon=kwargs['lon'], lat=kwargs['lat'], alt=0)\n \n \"\"\"\n sql = \"insert into fix(ident, major, point) values(\"\n sql += \"%(ident)s, %(major)s, \"\n sql += \"ST_Transform(ST_GeomFromText(%(fix_center)s, 4326),3857));\"\n \"\"\"\n sql = \"insert into navaid(ident, name, search, ntype_id, point) values(\"\n sql += \"%(ident)s, %(ident)s, %(ident)s, 201, \"\n sql += \"ST_Transform(ST_GeomFromText(%(fix_center)s, 4326),3857));\"\n #print sql\n db.Cur.execute(sql, kwargs)", "def geo_locate(cursor: sqlite3.Cursor):\n cursor.execute('''DELETE FROM location_cache''') # Scrub previous results to start over\n\n geo_code = Nominatim(user_agent=\"capstone_project\")\n cursor.execute(\"\"\"SELECT location FROM combined_jobs\"\"\")\n jobs = cursor.fetchall() # Set to .fetchall once development is complete\n\n for location in jobs:\n try:\n full_loc = geo_code.geocode(location[0])\n print(location[0])\n cursor.execute(f\"\"\"INSERT INTO location_cache(location, latitude, longitude)\n VALUES (?,?,?)\"\"\", (location[0], full_loc.latitude, full_loc.longitude))\n except AttributeError:\n print(AttributeError)\n except sqlite3.IntegrityError:\n print(sqlite3.IntegrityError)", "def test_has_geocode(self):\n\n lat, lng = 33.210241, -97.148857\n name = Name.objects.create(name=\"Test Name\", name_type=Name.PERSONAL)\n Location.objects.create(belong_to_name=name, longitude=lng,\n latitude=lat)\n assert name.has_geocode()", "def post(self, request, format=None):\n success = False\n try:\n line1=request.data[\"line1\"]\n district=request.data[\"district\"]\n state=request.data[\"state\"]\n pincode=request.data[\"pincode\"]\n branch=request.data[\"branch\"]\n address_obj = Address(line1=line1,district=district,\n state=state,pincode=pincode,branch=Branch.objects.get(pk=branch))\n address_obj.save()\n address_string = district+\", \"+state+\", \"+pincode\n 
if address_obj.id:\n location_coordinates = GeolocationApi.get_lat_lng(address_string)\n geolocation_obj = Geolocation(address=address_obj,\n lat=location_coordinates[\"latitude\"],\n lng=location_coordinates[\"latitude\"])\n geolocation_obj.save()\n success=True\n except Exception as e:\n success=False\n print(e)\n return Response(success)", "def geotransform(street_address_column, borough_column, zip_code_column, in_csv_file_loc, out_csv_file_loc):\r\n with open(out_csv_file_loc, 'wb') as csv_new_file:\r\n fieldnames = ['2010 Census Block',\r\n '2010 Census Block Suffix',\r\n '2010 Census Tract',\r\n 'Assembly District',\r\n 'Atomic Polygon',\r\n 'B10SC First Borough and Street Code',\r\n 'Bike Lane',\r\n 'Borough Block Lot (BBL)',\r\n 'Building Identification Number (BIN) of Input Address or NAP',\r\n 'City Council District',\r\n 'Community District',\r\n 'Community School District',\r\n 'Congressional District',\r\n 'DSNY Snow Priority Code',\r\n 'Election District',\r\n 'First Borough Name',\r\n 'House Number Display Format',\r\n 'House Number Sort Format',\r\n 'Hurricane Evacuation Zone (HEZ)',\r\n 'Message',\r\n 'NTA Name',\r\n 'Neighborhood Tabulation Area (NTA)',\r\n 'Police Precinct',\r\n 'Roadway Type',\r\n 'Second Street Name Normalized',\r\n 'Spatial Coordinates of Segment',\r\n 'State Senatorial District',\r\n 'USPS Preferred City Name',\r\n 'X-Y Coordinates of Lot Centroid',\r\n 'Zip Code',\r\n 'Latitude',\r\n 'Longitude',\r\n 'Spatial X',\r\n 'Spatial Y']\r\n writer = csv.DictWriter(csv_new_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n \r\n with open(in_csv_file_loc, 'rb') as csvfile:\r\n csvreader = csv.DictReader(csvfile, delimiter = ',')\r\n for row in csvreader:\r\n full_address = row[street_address_column].strip()\r\n split_full_address = full_address.split(' ')\r\n house_number = split_full_address[0]\r\n borough = row[borough_column].strip()\r\n boro_code = borough_transform(borough)\r\n zip_code = row[zip_code_column].strip()\r\n street_name = ' '.join(split_full_address[1:])\r\n \r\n (wa1, wa2) = geo_coder(house_number, boro_code, street_name, zip_code)\r\n \r\n output = Parser(wa1, wa2)\r\n \r\n writer.writerow(output)", "def upload_point(x, y, label=\"\"):\n\n conn = None\n cur = None\n\n try:\n # check the point is inside the usa, both point and states must be WGS84\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n #if the point is inside this will return (True,) otherwise None\n cur.execute(\"\"\"select result from\n (select st_contains(s.geom,ST_GeomFromText('POINT(%s %s)', 4326)) as result \n from %s as s) as subquery\n where result is true\"\"\",(AsIs(x),AsIs(y), AsIs(settings.STATES_TABLE_NAME)))\n\n result = cur.fetchone()\n #print(result)\n\n if result: # if result is not None\n\n #check numbers size, crop to 4 digits, define the marker size\n\n # size symbol\n size=None\n\n # store number of decimal digits\n lx = 0\n ly = 0\n\n # convert numbers to string\n #x = str(x);y = str(y)\n\n if ',' in x or ',' in y:\n raise Exception(\"decimal numbers should not contain ','\")\n\n # check the number of decimal digits and crop to 4\n if '.' in x: # do only for float number\n lx = len(x.split('.')[1]) # get decimals\n if lx > 4: # crop size to 4\n x = x[:(4 - lx)]\n lx = 4\n if '.' 
in y: # do only for float number\n ly = len(y.split('.')[1])\n if ly > 4:\n y = y[:(4 - ly)]\n ly = 4\n\n # select a symbol size according\n # for the size take the bigger number of digits of the two numbers\n ndigits = max([lx, ly])\n if ndigits == 0:\n size = 5\n elif ndigits == 1:\n size = 4\n elif ndigits == 2:\n size = 3\n elif ndigits == 3:\n size = 2\n elif ndigits == 4:\n size = 1\n\n #upload to database\n cur.execute(\n \"\"\"INSERT INTO %s(lat,lon,label,size) VALUES (%s,%s,%s,%s) RETURNING id\"\"\",\n ( AsIs(settings.BOOKMARKS_TABLE_NAME), y, x, label, size))\n #id = cur.fetchone()[0]\n #print(id)\n cur.execute(\"\"\"UPDATE %s SET geom = ST_PointFromText('POINT(' || lon || ' ' || lat || ')', 4326)\"\"\", (AsIs(settings.BOOKMARKS_TABLE_NAME),))\n conn.commit()\n\n else:\n raise Exception(\"the point is not inside USA\")\n\n except Exception as e:\n raise Exception(e)\n\n else:\n return x, y, size #return the cropped coordinates and marker size\n\n finally:\n if cur: cur = None\n if conn: conn = None", "def insert_address_info(address,client_id,extern_client_id,con,cur):\n psql_address=f\"\"\" insert into address \n (extern_id,line1,line2,city,postal_code,state,country,client_id,extern_client_id)\n values \n {\n address.id,\n address.line1,\n address.line2,\n address.city,\n address.postal_code,\n address.state,\n address.country,\n client_id,\n extern_client_id,};\"\"\"\n psql=psql_address\n cur.execute(psql)\n con.commit()", "def insert_school(mongo_collection, **kwargs):\n return mongo_collection.insert(kwargs)", "def geocode(self, resource):\n # Turn the different address components into a formatted string\n search_address = \", \".join(a for a in [resource.street,\n resource.city, resource.state,\n resource.zipcode, resource.country]\n if a is not None and not\n a.isspace())\n\n # Make sure we generated something meaningful\n if search_address and search_address is not None:\n # Now query the geocoder with this formatted string\n geolocator = GoogleV3(api_key=self.api_key)\n address, (latitude, longitude) = geolocator.geocode(search_address)\n\n # Update the resource based on the returned geopy.location.Location\n if address and not address.isspace():\n resource.fulladdress = address\n\n if latitude and longitude:\n resource.latitude = latitude\n resource.longitude = longitude\n\n # FUTURE: Perform additional normalization operations based\n # on the information in Location.raw\n pass" ]
[ "0.65075386", "0.5994585", "0.56468296", "0.5555482", "0.5555266", "0.5511169", "0.54705995", "0.5420754", "0.5408374", "0.5395964", "0.5340212", "0.53328806", "0.5276935", "0.52544767", "0.52543485", "0.52541894", "0.5232013", "0.5220423", "0.5204102", "0.5150884", "0.51253146", "0.5102965", "0.5085384", "0.50724196", "0.50666463", "0.5052962", "0.5042786", "0.5038316", "0.5035375", "0.5019165" ]
0.67928666
0
Creates a plot of the correlations of each of the base-model's predictions on the training set. Specifically, cross-validation is used and the predictions from each holdout-fold (for each model) are used to build a training set for the stacker. The plot shows the correlations for the predictions of each base-model. The correlations are taken before the predictions are transformed by the stacker-specific transformations.
def plot_correlation_heatmap(self):
    if self._model_object is None:
        raise ModelNotFittedError()
    OOLearningHelpers.plot_correlations(correlations=self._train_meta_correlations,
                                        title='Correlations of Models (based on meta-training set)')
    plt.tight_layout()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pairplot_cross_val(df=None, features_corr_matrice=None, model=None, figsize=(10,10), save=False, prefix_name_fig=None, folder='Charts', **kwargs):\r\n\r\n corr_matrice = deepcopy(features_corr_matrice)\r\n features_number = len(corr_matrice.columns) \r\n\r\n fig, ax = plt.subplots(features_number, features_number, figsize=figsize)\r\n\r\n if model is None:\r\n model = LinearRegression\r\n\r\n # Takes the first feature that we will be used to predict the other features\r\n # Will do it for each of the features\r\n for index1, feature1 in enumerate(corr_matrice.index):\r\n xi = df[feature1].to_frame()\r\n\r\n # Takes another feature. This feature is the one that will be predicted thanks to the first feature\r\n # Each feature will be predicted thanks to the selected model and the first feature from the parent loop\r\n for index2, feature2 in enumerate(corr_matrice.columns):\r\n xj = df[feature2].to_frame()\r\n corr_coefs_list = []\r\n\r\n xi_train, xi_test, xj_train, xj_test = train_test_split(xi, xj, test_size=0.5)\r\n\r\n # xj test will be predicted from xi and the model trained on the train set\r\n model.fit(xi_train, xj_train.values.ravel())\r\n mod_predict_test = model.predict(xi_test)\r\n\r\n # xj train will be predicted from xi and the model trained on the test set\r\n model.fit(xi_test, xj_test.values.ravel())\r\n mod_predict_train = model.predict(xi_train)\r\n\r\n # Plots in the same graph the xj prediction against true xj from the test set, and from the train set\r\n ax[index1, index2].plot(xj_test, mod_predict_test, \".\")\r\n ax[index1, index2].plot(xj_train, mod_predict_train, \".\")\r\n\r\n if index2 == 0:\r\n ax[index1, index2].set_ylabel(feature1)\r\n\r\n if index1 == features_number-1:\r\n ax[index1, index2].set_xlabel(feature2)\r\n\r\n # Plots the affine curve y=x starting, and set the x-axis from the min value to the max value of the features values\r\n mi = min(min(xj_test.values), min(mod_predict_test), min(xj_train.values), min(mod_predict_train))\r\n ma = max(max(xj_test.values), max(mod_predict_test), max(xj_train.values), max(mod_predict_train))\r\n ax[index1, index2].plot([mi, ma], [mi, ma], \"--\")\r\n\r\n if save == True:\r\n prefix_name_fig = prefix_name_fig + '_' if prefix_name_fig is not None else ''\r\n plt.savefig(folder + '/' + prefix_name_fig + '.png')\r\n \r\n return ax", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() 
+ .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def plot_corr_normalized(models,data,fit,**kwargs):\n _fnNMod = len(models)\n _fnIdx = [0] ## -- index of plotted function, in array so it can be modified in functions\n ## -- objects to hold all plot data\n ## - Dat/Fit refers to the correlator data or the fit function\n ## - Central/Error are the central value and errors\n _fnDatCentral = []\n _fnDatError = []\n _fnFitOnes = []\n _fnFitError = []\n #\n ## -- other objects\n _fnTDataNonZero = []\n _fnTFitNonZero = []\n _fnTData = []\n _fnTFit = []\n _fnTRem = [] # number of previous timeslices removed\n fig,ax = plt.subplots(1)\n #\n ## -- setup plot function\n def do_plot_normalized(idx,fig=fig):\n fig.clear()\n ax = fig.add_subplot(111)\n key = models[idx[0]].datatag\n\n ax.set_xlim([-1,len(_fnTData[idx[0]])])\n ax.set_ylim(utp.get_option(\"y_limit\",[0.2,1.8],**kwargs[key]))\n #\n ## -- plot fit\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitOnes[idx[0]],\n color=utp.get_option(\"color2\",'b',**kwargs[key]))\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][0],\n color=utp.get_option(\"color2\",'g',**kwargs[key]),\n ls=utp.get_option(\"linestyle2\",'--',**kwargs[key]))\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][1],\n color=utp.get_option(\"color2\",'g',**kwargs[key]),\n ls=utp.get_option(\"linestyle2\",'--',**kwargs[key]))\n ## -- plot correlator data\n ax.errorbar(_fnTDataNonZero[idx[0]],_fnDatCentral[idx[0]],yerr=_fnDatError[idx[0]],\n mfc=utp.get_option(\"markerfacecolor1\",'None',**kwargs[key]),\n mec=utp.get_option(\"markeredgecolor1\",'k',**kwargs[key]),\n color=utp.get_option(\"markeredgecolor1\",'k',**kwargs[key]),\n ls=utp.get_option(\"linestyle1\",'None',**kwargs[key]),\n marker=utp.get_option(\"marker1\",'o',**kwargs[key]),\n ms=utp.get_option(\"markersize\",6,**kwargs[key]))\n ax.scatter(_fnTFitNonZero[idx[0]],\n [ _fnDatCentral[idx[0]][t] for t in\n list(np.array(_fnTFitNonZero[idx[0]])-np.array(_fnTRem[idx[0]])) ],\n color=utp.get_option(\"color1\",'r',**kwargs[key]),\n 
marker=utp.get_option(\"marker\",'o',**kwargs[key]),\n s=utp.get_option(\"markersize\",36,**kwargs[key]))\n fig.suptitle(utp.get_option(\"plottitlefn\",str(idx[0])+\" default title \"+str(key),**kwargs[key]),\n fontsize=utp.get_option(\"titlesize\",20,**kwargs[key]))\n ## -- modify some options \n ax.set_xlabel(r'$t$')\n ax.set_ylabel(utp.get_option(\"yaxistitle\",r\"$C(t)/C_{fit}(t)$\",**kwargs[key]))\n for item in ([ax.xaxis.label,ax.yaxis.label]):\n # must be after setting label content (LaTeX ruins it)\n item.set_fontsize(fontsize=utp.get_option(\"fontsize\",20,**kwargs[key]))\n rect =fig.patch\n rect.set_facecolor('white')\n if utp.get_option(\"to_file\",False,**kwargs[key]):\n save_dir = utp.get_option(\"fn_save_dir\",\"./plotdump\",**kwargs[key])\n save_name = utp.get_option(\"fn_save_name\",\"fnplot-\"+key+\".pdf\",**kwargs[key])\n plt.savefig(save_dir+'/'+save_name)\n if utp.get_option(\"to_terminal\",True,**kwargs[key]):\n plt.draw()\n pass\n #\n ## -- setup button press action function\n def press_normalized(event,idx=_fnIdx):\n #print('press_normalized', event.key)\n try:\n ## -- manually indicate index\n idx[0] = int(event.key) + (idx[0])*10\n except ValueError:\n if event.key==' ': ## -- space\n ## -- allows for replotting when changing index by typing number keys\n idx[0] = idx[0] % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='left':\n idx[0] = (idx[0] - 1) % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='right':\n idx[0] = (idx[0] + 1) % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='backspace':\n ## -- reset index so can manually flip through using number keys\n idx[0] = 0\n elif event.key=='d':\n ## -- dump plots into ./plotdump directory\n for ix,model in zip(range(len(models)),models):\n key = model.datatag\n save_dir = utp.get_option(\"fn_save_dir\",\"./plotdump\",**kwargs[key])\n save_name = utp.get_option(\"fn_save_name\",\"fnplot-\"+key+\".png\",**kwargs[key])\n do_plot_normalized([ix])\n plt.savefig(save_dir+'/'+save_name)\n do_plot_normalized(idx)\n #\n ## -- \n fig.canvas.mpl_connect('key_press_event',press_normalized)\n ## -- save plot data\n for idx,model in zip(range(len(models)),models):\n key = model.datatag\n _fnTData.append(model.tdata)\n _fnTFit.append(model.tfit)\n _fnTFit[-1] = np.append(_fnTFit[-1],list(sorted([len(_fnTData[-1]) - t for t in _fnTFit[-1]])))\n ## -- fit\n _fnFitFunc = utp.create_fit_func(model,fit)\n _fnFitMean = gv.mean(_fnFitFunc(_fnTData[-1]))\n _fnTDataNonZero.append([t for t in _fnTData[-1] if np.abs(_fnFitMean[t]) > 1e-20])\n _fnTFitNonZero.append([t for t in _fnTFit[-1] if np.abs(_fnFitMean[t]) > 1e-20])\n _fnTRem.append([(0 if np.abs(_fnFitMean[t]) > 1e-20 else 1) for t in model.tdata])\n _fnTRem[-1] = \\\n [sum(_fnTRem[-1][:i+1]) for i in range(len(_fnTRem[-1])) if i in _fnTFitNonZero[-1]]\n _fnFitMean = gv.mean(_fnFitFunc(_fnTDataNonZero[-1]))\n _fnFitSdev = list(np.array(gv.sdev(_fnFitFunc(_fnTDataNonZero[-1])))/np.array(_fnFitMean))\n _fnFitOnes.append(list(np.ones(len(_fnTDataNonZero[-1]))))\n _fnFitError.append([ list(np.array(_fnFitOnes[-1])-np.array(_fnFitSdev)),\n list(np.array(_fnFitOnes[-1])+np.array(_fnFitSdev)) ])\n ## -- data\n _fnDatCentral.append( list(np.array([gv.mean(data[key])[t] for t in _fnTDataNonZero[-1]])/\n np.array(_fnFitMean)) )\n _fnDatSdev = ( np.array([gv.sdev(data[key])[t] for t in _fnTDataNonZero[-1]])/\n np.array(_fnFitMean) )\n _fnDatError.append([ list(_fnDatSdev), list(_fnDatSdev) ])\n ## -- done saving data\n \n if not(utp.get_option(\"to_terminal\",True,**kwargs[key])) 
and\\\n utp.get_option(\"to_file\",False,**kwargs[key]):\n for ix in range(len(models)):\n ## -- loops and saves all without creating window\n do_plot_normalized([ix])\n else:\n do_plot_normalized(_fnIdx)", "def plot_correlation_comparison(evaluators: List, annot=False):\n nr_plots = len(evaluators) + 1\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n fig, ax = plt.subplots(2, nr_plots, figsize=(4 * nr_plots, 7))\n flat_ax = ax.flatten()\n flat_ax[nr_plots + 1].clear()\n fake_corr = []\n real_corr = associations(evaluators[0].real, nominal_columns=evaluators[0].categorical_columns, plot=False, theil_u=True,\n mark_columns=True, annot=False, cmap=cmap, cbar=False, ax=flat_ax[0])['corr']\n for i in range(1, nr_plots):\n cbar = True if i % (nr_plots - 1) == 0 else False\n fake_corr.append(\n associations(evaluators[i - 1].fake, nominal_columns=evaluators[0].categorical_columns, plot=False, theil_u=True,\n mark_columns=True, annot=False, cmap=cmap, cbar=cbar, ax=flat_ax[i])['corr']\n )\n if i % (nr_plots - 1) == 0:\n cbar = flat_ax[i].collections[0].colorbar\n cbar.ax.tick_params(labelsize=20)\n\n for i in range(1, nr_plots):\n cbar = True if i % (nr_plots - 1) == 0 else False\n diff = abs(real_corr - fake_corr[i - 1])\n sns.set(style=\"white\")\n az = sns.heatmap(diff, ax=flat_ax[i + nr_plots], cmap=cmap, vmax=.3, square=True, annot=annot, center=0,\n linewidths=0, cbar=cbar, fmt='.2f')\n if i % (nr_plots - 1) == 0:\n cbar = az.collections[0].colorbar\n cbar.ax.tick_params(labelsize=20)\n titles = ['Real'] + [e.name if e.name is not None else idx for idx, e in enumerate(evaluators)]\n for i, label in enumerate(titles):\n flat_ax[i].set_yticklabels([])\n flat_ax[i].set_xticklabels([])\n flat_ax[i + nr_plots].set_yticklabels([])\n flat_ax[i + nr_plots].set_xticklabels([])\n title_font = {'size': '28'}\n flat_ax[i].set_title(label, **title_font)\n plt.tight_layout()", "def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()", "def plot_feature_correlations(self):\n\n fig = plt.figure(figsize=(18,18), tight_layout=True)\n fig.suptitle('Feature correlations', fontsize=24)\n\n sns.heatmap(self.train_data.astype(float).corr(method='kendall'), linewidths=0.1, vmin=-1.0,\n vmax=1.0, square=True, linecolor='white', annot=True, \n cmap=\"PiYG\")\n plt.savefig(r'data_analysis\\correlations_kendall_' + self.file_name + '.png', \n facecolor=fig.get_facecolor())", "def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n 
ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")", "def plot_results(\n train_data: tuple[Tensor, Tensor],\n test_data: tuple[Tensor, Tensor],\n correct_class: Tensor\n):\n #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(21,7), subplot_kw=dict(box_aspect=1))\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,7), subplot_kw=dict(box_aspect=1))\n fig2, ax3 = plt.subplots(figsize=(7,7), subplot_kw=dict(box_aspect=1))\n ax1.set_title('Training data')\n plot_dataset(train_data, ax1)\n\n ax2.set_title('Test data')\n plot_dataset(test_data, ax2)\n\n ax3.set_title('Test prediction correctness')\n plot_dataset((test_data[0], correct_class.int()), ax3, cmap={0: '#ff0000', 1: '#00ff00'})\n \n fig1.savefig('plots/datasets')\n fig2.savefig('plots/predictions')\n plt.show()", "def plot_roc_curves(distribution_shift_name, dataset_to_model_results,\n plot_dir: str):\n set_matplotlib_constants()\n datasets = list(sorted(list(dataset_to_model_results.keys())))\n\n roc_types = {\n 'drd': {\n 'y_true': 'y_true',\n 'y_pred': 'y_pred'\n },\n 'ood_detection': {\n 'y_true': 'is_ood',\n 'y_pred': 'y_pred_entropy'\n },\n }\n\n thresholds = np.arange(0, 1.02, 0.02)\n\n for dataset in datasets:\n dataset_results = dataset_to_model_results[dataset]\n for tuning_domain in ['indomain', 'joint']:\n for roc_type, roc_dict in roc_types.items():\n # Need the joint datasets, which have an `is_ood` field\n if roc_type == 'ood_detection' and 'joint' not in dataset:\n continue\n\n y_true_key = roc_dict['y_true']\n y_pred_key = roc_dict['y_pred']\n\n fig, ax = plt.subplots()\n plt.subplots_adjust(left=0.20, bottom=0.20)\n\n # The actual DRD predictions are quite far from the diagonal,\n # whereas OOD detection is close. 
Set frame accordingly.\n if roc_type == 'ood_detection':\n ax.plot([0, 1], [0, 1], linestyle=':', color='black')\n ax.set_ylim([-0.05, 1.05])\n ax.set_xlim([-0.03, 1.03])\n elif roc_type == 'drd':\n ax.plot(\n 0.2,\n 0.85,\n marker='o',\n color='limegreen',\n markersize=6,\n label='NHS Recommendation',\n linestyle='None')\n ax.set_ylim([0.45, 1.05])\n ax.set_xlim([-0.03, 0.93])\n\n roc_name = ROC_TYPE_TO_FULL_NAME[roc_type]\n\n plot_name = (f'roc-{distribution_shift_name}-{dataset}'\n f'-{tuning_domain}-{roc_type}')\n\n model_names = []\n for ((mt, k, is_d, key_tuning_domain, n_mc),\n model_dict) in dataset_results.items():\n if tuning_domain != key_tuning_domain:\n continue\n\n model_name = get_model_name((mt, k, is_d, key_tuning_domain, n_mc))\n model_names.append(model_name)\n\n print(model_name)\n print(model_dict.keys())\n print(dataset)\n print(tuning_domain)\n\n y_true = np.array(model_dict[y_true_key])\n y_pred = np.array(model_dict[y_pred_key])\n\n tpr_values = np.zeros(shape=(thresholds.shape[0], y_true.shape[0]))\n\n for seed_idx in range(y_true.shape[0]):\n y_true_seed = y_true[seed_idx, :]\n y_pred_seed = y_pred[seed_idx, :]\n fpr, tpr, _ = roc_curve(y_true=y_true_seed, y_score=y_pred_seed)\n\n for j in range(thresholds.shape[0]):\n fpr_idx = np.abs(fpr - thresholds[j]).argmin()\n tpr_values[j, seed_idx] = tpr[fpr_idx]\n\n tpr_value_mean = tpr_values.mean(1)\n tpr_value_ste = sem(tpr_values, axis=1)\n\n color, linestyle = get_colors_and_linestyle(\n MODEL_TYPE_TO_FULL_NAME[(mt, k > 1)])\n\n # Visualize mean with standard error\n ax.plot(\n thresholds,\n tpr_value_mean,\n color=color,\n label=model_name,\n linestyle=linestyle)\n ax.fill_between(\n thresholds,\n tpr_value_mean - tpr_value_ste,\n tpr_value_mean + tpr_value_ste,\n color=color,\n alpha=0.25,\n )\n\n # ax.legend(facecolor=\"white\")\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.plot([0, 1], [0, 1], ls='--', c='.3', lw=0.75)\n fig.tight_layout()\n\n if isinstance(plot_dir, str):\n os.makedirs(plot_dir, exist_ok=True)\n metric_plot_path = os.path.join(plot_dir, f'{plot_name}.pdf')\n fig.savefig(metric_plot_path, transparent=True, dpi=300, format='pdf')\n logging.info(\n f'Saved ROC plot for distribution shift {distribution_shift_name},'\n f'dataset {dataset}, tuning domain {tuning_domain}, '\n f'roc_type {roc_name}, models {model_names} to '\n f'{metric_plot_path}')\n\n print(plot_name)\n # plt.show()", "def generate_correlation_scatter_plots(data_sets, abscissa_label, assume_scaled_solar,\n compare_against_reference_labels, output_figure_stem, run_title,\n abundances_over_h=True):\n\n # Metadata about all the labels which we can plot the Cannon's precision in estimating\n label_metadata = LabelInformation().label_metadata\n\n # Metadata data about all of the horizontal axes that we can plot precision against\n abscissa_info = AbscissaInformation().abscissa_labels[abscissa_label]\n\n # Look up a list of all the (unique) labels the Cannon tried to fit in all the data sets we're plotting\n unique_json_files = set([item['cannon_output'] for item in data_sets])\n labels_in_each_data_set = [json.loads(gzip.open(json_file + \".summary.json.gz\", \"rt\").read())['labels']\n for json_file in unique_json_files]\n unique_labels = set([label for label_list in labels_in_each_data_set for label in label_list])\n\n # Filter out any labels where we don't have metadata about how to plot them\n label_names = [item for item in unique_labels if item in label_metadata]\n\n # LaTeX strings to use to 
label each stellar label on graph axes\n labels_info = [label_metadata[ln] for ln in label_names]\n\n # Create directory to store output files in\n os.system(\"mkdir -p {}\".format(output_figure_stem))\n\n data_set_titles = []\n output_figure_stem = os_path.abspath(output_figure_stem) + \"/\"\n data_set_counter = -1\n plot_cross_correlations = [{} for j in data_sets]\n\n # If requested, plot all abundances (apart from Fe) over Fe\n if not abundances_over_h:\n for j, label_name in enumerate(label_names):\n test = re.match(\"\\[(.*)/H\\]\", label_name)\n if test is not None:\n if test.group(1) != \"Fe\":\n label_names[j] = \"[{}/Fe]\".format(test.group(1))\n\n # Loop over the various Cannon runs we have, e.g. LRS and HRS\n data_file_names = []\n for counter, data_set in enumerate(data_sets):\n\n cannon_output = json.loads(gzip.open(data_set['cannon_output'] + \".full.json.gz\", \"rt\").read())\n\n # If no label has been specified for this Cannon run, use the description field from the JSON output\n if data_set['title'] is None:\n data_set['title'] = re.sub(\"_\", r\"\\_\", cannon_output['description'])\n\n # Calculate the accuracy of the Cannon's abundance determinations\n accuracy_calculator = CannonAccuracyCalculator(\n cannon_json_output=cannon_output,\n label_names=label_names,\n compare_against_reference_labels=compare_against_reference_labels,\n assume_scaled_solar=assume_scaled_solar,\n abscissa_field=abscissa_info['field']\n )\n\n stars_which_meet_filter = accuracy_calculator.filter_test_stars(constraints=data_set['filters'].split(\";\"))\n\n accuracy_calculator.calculate_cannon_offsets(filter_on_indices=stars_which_meet_filter)\n\n # Add data set to plot\n legend_label = data_set['title'] # Read the title which was supplied on the command line for this dataset\n if run_title:\n legend_label += \" ({})\".format(run_title) # Possibly append a run title to the end, if supplied\n\n # add data set\n\n # Work out multiplication factor to convert SNR/pixel to SNR/A\n snr_converter = SNRConverter(raster=np.array(cannon_output['wavelength_raster']),\n snr_at_wavelength=snr_defined_at_wavelength)\n\n data_set_titles.append(legend_label)\n\n # Create a sorted list of all the abscissa values we've got\n abscissa_values = list(accuracy_calculator.label_offsets.keys())\n abscissa_values = sorted(set(abscissa_values))\n\n data_set_counter += 1\n\n # Construct a datafile listing all the offsets for each label, for each abscissa value\n # This full list of data points is used to make histograms\n for abscissa_index, abscissa_value in enumerate(abscissa_values):\n displayed_abscissa_value = abscissa_value\n if abscissa_label == \"SNR/A\":\n displayed_abscissa_value = snr_converter.per_pixel(abscissa_value).per_a()\n\n y = []\n for i, (label_name, label_info) in enumerate(zip(label_names, labels_info)):\n # List of offsets\n diffs = accuracy_calculator.label_offsets[abscissa_value][label_name]\n y.append(diffs)\n\n # Filename for data file containing all offsets\n data_file = \"{}/data_offsets_all_{:d}_{:06.1f}.dat\".format(output_figure_stem,\n data_set_counter,\n displayed_abscissa_value)\n\n # Output data file of label mismatches at this abscissa value\n np.savetxt(fname=data_file,\n X=np.transpose(y),\n header=\"\"\"\n# Each row represents a star\n# {column_headings}\n\n\"\"\".format(column_headings=\" \".join([\"offset_{}\".format(x) for x in label_names]))\n )\n\n # Output scatter plots of label cross-correlations at this abscissa value\n 
plot_cross_correlations[data_set_counter][displayed_abscissa_value] = (data_file, snr_converter)\n data_file_names.append(data_file)\n\n del cannon_output\n\n # Now plot the data\n\n # Create pyxplot script to produce this plot\n plotter = PyxplotDriver()\n\n # Create a new pyxplot script for correlation plots\n item_width = 4 # centimetres\n for data_set_counter, data_set_items in enumerate(plot_cross_correlations):\n for abscissa_index, (displayed_abscissa_value, plot_item) in enumerate(sorted(data_set_items.items())):\n data_filename, snr_converter = plot_item\n\n if abscissa_label == \"SNR/A\":\n snr = snr_converter.per_a(displayed_abscissa_value)\n caption = \"SNR/A {0:.1f}; SNR/pixel {1:.1f}\". \\\n format(snr.per_a(), snr.per_pixel())\n elif abscissa_label == \"SNR/pixel\":\n snr = snr_converter.per_pixel(displayed_abscissa_value)\n caption = \"SNR/A {0:.1f}; SNR/pixel {1:.1f}\". \\\n format(snr.per_a(), snr.per_pixel())\n else:\n caption = \"{0} {1}\".format(abscissa_info[\"latex\"], displayed_abscissa_value)\n\n ppl = \"\"\"\nset numerics errors quiet\nclear\nset width {width}\nset size square\nset multiplot\nset nokey\nset fontsize 1.6\n \"\"\".format(width=item_width)\n\n for i in range(len(label_names) - 1):\n for j in range(i + 1, len(label_names)):\n label_info = label_metadata[label_names[j]]\n if i == 0:\n ppl += \"unset yformat\\n\"\n ppl += \"set ylabel \\\"$\\Delta$ {}\\\"\\n\".format(label_info[\"latex\"])\n else:\n ppl += \"set yformat '' ; set ylabel ''\\n\"\n ppl += \"set yrange [{}:{}]\\n\".format(-label_info[\"offset_max\"] * 1.2,\n label_info[\"offset_max\"] * 1.2)\n\n label_info = label_metadata[label_names[i]]\n if j == len(label_names) - 1:\n ppl += \"unset xformat\\n\"\n ppl += \"set xlabel \\\"$\\Delta$ {}\\\"\\n\".format(label_info[\"latex\"])\n else:\n ppl += \"set xformat '' ; set xlabel ''\\n\"\n\n ppl += \"set xrange [{}:{}]\\n\".format(-label_info[\"offset_max\"] * 1.2,\n label_info[\"offset_max\"] * 1.2)\n\n ppl += \"set origin {},{}\\n\".format(i * item_width, (len(label_names) - 1 - j) * item_width)\n\n ppl += \"plot \\\"{}\\\" using {}:{} w dots ps 2\\n\".format(data_filename, i + 1, j + 1)\n\n output_filename = \"{}/correlation_{:d}_{:d}\".format(output_figure_stem, abscissa_index, data_set_counter)\n\n plotter.make_plot(output_filename=output_filename,\n data_files=data_file_names,\n # caption=r\"\"\"\n # {data_set_title} \\newline {caption}\n # \"\"\".format(data_set_title=data_set_titles[data_set_counter],\n # caption=caption\n # ).strip(),\n pyxplot_script=ppl\n )", "def correlations_cont_cat(self):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n \n warnings.filterwarnings('ignore')\n \n # Print correlations and column names\n print('One-way ANOVA p-values - Predictors')\n for i,j,v in self.cont_cat_distance:\n print('{} and {} = {:.2}'.format(i,j,v))\n \n # Box plot of the highly correlated pairs\n for i,j,v in self.cont_cat_distance:\n fg,ax = plt.subplots(figsize=(12, 8))\n fg = self._dataset.boxplot(i, j, ax=ax, grid=False)\n plt.xticks(rotation=90)\n plt.show()", "def plot_predictions(self):\n\n plt.title(\"Targets vs. 
Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def visualize_cross_validation_results(cross_val_results, plots_filepath):\n\n pair_model_scores, pair_model_stds, \\\n siamese_model_scores_2, siamese_model_stds_2, \\\n siamese_model_scores_10, siamese_model_stds_10 = cross_val_results\n param_names = (\"NBCH1\", \"NBCH2\", \"NBFCH\", \"BATCH_NORM\", \"SKIP_CON\", \"LR\")\n\n def aggregate_results(scores, stds):\n \"\"\"\n Helper function to aggregate score means and standard deviations for a model across parameter values\n\n :param scores: dictionary of score means {param_combo: score_mean}\n :param stds: dictionary of score stds {param_combo: score_std}\n\n :returns: list of tuples of pandas.Dataframe objects containing aggregated mean and std data\n \"\"\"\n\n scores = pd.DataFrame(scores.values(),\n index=scores.keys(),\n columns=[\"SCORE MEAN\", ])\n stds = pd.DataFrame(stds.values(),\n index=stds.keys(),\n columns=[\"SCORE STD\", ])\n scores.index.name = param_names\n stds.index.name = param_names\n data = []\n for param_gropby_levels in ((0,), (1,), (2,), (3, 4), (5,)):\n aggregate_scores = scores.groupby(level=param_gropby_levels).mean()\n aggregate_stds = scores.groupby(level=param_gropby_levels).std()\n data.append((aggregate_scores, aggregate_stds))\n return data\n\n pair_model_data = aggregate_results(pair_model_scores, pair_model_stds)\n siamese_model_2_data = aggregate_results(siamese_model_scores_2, siamese_model_stds_2)\n siamese_model_10_data = aggregate_results(siamese_model_scores_10, siamese_model_stds_10)\n\n # Group results for all models\n model_names = (\"Pair\", \"Siamese 2\", \"Siamese 10\")\n grouped_data = []\n for pair_model_group_data, siamese_model_2_group_data, siamese_model_10_group_data in zip(pair_model_data,\n siamese_model_2_data,\n siamese_model_10_data):\n score_means = (pair_model_group_data[0], siamese_model_2_group_data[0], siamese_model_10_group_data[0])\n score_mean_data = pd.concat(score_means, axis=1)\n score_mean_data.columns = model_names\n\n score_stds = (pair_model_group_data[1], siamese_model_2_group_data[1], siamese_model_10_group_data[1])\n score_std_data = pd.concat(score_stds, axis=1)\n score_std_data.columns = model_names\n\n grouped_data.append((score_mean_data, score_std_data))\n\n plots_param_names = (\"nbch1\", \"nbch2\", \"nbfch\", \"batch_norm+skip_con\", \"lr\")\n for i, (plot_param_names, (score_mean_data, score_std_data)) in enumerate(zip(plots_param_names, grouped_data)):\n plt.figure(figsize=(10, 5))\n score_mean_data.plot(kind=\"line\" if plot_param_names == \"lr\" else \"bar\",\n yerr=score_std_data,\n capsize=5,\n ylim=(0.4, 1.1),\n colormap=colormap_brg_darker)\n plt.title(\"Cross validation results for parameters:\\n{}\".format(plot_param_names), fontsize=18)\n plt.xlabel(\"Parameter value\", fontsize=14)\n plt.ylabel(\"Average accuracy\", fontsize=14)\n plt.xticks(fontsize=12, rotation=30)\n plt.yticks(fontsize=12)\n plt.legend(title=\"Model\", title_fontsize=10)\n plt.tight_layout()\n plt.savefig(fname=plots_filepath + \"cross_validation_{}.eps\".format(plot_param_names),\n dpi=\"figure\", format=\"eps\")\n plt.close()", "def plot_scatter(self):\n if Trainer.y_pred is None or Trainer.y_true is None:\n messagebox.showerror(\"Information\", \"Please train the model first before plotting\")\n return\n\n fig = plt.figure(figsize=(8, 4))\n plt.xlabel(\"Prediction\")\n plt.ylabel(\"Target\")\n plt.figtext(0, 0, 
f\"RMSE: {self.test_rmse}\", fontsize=13)\n plt.grid()\n plt.scatter(x=Trainer.y_true, y=Trainer.y_pred, c='b', s=1)\n\n win = tk.Toplevel()\n win.wm_title(\"Window\")\n win.geometry(\"1000x500\")\n\n # specify the window as master\n canvas = FigureCanvasTkAgg(fig, master=win)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0, column=0, sticky=tk.W)\n\n # navigation toolbar\n toolbarFrame = tk.Frame(master=win)\n toolbarFrame.grid(row=1, column=0)\n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)", "def plot(self):\n\t\traw_labels = self.make_raw_data()[1]\n\t\tbalanced_labels = self.get_extra()[1]\n\t\tfig, ax1 = subplots()\n\t\tax2 = ax1.twinx()\n\t\tx = array(range(1, NCLASSES + 1))\n\t\tl1 = ax1.bar(x - 0.3, self.prior_sizes, width = 0.25, color = 'b', align = 'center', label = 'train')\n\t\tl2 = ax2.bar(x, bincount(raw_labels - 1), width = 0.25, color = 'r', align = 'center', label = 'confident')\n\t\tl3 = ax2.bar(x + 0.3, bincount(balanced_labels - 1), width = 0.25, color = 'g', align = 'center', label = 'rebalanced')\n\t\tconfident_frac = len(raw_labels) / float(self.predictions.shape[0])\n\t\tusable_frac = len(balanced_labels) / float(self.predictions.shape[0])\n\t\tax1.set_title('at >{0:.1f}%, {1:.1f}% reliable, {2:.1f}% usable'.format(self.confidence * 100, confident_frac * 100, usable_frac * 100))\n\t\tax1.legend([l1, l2, l3], [l1.get_label(), l2.get_label(), l3.get_label()], loc = 'upper right')\n\t\tax1.set_xticks(x)", "def predictions_scatter(self, features=None, mask=None, marker_size=20, alpha=0.1, grid_columns=2):\n features = self.common_features if features is None else features\n mask, df, = self._apply_mask(mask, self._get_features(features))\n correlation_plots = []\n for name, prediction in self.prediction.items():\n correlation_pairs = [(feature, name) for feature in features]\n df[name] = prediction[mask]\n correlation_plots += self._scatter_addition(df, correlation_pairs, marker_size=marker_size, alpha=alpha)\n return plotting.GridPlot(grid_columns, *correlation_plots)", "def plot_main_effects(self, figsize=(15, 5)):\n\n import matplotlib.pyplot as plt\n import seaborn as sbn\n\n fig, axes = plt.subplots(1, len(self.predictors),\n figsize=figsize, sharey=True)\n if type(axes) is not np.ndarray:\n axes = [axes]\n\n for i, ax in enumerate(axes):\n if self.outcome_type == \"discrete\":\n n = len(self.predictors[i].x)\n ax.scatter(\n self.predictors[i].x,\n self.y + np.random.normal(0, 0.02, n)\n )\n else:\n ax.scatter(self.predictors[i].x, self.y)\n ax.set_xlabel(self.predictors[i])\n if i == 0:\n ax.set_ylabel(\"Outcome\")", "def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 
'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()", "def plot_predictions(net, x_train, y_train, idx_train, x_val, y_val, idx_val):\n fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 30))\n pred1 = net.predict(x_val, batch_size=batch_size)\n # print(\"pred1.shape:\", pred1.shape)\n ax1.plot(idx_val, y_val, label=\"Actual Data\", marker=\"+\")\n ax1.plot(idx_val, pred1, label=\"Prediction\", marker=\"o\")\n # ax1.set_ylim(-0.1, 1.1)\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"Sunspot Numbers\")\n ax1.legend()\n ax1.set_title(\"Predicted vs Actual Validation Data\")\n\n pred2 = net.predict(x_train, batch_size=batch_size)\n # print(\"pred2.shape:\", pred2.shape)\n ax2.plot(idx_train, y_train, label=\"Actual Data\", marker=\"+\")\n ax2.plot(idx_train, pred2, label=\"Prediction\", marker=\"o\")\n # ax2.set_ylim(-0.1, 1.1)\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"Sunspot Numbers\")\n ax2.legend()\n ax2.set_title(\"Predicted vs Actual Training Data\")\n\n plt.tight_layout()\n\n filename = \"img/\"\n filename += datetime.now().strftime(\"%y%m%d_%H%M\")\n filename += \"_predicted_vs_actual_data.png\"\n fig.savefig(filename, format=\"png\")", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = 
ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def build_corr_plot():\r\n fig = plt.figure(figsize=(12, 12))\r\n ax = fig.add_subplot", "def Plot_predict(X,Y,model,X_path): \n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n Y_pred_classes = np.argmax(model.predict(X),axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n \n fig = plt.figure(figsize=(40, 40)) \n for i in range(X.shape[0]):\n ax = fig.add_subplot(8, 4, i + 1, xticks=[], yticks=[])\n ax.set_title(\"Groundtruth : {} \\n Prediction : {}\".format(labels[Y_true[i]],labels[Y_pred_classes[i]]), \\\n color=(\"green\" if Y_true[i] == Y_pred_classes[i] else \"red\"),fontsize=20) \n img = image.load_img(X_path[i])\n ax.imshow(img)\n plt.show()\n return", "def show_reconstruction(dataset, model, num_samples, color='black'):\n mpl.rcParams['text.color'] = color\n mpl.rcParams['axes.labelcolor'] = color\n mpl.rcParams['xtick.color'] = color\n mpl.rcParams['ytick.color'] = color\n\n # Create dataloader\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=num_samples,\n )\n\n # Get next batch\n x, _ = next(iter(dataloader))\n target = x\n\n # Compute prediction and diff\n pred, _ = model(x)\n pred = pred.detach()\n diff = target - pred\n ymax = max(target.max(), pred.max())\n ymin = min(target.min(), pred.min())\n\n if len(x.shape) != 4:\n target = target[:, :, :, None]\n pred = pred[:, :, :, None]\n diff = diff[:, :, :, None]\n\n for i_channel in range(target.shape[-1]):\n # Create plot\n for i_sample in range(num_samples):\n f, axes = plt.subplots(1, 3, figsize=(20, 5))\n # f.suptitle(\"Input vs reconstruction, channel: {}\".format(i_channel), fontsize=16)\n\n # Label rows\n labels = {0: 'Ground truth',\n 1: 'Prediction',\n 2: 'Deviation'}\n\n for i in range(3):\n plt.sca(axes[i])\n axes[i].set_title(labels[i], rotation=0, size=16)\n axes[i].set_ylim([ymin - .5, ymax + .5])\n axes[i].tick_params(labelsize=12)\n\n # Plot ground truth\n axes[0].plot(target[i_sample, 0, :, i_channel].numpy())\n\n # Plot prediction\n axes[1].plot(pred[i_sample, 0, :, i_channel].numpy())\n\n # Plot deviation\n axes[2].plot(diff[i_sample, 0, :, i_channel].numpy())\n\n plt.show()", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a 
decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot_results(infer_images, inference_predicted_class, inference_predictions, class_names=['plants', 'water']):\n plt.style.use(['dark_background', 'bmh'])\n rc('figure', figsize=(8, 8), max_open_warning=False)\n rc('axes', facecolor='none')\n plt.figure(figsize=(15, 15))\n\n for i, (infer_img, _) in enumerate(infer_images.take(10)):\n ax = plt.subplot(5, 2, i + 1)\n plt.imshow(infer_img.numpy()/255)\n\n # Find the predicted class from predictions\n m = \"Predicted: {}, {:.2f}%\".format(\n class_names[inference_predicted_class[i]], inference_predictions[i]*100)\n plt.title(m)\n plt.axis(\"off\")\n plt.show()", "def visualise_dataset_classifier_results(dataset_results):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(dataset_results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"o\", \"^\", \"*\"]\n colors = [\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\", \"grey\", \"#b15928\", \"#e31a1c\", \"black\"]\n color_dict = {}\n index = 0\n for (_, classifier_description) in dataset_results[0][1]:\n color_dict[classifier_description] = colors[index]\n index += 1\n\n hatches = [None, \"////\", \"..\"]\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.52)\n ax.yaxis.set_label_coords(0.53, 0.9)\n\n plt.ylim(-0.2, 0.2)\n plt.xlim(-0.2, 0.2)\n data_set_labels = []\n classifier_labels = []\n data_set_index = 0\n for 
(data_set, dataset_result) in dataset_results:\n data_set_labels.append(mlines.Line2D(range(1), range(1), color=\"white\", marker=markers[data_set_index], markeredgecolor=\"black\", markeredgewidth=1.0, label=data_set.replace(\"_\", \" \")))\n median_true_pos = np.median(np.array([result_arr[3] for (result_arr, classifier_description) in dataset_result]))\n median_true_neg = np.median(np.array([result_arr[4] for (result_arr, classifier_description) in dataset_result]))\n\n i = 0\n for (result_arr, classifier_description) in dataset_result:\n if data_set_index == 0:\n classifier_labels.append(mpatches.Patch(facecolor=color_dict[classifier_description], hatch=hatches[i % len(hatches)], label=classifier_description, alpha=0.8, edgecolor=\"black\"))\n ax.scatter(result_arr[3] - median_true_pos, result_arr[4] - median_true_neg, marker=markers[data_set_index], hatch=hatches[i % len(hatches)], s=200, alpha=0.8, color=colors[i],\n edgecolor=\"black\", zorder=data_set_index, lw=0.8)\n i += 1\n data_set_index += 1\n\n plt.legend(handles=data_set_labels + classifier_labels)\n sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_inches='tight')\n plt.close(fig)", "def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()", "def visualize_predictions(self, images, preds, targets):\n\n class_names = ['angry', 'happy', 'sad']\n images = images[:8]\n preds = preds[:8]\n targets = targets[:8]\n\n # determine size of the grid based for the given batch size\n num_rows = int(torch.tensor(len(images)).float().sqrt().floor())\n\n fig = plt.figure(figsize=(7, 7))\n for i in range(len(images)):\n plt.subplot(num_rows, len(images) // num_rows + 1, i+1)\n img = images[i].permute(1, 2, 0).cpu().numpy()\n img = np.array([0.229, 0.224, 0.225]) * img + np.array([0.485, 0.456, 0.406])\n img = np.clip(img, 0, 1)\n plt.imshow(img)\n plt.title(f'pred: {class_names[preds[i]]}'\n f'\\ntruth: [{class_names[targets[i]]}]')\n plt.axis('off')\n\n self.logger.experiment.add_figure(\n 'predictions', fig, 
global_step=self.global_step)" ]
[ "0.6313132", "0.6275113", "0.61071104", "0.6047933", "0.5924972", "0.5916628", "0.58634686", "0.58615613", "0.58423764", "0.58360696", "0.58246493", "0.5787134", "0.57615566", "0.5745429", "0.5735826", "0.5730945", "0.57147706", "0.5708738", "0.57017416", "0.5699573", "0.5678714", "0.5677304", "0.56610155", "0.56546193", "0.56523275", "0.564425", "0.5640645", "0.563759", "0.5615905", "0.5601302" ]
0.68460006
0
Return actual line string length
def len(self): return len(self.line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def line_length(self, dLine = 0):\n return self.buffer.line_length(self.line + dLine)", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def get_string_length(self):\n return int(self.read('H')[0])", "def get_line_width(self):\n return self.lwidth", "def get_line_length(file_path):\n with open(file_path, 'rb+') as f:\n return len(f.readline())", "def num_bytes_per_line(self):\n return self._num_bytes_per_line", "def get_linecount(self):\n self._update_linetab(len(self.input))\n lcount = len(self.__linepos)\n return lcount - (self.input.endswith('\\n'))", "def embedcount(line):\r\n\r\n x_temp = line.count(BOX_CHAR['lu'])\r\n return self.defaults.get('size')-(4*x_temp)", "def Length(self) -> int:", "def Length(self) -> int:", "def test_lineLength(self):\n failures = []\n for line in self.output:\n if not len(line) <= lineWidth:\n failures.append(len(line))\n if failures:\n self.fail(\"%d of %d lines were too long.\\n\"\n \"%d < %s\" % (len(failures), len(self.output),\n lineWidth, failures))", "def get_width(self):\n return max(map(len, self.get_lines()))", "def length(self):\n return self._info.length # pylint: disable=E1101", "def total_length():\n return", "def _get_length(self):\n return self._length", "def length(self):\n return len(self.text)", "def longlines(x):\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))", "def get_length(self):\n return self.run_command('get_length')[0]", "def indentsize(line):\r\n expline = string.expandtabs(line)\r\n return len(expline) - len(string.lstrip(expline))", "def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines", "def getTextLength(self):\r\n return 0", "def do_len(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(len(self.cl.data))\n\t\telse:\n\t\t\tprint(\"To get number of contacts you need to open or create book\")", "def length(self):\n\t\treturn self.n", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def get_length(self):\n\n return self.length", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def length(self):\n total_length = 0\n for location_a, location_b in zip(\n self.locations[:-1], self.locations[1:]):\n total_length += Line(location_a, location_b).length\n return total_length" ]
[ "0.83488667", "0.7556461", "0.75529397", "0.7546989", "0.7420517", "0.7266771", "0.72320795", "0.7193497", "0.71531826", "0.7086452", "0.7078299", "0.7078299", "0.7030356", "0.7019079", "0.70126826", "0.70002663", "0.6998436", "0.6954156", "0.6948814", "0.6942688", "0.6910902", "0.6878067", "0.68516344", "0.68282163", "0.6824556", "0.6814959", "0.6814959", "0.68027925", "0.68019724", "0.6792304" ]
0.7881204
1
Return first num characters. Just read, no actual modification on the line string.
def readFirst(self, num): return self.line[:num]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first_line(self):\n with open(self.file_path) as file:\n return file.readline()", "def get_first_line(file: str) -> str:\n with open(file) as f:\n return f.readline().split('\\n')[0]", "def read(self, n=1):\n return self.string[self.pos:self.pos + n]", "def read_nchars(string, n=1):\n return string[:n]", "def read_file_first_line(filename):\n result = None\n with open(filename, 'r') as f:\n result = f.readline()\n result = result.rstrip(\"\\n\")\n f.close()\n return result", "def _get(self):\r\n c = self.theLookahead\r\n self.theLookahead = None\r\n if c == None:\r\n c = self.instream.read(1)\r\n if c >= ' ' or c == '\\n':\r\n return c\r\n if c == '': # EOF\r\n return '\\000'\r\n if c == '\\r':\r\n return '\\n'\r\n return ' '", "def get_first_line(filename):\n try:\n with open(filename, \"r\") as ff:\n first_line = ff.readlines()[0].strip(\" \\n\\r\")\n except FileNotFoundError: # pragma: no cover\n first_line = \"xxx\"\n return first_line", "def get_number(self):\n number = ''\n while self.current_character.isdigit():\n number += self.current_character\n self.current_character = self.file.read(1)\n self.file.seek(self.file.tell() - 1, 0)\n character = self.file.read(1)\n if character.isdigit():\n pass\n else:\n self.file.seek(self.file.tell() - 1, 0)\n return number", "def __get_line(file_path: str, line_no: int, errors: str = 'ignore') -> str:\n try:\n with open(file_path, mode='r',\n encoding='utf-8', errors=errors) as f:\n for line in f:\n line_no -= 1\n if line_no == 0:\n return line\n return ''\n except IOError:\n LOG.error(\"Failed to open file %s\", file_path)\n return ''", "def readchar(self) -> int:", "def getFirstChar(self):\n if self.i1 is None:\n self.firstChar = None\n else:\n chrNum = int(self.i1 // 10)\n if chrNum < 26:\n # should result in something like A4 for 4, B6 for 16\n self.firstChar = chr(ASCII_LETTER_A + chrNum) + str(self.i1 % 10)\n else:\n runLog.warning(\n \"invalid location. 
ring {0} is too many rings!\".format(self.i1),\n self,\n )", "def _get_line(self, line: int) -> str:\n line_offsets_with_sentinel = self._line_offsets + [len(self._text)]\n return self._text[line_offsets_with_sentinel[line]:line_offsets_with_sentinel[line+1]]", "def _get_first_code_line():\n return min(_code_lines)", "def getFileFirstLine(filename, mode=\"r\", encoding=None):\n\n with withFileLock(\"reading file %s\" % filename):\n with openTextFile(filename, mode, encoding=encoding) as f:\n return f.readline()", "def readline( shell ):\n global readbuf\n readbuf += read( shell, 1024 )\n if '\\n' not in readbuf:\n return None\n pos = readbuf.find( '\\n' )\n line = readbuf[ 0: pos ]\n readbuf = readbuf[ pos + 1: ]\n return line", "def readline( self ):\n self.readbuf += self.read( 1024 )\n if '\\n' not in self.readbuf:\n return None\n pos = self.readbuf.find( '\\n' )\n line = self.readbuf[ 0 : pos ]\n self.readbuf = self.readbuf[ pos + 1: ]\n return line", "def _first_row_with_climate_data(self, fp):\n if isinstance(fp, str):\n csvfile = open(fp, newline=\"\")\n else:\n csvfile = fp\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n for i, row in enumerate(csvreader):\n if row[0].isdigit():\n break\n return i", "def peek(string, n=0):\n return string[:n]", "def extract_chars(infile, n=10000):\n reader = partial(get_chars, n)\n return read_on(reader, infile)", "def first_lines(log_str, n_lines):\n return \"\\n\".join((log_str.split(\"\\n\")[:n_lines])) if n_lines >= 0 else log_str", "def _get_string(self):\n result = self.sfile.readline().rstrip('\\n')\n return result", "def Left(text, number):\n return text[:number]", "def _get_line(self, lnum):\n start, end = self._get_linespan(lnum)\n return self.input[start:end]", "def line(self, n):\n return self.__contents[n]", "def extract_first_line(func_code):\r\n if func_code.startswith(FIRST_LINE_TEXT):\r\n func_code = func_code.split('\\n')\r\n first_line = int(func_code[0][len(FIRST_LINE_TEXT):])\r\n func_code = '\\n'.join(func_code[1:])\r\n else:\r\n first_line = -1\r\n return func_code, first_line", "def read_line(filename):\n line = \"Unknown\"\n try:\n with open(filename) as f:\n line = f.readline().strip()\n finally:\n return line", "def readline(self):\n if self.index < self.length:\n result = self.store[self.index:]\n elif False == self.closed:\n result = self.input.readline()\n self.lineNumber += 1\n else:\n result =''\n self.index = 0\n self.length = 0\n return result", "def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF", "def readline(f):\n line = f.readline()\n while not (len(line) > 2 or line[0].isalnum()):\n line = f.readline()\n if line is None:\n raise PFSPWTIO.PFSPException('Reached EOF.')\n return line.replace('\\n', '')", "def chars(count):\n\n global offset\n\n bytes=midifile[offset:offset+count]\n offset+=count\n return bytes" ]
[ "0.655525", "0.6426731", "0.6293631", "0.6194903", "0.6155995", "0.61236703", "0.61074185", "0.6102843", "0.6096315", "0.60518306", "0.60010046", "0.599532", "0.5951977", "0.5920654", "0.5913139", "0.58947694", "0.5872971", "0.58560044", "0.5845062", "0.5841828", "0.5816927", "0.5801214", "0.57835764", "0.57814616", "0.5779346", "0.5775326", "0.5765675", "0.575155", "0.5748839", "0.57451093" ]
0.78747165
0
Remove given string checking for existence Raises CutException if given string was not found
def remove(self, string): val = self.line[:len(string)] if val != string: raise CutException("No match for given string") self.line = self.line[len(string):]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, string=str) -> None:\n try:\n if self.exists(string):\n del self.table[string]\n except Exception as error:\n print(f\"Error: self.remove({string}) -> {error}\")", "def remove(name):", "def str_remove(string: str, index: int) -> str: # _3 [✅]\n if len(string) == 0:\n raise ValueError # put the msg inside here - refer to the doc \n else:\n return string.replace(string[index], '')", "def remove(somestring, sub):\n location = somestring.find(sub)\n length = len(sub)\n part_before = somestring[:length+location]\n part_after = somestring[location+length:]\n return part_before + part_after", "def removeFactor(self, string: str, string2: str) -> _AbstractKnobBuilder__T:\n ...", "def func(self):\n char = self.character\n clothing = char.search(self.args, candidates=char.contents)\n if not clothing:\n return\n if not clothing.db.worn:\n char.msg(\"You're not wearing that!\")\n return\n if clothing.db.covered_by:\n char.msg(\"You have to take off %s first.\" % clothing.db.covered_by.name)\n return\n clothing.remove(char)", "def test_remove_non_string_raises_type_error(full_trie):\n with pytest.raises(TypeError):\n full_trie.remove(103)", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '') # we can replace arg with ''. We also need to register it", "def cut_string(value, arg):\n\n return value.replace(arg, '')", "def cut(value,arg):\n return value.replace(arg, '')", "def remove_line(file, string=[]):\n with open(file, 'r') as f, open('tmp.txt', '+a') as new_f:\n for line in f:\n clean = True\n for word in string:\n if word in line:\n clean = False\n if clean is True:\n new_f.write(line)\n os.remove(file)\n os.rename('tmp.txt', file)", "def test_llist_remove_element_negative(self):\n\n try:\n TestLList.llist_string.remove('kk')\n\n except UDFError as exception:\n assert exception.code == 100L\n except LargeItemNotFound as exception:\n assert exception.code == 125L", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def cut_text(text):\n for phrase in TERMINALS:\n if phrase in text:\n return text[:text.index(phrase)]\n\n SavedSource(label=LABEL, subject='cut_text', body=text).put()\n return text", "def test_remove_longer_word_removes_word(multi_trie):\n multi_trie.remove(\"hello\")\n assert multi_trie.contains(\"hello\") is False", "def test_remove_shorter_word_retains_longer_form_of_that_word(multi_trie):\n multi_trie.remove(\"hell\")\n assert multi_trie.contains(\"hello\") is True", "def deleteLine(oldFile, string1, string2, newString, newFile = \"TempFile\"):\n with open(oldFile, \"r\") as oldfile, open(newFile, \"w\") as newfile:\n oldfile_read = oldfile.readlines()\n for line in oldfile_read:\n line_number = oldfile_read.index(line)\n if 'ATOM' in line and not (string1 in line and string2 in line.split()[5]):\n newfile.writelines(oldfile_read[line_number])\n shutil.move(newFile, oldFile)", "def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)", "def removePiece(self, address):\r\n\r\n try:\r\n del self.pieces[address]\r\n except KeyError:\r\n print(\"error removing piece!\")", "def clean_str(string):\n #just return string if already cleaned\n return string", "def mycut(value, arg):\r\n return value.replace(arg, '')", "def cutAt(self, character):\t\n\t\t\n\t\tif self.line.find(character) < 0:\n\t\t\traise CutException('Character not found')\n\t\t\n\t\tval = 
self.line[:self.line.find(character)]\n\t\tself.line = self.line[self.line.find(character)+1:]\n\t\treturn val", "def remove(part, word):\n n = word.find(part)\n m = len(part)\n if part in word:\n part1 = word[:n]\n part2 = word[(m+1):]\n new_word = part1 + part2\n else:\n new_word = word\n return new_word", "def test_remove_word_with_one_bifurcation(multi_trie):\n multi_trie.remove(\"howdy\")\n assert multi_trie.contains(\"howdy\") is False", "def removeMaskString(maskedString):\n global masked_value_set\n # Since we cannot remove an item from a set during itteration over\n # the said set, we only mark a flag and if the flag is set to True\n # we remove the string from the set.\n found = False\n for item in masked_value_set:\n if item == maskedString:\n found = True\n if found:\n masked_value_set.remove(maskedString)", "def cutting(value,arg):\n return value.replace(arg,'working')", "def remove_substring(substring, string):\n return string.replace(substring, '')", "def cut(value,arg):\n return value.replace(arg,'')" ]
[ "0.6289555", "0.5905014", "0.58504665", "0.56325716", "0.5621997", "0.5611954", "0.5557018", "0.55479324", "0.55479324", "0.5523663", "0.55215895", "0.5517186", "0.5495908", "0.5477499", "0.5460453", "0.5420246", "0.53903854", "0.5370281", "0.5328302", "0.5328038", "0.5295019", "0.529161", "0.52912676", "0.52845395", "0.5273278", "0.5253687", "0.52507174", "0.5246018", "0.52438885", "0.52364475" ]
0.80095285
0
Cut line at given position
def cutAtPos(self, position): val = self.line[:position] self.line = self.line[position:] return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cut_line(self):\r\n self.parachute.pop(0)", "def cutAt(self, character):\t\n\t\t\n\t\tif self.line.find(character) < 0:\n\t\t\traise CutException('Character not found')\n\t\t\n\t\tval = self.line[:self.line.find(character)]\n\t\tself.line = self.line[self.line.find(character)+1:]\n\t\treturn val", "def cut_line_at_point(line, point):\n\n distance = line.project(point)\n if distance <= 0.0 or distance >= line.length:\n return [LineString(line)]\n\n coords = list(line.coords)\n for i, p in enumerate(coords):\n pd = line.project(Point(p))\n if pd == distance:\n return [LineString(coords[: i + 1]), LineString(coords[i:])]\n if pd > distance:\n cp = line.interpolate(distance)\n return [\n LineString(coords[:i] + [(cp.x, cp.y)]),\n LineString([(cp.x, cp.y)] + coords[i:]),\n ]", "def cut(lines=[],params=\"\"):\n if not core.is_unixy():\n raise(\"cut is only implemented on unix-like systems\")\n cmd = \"cut\"\n if params != \"\":\n cmd = cmd + \" \" + params\n res = act.call(cmd,lines)\n return res", "def decomposing_line_cut_by_splicing(P, v, w):\n\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_cut_as_line: W not on chain\")\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\tif distance_to_w >= chain.length or distance_to_w == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_v)\n\n\t\tp_l = left_chain.coords[:]\n\t\tp_r = right_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\tif distance_to_v >= chain.length or distance_to_v == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_w)\n\n\t\tp_l = right_chain.coords[:]\n\t\tp_r = left_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\n\tif distance_to_w > distance_to_v:\n\n\t\tleft_v_cut, right_v_cut = cut_linestring(chain, distance_to_v)\n\n\t\tdistance_to_w = right_v_cut.project(w_Point)\n\t\tleft_w_chain, right_w_chain = cut_linestring(right_v_cut, distance_to_w)\n\n\t\tp_l = left_v_cut.coords[:]+right_w_chain.coords[:-1]\n\t\tp_r = left_w_chain.coords[:]\n\n\t\treturn p_l, p_r\n\n\telse:\n\n\t\tleft_w_cut, right_w_cut = cut_linestring(chain, distance_to_w)\n\n\t\tdistance_to_v = right_w_cut.project(v_Point)\n\t\tleft_v_chain, right_v_chain = cut_linestring(right_w_cut, distance_to_v)\n\n\t\tp_l = left_w_cut.coords[:]+right_v_chain.coords[:-1]\n\t\tp_r = left_v_chain.coords[:]\n\n\t\treturn p_l, p_r", "def cut_linestring(line, distance):\n\n\tpd = 0\n\n\tdistance = distance % line.length\n\n\tif distance == 0.0:\n\t\treturn [line, []]\n\n\n\tcoords = list(line.coords)\n\tfor i in range(1, len(coords)):\n\t\t\n\t\tpd = LineString(coords[:i+1]).length\n\n\t\tif pd == distance:\n\t\t\treturn [\n\t\t\t\tLineString(coords[:i+1]),\n\t\t\t\tLineString(coords[i:])]\n\n\t\tif pd > distance:\n\t\t\tcp = line.interpolate(distance)\n\t\t\treturn [\n\t\t\t\tLineString(coords[:i] + [(cp.x, cp.y)]),\n\t\t\t\tLineString([(cp.x, cp.y)] + coords[i:])]", "def cut(self, piece):\n self.substrates = self.substrates.difference(piece)", "def _truncateLine(self, line):\n return line[: irc.MAX_COMMAND_LENGTH - len(self.delimiter)]", "def cut_line_at_points(line, points):\n\n segments = []\n remainder = line\n\n for point in points:\n segment, remainder = cut_line_at_point(remainder, point)\n 
segments.append(segment)\n\n segments.append(remainder)\n\n return segments", "def onCut(self):\n pass", "def cut_in_lines(self,line):\n limit_screen = 30 #caracteres que tiene de ancho la pantalla\n length = 0 #para comparar leineas\n res = ''\n\n for linea in line.split('\\n'):\n if length + len(linea) <= limit_screen:\n new_linea = linea\n length += len(new_linea)\n else:\n if len(linea) > limit_screen:\n linea = self.cut_in_words(linea)\n new_linea = '\\n' + linea\n length = len(new_linea) - 2 #-2 para no tener en cuenta el \\n\n res += new_linea\n return res", "def splitLine(line):\n # Find a point where our line changes direction\n l = np.copy(line)\n change = l[2:] - l[:-2]\n # Create breaks where derivative equals 0\n break_indicies = np.unique(np.where(change == 0)[0])\n line_segments = []\n while break_indicies.size > 0:\n i = break_indicies[0]\n\n # Add the beginning of the line to our list\n new_line= l[0:i+1]\n line_segments.append(new_line)\n\n # The rest of the line becomes our new line\n l = l[i+1:]\n\n # Recalculate the change vector\n change = l[2:] - l[:-2]\n break_indicies = np.unique(np.where(change == 0)[0])\n\n line_segments.append(l)\n return line_segments", "def FilterLine(self, a_line):\n return a_line", "def lineTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError", "def delete_substr(self, y, x1, x2):\n self.lines[y] = self.lines[y][ : x1] + self.lines[y][x2 : ]", "def slice_by_pos(val: str, start: SourcePos, end: SourcePos) -> str:\n if \"\\n\" in val:\n lines = val.split(\"\\n\")\n if end.row > start.row:\n top = lines[start.row][start.col :]\n filling = lines[start.row + 1 : end.row]\n bottom = lines[end.row][: end.col]\n return \"\\n\".join(line for line in chain([top], filling, [bottom]))\n else:\n return lines[start.row][start.col : end.col]\n else:\n return val[start.col : end.col]", "def writeCutLines(self, fid, drawing_code, X1, Y1, X2, Y2):\n def notEdge(x, X):\n return round(abs(1000*(x-X)))\n\n assert self.x and self.y\n\n radius = config.GAT[drawing_code].dimx/2.0\n \n # Start at lower-left, proceed clockwise\n x = self.x - radius\n y = self.y - radius\n\n left = notEdge(self.x, X1)\n right = notEdge(self.x+self.width_in(), X2)\n bot = notEdge(self.y, Y1)\n top = notEdge(self.y+self.height_in(), Y2)\n\n BL = ((x), (y))\n TL = ((x), (y+self.height_in()+2*radius))\n TR = ((x+self.width_in()+2*radius), (y+self.height_in()+2*radius))\n BR = ((x+self.width_in()+2*radius), (y))\n\n if not left:\n BL = (BL[0]+2*radius, BL[1])\n TL = (TL[0]+2*radius, TL[1])\n\n if not top:\n TL = (TL[0], TL[1]-2*radius)\n TR = (TR[0], TR[1]-2*radius)\n\n if not right:\n TR = (TR[0]-2*radius, TR[1])\n BR = (BR[0]-2*radius, BR[1])\n\n if not bot:\n BL = (BL[0], BL[1]+2*radius)\n BR = (BR[0], BR[1]+2*radius)\n\n BL = (util.in2gerb(BL[0]), util.in2gerb(BL[1]))\n TL = (util.in2gerb(TL[0]), util.in2gerb(TL[1]))\n TR = (util.in2gerb(TR[0]), util.in2gerb(TR[1]))\n BR = (util.in2gerb(BR[0]), util.in2gerb(BR[1]))\n\n # The \"if 1 or ...\" construct draws all four sides of the job. By\n # removing the 1 from the expression, only the sides that do not\n # correspond to panel edges are drawn. The former is probably better\n # since panels tend to have a little slop from the cutting operation\n # and it's easier to just cut it smaller when there's a cut line.\n # The way it is now with \"if 1 or....\", much of this function is\n # unnecessary. 
Heck, we could even just use the boardoutline layer\n # directly.\n if 1 or left:\n fid.write('X%07dY%07dD02*\\n' % BL)\n fid.write('X%07dY%07dD01*\\n' % TL)\n\n if 1 or top:\n if not left: fid.write('X%07dY%07dD02*\\n' % TL)\n fid.write('X%07dY%07dD01*\\n' % TR)\n\n if 1 or right:\n if not top: fid.write('X%07dY%07dD02*\\n' % TR)\n fid.write('X%07dY%07dD01*\\n' % BR)\n\n if 1 or bot:\n if not right: fid.write('X%07dY%07dD02*\\n' % BR)\n fid.write('X%07dY%07dD01*\\n' % BL)", "def cut_lines(lines, pseudoread_length):\n step = int(pseudoread_length / 2)\n\n line_iterate = [x for x in range(0, len(lines), 2)]\n\n result = []\n\n for index in line_iterate:\n\n if (index % 100000) == 0:\n print(index)\n\n id = lines[index].strip()\n\n sequence = lines[index + 1].strip()\n\n # if sequence is shorter than single window, we return just window\n end_of_range = len(sequence) - step if (len(sequence) - step > 0) else len(sequence)\n range_iterate = [x for x in\n range(0, end_of_range, step)]\n\n for i in range_iterate:\n new_id = id + '|{}'.format(i)\n kmer = sequence[i:i + pseudoread_length]\n result.append(new_id)\n result.append(kmer)\n\n return result", "def line_moved(self):\n\n # The line is supposed to be moved by hand to the beginning of first wrinkle.\n # The optimal spot is local maximum (not always visible)\n ext_index = self.index_of_drop + int(self.line.value() * 10000)\n ext_value = self.data[ext_index]\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n\n n = toolbox_2.particle_count_2(ext_value)\n\n # measurement series 1\n if self.selected_data == 3 and 7 <= self.meas_selected_number <= 17 and self.meas_selected_series == 1:\n index = self.meas_selected_number - 7 # Assumes that first measurement is number 7\n self.smallest_particles[index] = smallest_growing_particle\n self.number_counts[index] = n\n\n self.update_distribution()\n # Update plot\n self.curve_distribution.setData(self.particle_distribution_x, self.particle_distribution_y*1e-10)\n self.curve_distribution_cumulative.setData(self.smallest_particles, self.number_counts*1e-10)\n\n # measurement series 2\n elif self.selected_data == 3 and self.meas_selected_series == 2:\n index = self.meas_selected_number - 1 # begins from 1, 0th measurement is just copy of 8th\n self.number_counts_2[index] = n\n\n self.curve_rotatometer.setData(np.array([4, 6, 8, 10, 12, 14, 16, 18]), self.number_counts_2*1e-10)\n x = np.linspace(3.5, 20, 100)\n self.curve_rotatometer_fit.setData(x, self.number_counts_2[0] * 4 * (1 / x) *1e-10)\n\n #print(\"N\", \"%.2e\"%n, \"dpres\", round(p_i - p_f))", "def _(event):\n deleted = line.delete_before_cursor(count=-line.document.get_start_of_line_position())\n line.set_clipboard(ClipboardData(deleted))", "def bend_towards(line, where, to):\n \n if not line.contains(where) and not line.touches(where):\n raise ValueError('line does not contain the point where.')\n \n coords = line.coords[:]\n # easy case: where is (within numeric precision) a vertex of line\n for k, vertex in enumerate(coords):\n if where.almost_equals(Point(vertex)):\n # move coordinates of the vertex to destination\n coords[k] = to.coords[0]\n return LineString(coords)\n \n # hard case: where lies between vertices of line, so\n # find nearest vertex and move that one to point to\n _, min_k = min((where.distance(Point(vertex)), k) \n for k, vertex in enumerate(coords))\n coords[min_k] = to.coords[0]\n return 
LineString(coords)", "def cut(tail, primer, snp_position='last'):\n\n if snp_position == 'last':\n strprimer = str(primer)\n else:\n strprimer = str(primer.rev_comp())\n\n if strprimer.startswith('TATGAC'):\n new_tail = tail[:-6]\n elif strprimer.startswith('ATGAC'):\n new_tail = tail[:-5]\n elif strprimer.startswith('TGAC'):\n new_tail = tail[:-4]\n elif strprimer.startswith('GAC'):\n new_tail = tail[:-3]\n elif strprimer.startswith('AC'):\n new_tail = tail[:-2]\n elif strprimer.startswith('C'):\n new_tail = tail[:-1]\n else:\n new_tail = tail\n\n return new_tail", "def cut_characters(lines=[],nfrom=None,nto=None,complement=0):\n return cut_by(\"-c\",lines,nfrom,nto,complement)", "def trim_line(x1, y1, x2, y2, a, b):\n\tm = (y2 - y1)/(x2 - x1)\n\n\tif x1 < a:\n\t\ty1 += m * (a - x1)\n\n\tif x2 > b: \n\t\ty2 += m * (b - x2)\n\n\treturn x1, y1, x2, y2", "def gicp(line):\n import pyperclip\n import shlex\n args = shlex.split(line)\n if len(args) == 0:\n num_lines_prior = 1\n else:\n num_lines_prior = int(args[1])\n pyperclip.copy(In[-1-num_lines_prior])", "def cutout(self, centre, radius):", "def decline(self):\n\n x, y = self._get_xy_lims()\n decline = Line2D(\n xdata=[x, x],\n ydata=[y + self.offset, y + self.span],\n color=self.colour,\n linewidth=2,\n zorder=10,\n path_effects=[pe.Stroke(linewidth=3, foreground='k'), pe.Normal()]\n )\n\n return decline", "def cutPoly(self,geom,startPt,endPt,debug=False):\r\n #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n splittedGeoms=[]\r\n leftFragments=[]\r\n rightFragments=[]\r\n #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n for geomPart in geom.asGeometryCollection():\r\n #split the actual part by cut line defined by startPt,endPt\r\n (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n splittedGeoms+=splittedGeomsPart\r\n #Add the remaining geomPart to the rightFragments or letfFragments\r\n #depending on distance\r\n d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def _clip_line(point1, point2, xmin, ymin, xmax, ymax):\n deltax, deltay = point2[0] - point1[0], point2[1] - point1[1]\n deltas = [-deltax, -deltay, deltax, deltay] # p\n distances = [ # q\n point1[0] - xmin, point1[1] - ymin,\n xmax - point1[0], ymax - point1[1]]\n ratios = np.divide(distances, deltas) # r\n pct1, pct2 = 0, 1 # how much of the line is inside the window\n side = [None, None]\n for i in range(4):\n if deltas[i] == 0 and distances[i] < 0:\n return (), side\n if deltas[i] < 0:\n if ratios[i] > pct1: # entered\n side[0] = i\n pct1 = ratios[i]\n if deltas[i] > 0:\n if ratios[i] < pct2: # exited\n side[1] = i\n pct2 
= ratios[i]\n if pct1 > pct2:\n return (), side\n clipped = (\n tuple(np.add((point1[0], point1[1]), (pct1*deltax, pct1*deltay))),\n tuple(np.add((point1[0], point1[1]), (pct2*deltax, pct2*deltay))),\n )\n return clipped, side", "def move_to_line_start(self) -> None:\n self.index = self.buffer.get_line_start(self.index)" ]
[ "0.73306286", "0.6732413", "0.6677863", "0.64723504", "0.61549896", "0.6125919", "0.61119336", "0.6012234", "0.5828049", "0.5815221", "0.57717556", "0.56684816", "0.56131065", "0.553599", "0.5533644", "0.55323756", "0.55173707", "0.5457276", "0.5445595", "0.5443708", "0.544283", "0.54337955", "0.5395658", "0.53595454", "0.5350896", "0.5347293", "0.5308668", "0.52971476", "0.5293497", "0.5282371" ]
0.78466594
0
Creates an Operation that will scan through every item in a table when run.
def scan(table_name: str):
    build = ab.builder(
        table_name=table_name)
    description = shake(
        TableName=build(args.TableName))
    return Operation(description, run)
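For illustration, a minimal sketch of the describe-then-run pattern this snippet uses. `ab.builder`, `shake`, `args`, `run`, and `Operation` are not defined above, so the stand-ins below are assumptions, wired to a boto3-style DynamoDB Scan rather than the original helpers.

from dataclasses import dataclass
from typing import Any, Callable, Dict

@dataclass
class Operation:
    # A request description plus the callable that knows how to execute it.
    description: Dict[str, Any]
    runner: Callable[..., Any]

    def run(self, client) -> Any:
        return self.runner(client, self.description)

def run(client, description):
    # Scans every item in the table (a boto3 DynamoDB client is assumed).
    return client.scan(**description)

def scan(table_name: str) -> Operation:
    return Operation({"TableName": table_name}, run)

# op = scan("my-table")                               # hypothetical table name
# items = op.run(boto3.client("dynamodb"))["Items"]   # would perform the Scan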
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_scan(self, arg):\n results = self._table.scan()\n for item in results[\"Items\"]:\n self._print(_pretty(item))", "def items(kwargs=None):\n if kwargs is None:\n kwargs = {}\n while True:\n resp = table.scan(**kwargs)\n yield from resp['Items']\n kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']", "def action_gen():\n for n, doc in enumerate(cursor):\n # print fields\n did = doc.pop('_id')\n if doc == {}:\n print \"Empty document, skipping\"\n continue\n op_dict = {\n '_index': db.lower(),\n '_type': collection,\n '_id': int('0x' + str(did), 16),\n '_source': doc\n }\n #op_dict['doc'] = doc\n yield op_dict", "def list_operations():", "def gen_op(\n obj_list: List[DBObject],\n table_names: List[str],\n write_rate: float,\n predicate_read_rate: float,\n for_update: bool,\n chosen_len: int,\n ) -> List[Operation]:\n rnd: float = random.random()\n if rnd < write_rate:\n obj: DBObject = random.choice(obj_list)\n # This creates an object if it doesn't exist\n # Note that we cannot rely on the object being created if obj_ver[obj_id] > 0.\n # This is because obj_ver denotes the order in which the statements are *generated* not executed\n # It is incremental to ensure *uniqueness*, not *order*\n # For instance, \"1,2,0,4,3\" is a valid value for an object, but \"1,2,1,4,3\" is not\n #\n obj_ver[obj.id] += 1\n return [\n Operation(Operation.Type.READ, obj=obj, for_update=for_update),\n Operation(Operation.Type.WRITE, obj=obj, value=obj_ver[obj.id]),\n ]\n elif write_rate <= rnd < write_rate + predicate_read_rate:\n return [\n Operation(\n Operation.Type.PREDICATE_READ,\n tables=table_names,\n value=chosen_len,\n for_update=for_update,\n )\n ]\n else:\n return [\n Operation(\n Operation.Type.READ,\n obj=random.choice(obj_list),\n for_update=for_update,\n )\n ]", "def Iterator():\n return _table.Iterator()", "def collect_scans(table_type, items):\n table = get_table(table_type)\n lst = []\n for key, value in items:\n table.insert(key, value)\n lst.append(key)\n\n scans = []\n for key in lst:\n val, scan = table.lookup(key)\n scans.append(scan)\n\n ave_scan = sum(scans) / float(len(items))\n return ave_scan", "def main(\n project_id: str, \n instance_id: str, \n table_name: str, \n rowkeys: List[str], \n start_rowkey: str, \n stop_rowkey: str, \n rowkey_sep: str,\n) -> None:\n table = get_table_instance(project_id, instance_id, table_name)\n if rowkeys and len(rowkeys) >= 1:\n start = time.process_time()\n model_list = get_rowkeys(table, rowkeys, rowkey_sep)\n end = time.process_time()\n print(\"Elapsed time for getting single row: {}s\".format(end - start))\n for model in model_list:\n print(model.dict())\n else:\n start = time.process_time()\n model_list = scan_rows_range(table, start_rowkey, stop_rowkey, rowkey_sep)\n end = time.process_time()\n print(\"Elapsed time for scanning row range: {}s\".format(end - start))\n for model in model_list:\n print(model.dict())", "def query_all_users():\n ddb = boto3.resource(\"dynamodb\")\n tb = ddb.Table(os.environ.get(\"TABLE_NAME\"))\n return tb.scan()", "async def iterate_and_pass(table_sub: pxapi.TableSub) -> None:\n async for _ in table_sub:\n pass", "def scan_table(table_name: str, limit: Optional[int] = None, **db_kwargs) -> Generator:\n scan_kwargs: Dict[str, Any] = {\"TableName\": table_name}\n if limit is not None:\n scan_kwargs[\"Limit\"] = limit\n\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n resp = client.scan(**scan_kwargs)\n yield resp\n while \"LastEvaluatedKey\" in resp:\n resp = 
client.scan(ExclusiveStartKey=resp[\"LastEvaluatedKey\"], **scan_kwargs)\n yield resp\n except Exception as err: # pylint:disable=board-except\n raise RuntimeError(\"Failed to scan table: %s\" % str(err))", "def _run_query(self):", "def scan_table(self,expression=''):\n response = self.table.query(KeyConditionExpression=Key(\"Employeeid\").eq(int(expression)))\n print(response['Items'])\n df = pd.DataFrame(response['Items'], index=[0])\n print(df.head())\n return df", "def _scan_table(self, uri):\n cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format(\n kiji_uri=self.kiji_uri,\n uri=uri)\n self._run_kiji_job(cmd)", "def __init__(self, collection, ordered=False,\n bypass_document_validation=False):\n super(BulkOperator, self).__init__(collection, ordered)\n self.find_count = 0\n self.insert_count = 0\n self.execute_count = 0\n self.total_ops = 0", "def _get_table(self, cursor):\n raise NotImplementedError", "async def test_iterate_many(database) -> None:\n col = database[\"test_iterate_many\"]\n # Delete all documents\n await col.delete_many()\n # Insert documents\n documents = [{\"key\": True}] * 2 + [{\"key\": False}] * 2\n await col.create_many(documents)\n # Iterator over documents\n _idx = 0\n # Apply a filter\n async for doc in col.iterate_many(filter={\"key\": True}):\n _idx += 1\n assert _idx <= 2\n assert doc[\"key\"]\n # Apply a limit\n docs = [doc async for doc in col.iterate_many(filter={\"key\": False}, limit=1)]\n assert len(docs) == 1\n assert not docs[0][\"key\"]", "def table_generator( db, query):\n db.query(query)\n result = db.store_result()\n done = False\n while not done:\n row = result.fetch_row(1,1)\n done = len(row) == 0\n if not done:\n (dict,) = row\n yield Row(SimpleDictGenerator(dict))", "def getResultAll(i=None):", "def test_batch_create_occurrences(self):\n pass", "def batch_execute(self, conn):\n def batches(data, batch_size) -> list:\n \"\"\"Return batches of length `batch_size` from any object that\n supports iteration without knowing length.\"\"\"\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv\n\n columns = ColumnCollection(self.columns)\n if self.header:\n self.columns = [columns.get(h) for h in next(self.data)]\n columns = ColumnCollection(self.columns)\n\n total = 0\n query = BulkInsertQuery(self.table, columns)\n for batch in batches(self.data, self.batch_size):\n total += query.execute(conn, batch) or 0\n yield total", "def _perform_query(self, from_row=0, max_rows=-1):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n for item in results:\n yield self._doc_class(self._cb, item[\"id\"], item)", "def run(self):\n query = self.query\n\n # count before filtering\n # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()\n\n self._set_column_filter_expressions()\n self._set_global_filter_expression()\n self._set_sort_expressions()\n self._set_yadcf_data(query)\n\n # apply filters\n query = query.filter(\n *[e for e in self.filter_expressions if e is not None])\n self.filtered_query = deepcopy(query)\n\n # self.cardinality_filtered = query.add_columns(\n # self.columns[0].sqla_expr).count()\n\n # apply sorts\n query = query.order_by(\n *[e for e in self.sort_expressions if e is not None])\n\n # add paging options\n length = int(self.params.get('length'))\n if length >= 0:\n query = 
query.limit(length)\n elif length == -1:\n pass\n else:\n raise(ValueError(\n 'Length should be a positive integer or -1 to disable'))\n query = query.offset(int(self.params.get('start')))\n\n # add columns to query\n query = query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.filtered_query = self.filtered_query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.query = query\n # fetch the result of the queries\n column_names = [col.mData if col.mData else str(i)\n for i, col in enumerate(self.columns)]\n # self.results = [{k: v for k, v in zip(\n # column_names, row)} for row in query.all()]", "def iter(self, query: str, *args, **kwargs):\n self._ensure_connected()\n cursor = SSCursor(self._db)\n try:\n self._execute(cursor, query, args, kwargs)\n column_names = [d[0] for d in cursor.description]\n for row in cursor:\n yield Row(zip(column_names, row))\n finally:\n cursor.close()", "def __iter__(self) :\n\n cur = self.con.cursor()\n row_sql = 'SELECT * FROM \"%s\"' % (self.name,)\n cur.execute(row_sql)\n return DataTableIterator(cur)", "def _create_iterable_operations(self, node, interface_identifier):\n return {\n Identifier('forEach'):\n self._create_operation(Identifier('forEach'),\n arguments=self._create_arguments([\n (Identifier('callback'),\n Identifier('ForEachIteratorCallback')),\n (Identifier('thisArg'), 'any', 'null'),\n ]),\n extended_attributes={\n 'CallWith':\n ('ScriptState', 'ThisValue'),\n 'RaisesException': None,\n 'ImplementedAs': 'forEachForBinding',\n },\n node=node),\n Identifier('entries'):\n self._create_operation(\n Identifier('entries'),\n return_type=SyncIterator.identifier_for(interface_identifier),\n extended_attributes={\n 'CallWith': 'ScriptState',\n 'RaisesException': None,\n 'ImplementedAs': 'entriesForBinding',\n },\n node=node),\n Identifier('keys'):\n self._create_operation(\n Identifier('keys'),\n return_type=SyncIterator.identifier_for(interface_identifier),\n extended_attributes={\n 'CallWith': 'ScriptState',\n 'RaisesException': None,\n 'ImplementedAs': 'keysForBinding',\n },\n node=node),\n Identifier('values'):\n self._create_operation(\n Identifier('values'),\n return_type=SyncIterator.identifier_for(interface_identifier),\n extended_attributes={\n 'CallWith': 'ScriptState',\n 'RaisesException': None,\n 'ImplementedAs': 'valuesForBinding',\n },\n node=node),\n }", "def test_bulk_iterates_actions_only_once(self):\n doc = self._make_doc()\n actions = OneshotIterable([BulkActionItem.index(doc)])\n self.adapter.bulk(actions) # does not raise IterableExhaustedError", "def gen_transaction(\n txn_id: int,\n obj_list: List[DBObject],\n table_names: List[str],\n isolation_level: str,\n min_size: int,\n max_size: int,\n abort_rate: float,\n write_rate: float,\n predicate_read_rate: float,\n for_update: bool,\n ) -> Transaction:\n\n def gen_op(\n obj_list: List[DBObject],\n table_names: List[str],\n write_rate: float,\n predicate_read_rate: float,\n for_update: bool,\n chosen_len: int,\n ) -> List[Operation]:\n \"\"\"\n Generate a single operation\n\n By fixing a chosen len across a transaction, it makes it more likely for there to be conflicts\n \"\"\"\n rnd: float = random.random()\n if rnd < write_rate:\n obj: DBObject = random.choice(obj_list)\n # This creates an object if it doesn't exist\n # Note that we cannot rely on the object being created if obj_ver[obj_id] > 0.\n # This is because obj_ver denotes the order in which the statements are *generated* not executed\n # It is incremental to ensure *uniqueness*, not *order*\n 
# For instance, \"1,2,0,4,3\" is a valid value for an object, but \"1,2,1,4,3\" is not\n #\n obj_ver[obj.id] += 1\n return [\n Operation(Operation.Type.READ, obj=obj, for_update=for_update),\n Operation(Operation.Type.WRITE, obj=obj, value=obj_ver[obj.id]),\n ]\n elif write_rate <= rnd < write_rate + predicate_read_rate:\n return [\n Operation(\n Operation.Type.PREDICATE_READ,\n tables=table_names,\n value=chosen_len,\n for_update=for_update,\n )\n ]\n else:\n return [\n Operation(\n Operation.Type.READ,\n obj=random.choice(obj_list),\n for_update=for_update,\n )\n ]\n\n size: int = random.randint(min_size, max_size)\n # How many times, on average, each txn will write to an object\n #\n AVG_WRITE_PER_OBJECT_PER_TXN: float = (write_rate * 0.5 * (min_size + max_size)) / len(obj_list)\n\n # This is a bit hacky, but multiplying AVG_WRITE_PER_OBJECT_PER_TXN by\n # the transaction id gives the approximate average size of each object at this point\n # since it approximates sum([AVG_WRITE_PER_OBJECT_PER_TXN] * N_TXN_UNTIL_THIS_POINT)\n #\n AVG_OBJECT_SIZE: int = int(AVG_WRITE_PER_OBJECT_PER_TXN * txn_id)\n\n ops: List[Operation] = [\n Operation(Operation.Type.SET_ISOLATION, isolation_level=isolation_level),\n Operation(Operation.Type.BEGIN),\n ]\n\n for _ in range(size):\n # Using this hacky math makes the predicate reads more likely to return\n # interesting queries\n #\n # We intentionally skew in favour of returning less values, which\n # makes this more prone to returning less values, and consequently\n # generating more anti-dependencies\n #\n for op in gen_op(\n obj_list,\n table_names,\n write_rate,\n predicate_read_rate,\n for_update,\n random.randint(int(AVG_OBJECT_SIZE * 0.85), int(AVG_OBJECT_SIZE * 1.35)),\n ):\n ops.append(op)\n\n if random.random() < abort_rate:\n ops.append(Operation(Operation.Type.ROLLBACK))\n else:\n ops.append(Operation(Operation.Type.COMMIT))\n\n return Transaction(txn_id, ops)", "def iterate(query, callback=lambda x: x, batch_size=1000, verbose=True):\n start = time.time()\n count = 0\n results = query.fetch(batch_size)\n while results:\n rstart = time.time()\n for row in results:\n output = callback(row)\n if output:\n print output\n count += 1\n if verbose:\n print '%s rows processed in %.1fs' % (count, time.time() - rstart)\n print 'total time: %.1fs' % (time.time() - start)\n results = query.with_cursor(query.cursor()).fetch(batch_size)\n callback()\n print 'total rows: %s, total time: %.1fs' % (count, time.time() - start)", "def execute(self):\n for coll in list(self.__bulks):\n try:\n bulkOp = self.__bulks[coll]\n curr_result = Counter(bulkOp.execute())\n self.update_results(coll, curr_result)\n except BulkWriteError as bwe:\n sys.stderr.write(str(bwe.details))" ]
[ "0.6228359", "0.60451776", "0.5848391", "0.5576059", "0.5517535", "0.5460877", "0.5242596", "0.5196265", "0.51867557", "0.51820654", "0.5164621", "0.5130511", "0.5113141", "0.5112561", "0.5091411", "0.5072028", "0.505515", "0.50514007", "0.49664125", "0.4961605", "0.49613667", "0.49605995", "0.49504766", "0.49496102", "0.49448088", "0.4939333", "0.49211618", "0.49192578", "0.4905882", "0.49010113" ]
0.65158
0
Manage one display indicator icon. The icon to be displayed at any time is determined by the state_callback function. pos is the position (in pixels) on the screen. If blank, the next slot will be allocated from the display. The indicator registers itself with Display, so there is no need to keep a variable for it.
def __init__(self, d, images, state_callback, pos=None):
    self.d = d  #Display
    self.callback = state_callback
    self.last_img = None
    self.img = []
    for i in images:
        img, width, height = display.load_image(i)
        self.img.append(img)
    self.x_pos = d.register_indicator(self, 0 if pos else width+2)
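A hypothetical construction sketch: the class name Indicator, the my_display object, the image filenames, and the wifi_state callback are all assumptions, not part of the source.

def wifi_state():
    return 0  # index of the image that should currently be shown

indicator = Indicator(my_display, ["wifi_off.png", "wifi_on.png"], wifi_state)
# No reference has to be kept: __init__ registers the object with the Display,
# which can later ask wifi_state() which of the loaded images to draw.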
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draw_indicator(\n self, src, center, color=(255, 0, 0), shape=\"circle\", size=4, thickness=1\n ):\n if isinstance(center, tuple):\n center = new_point(*center)\n if shape == \"rect\":\n draw_rectangle(\n src,\n center.x - size / 2.0,\n center.y - size / 2.0,\n size,\n size,\n color=color,\n thickness=thickness,\n )\n elif shape == \"crosshairs\":\n draw_lines(\n src,\n [\n [(center.x - size, center.y), (center.x + size, center.y)],\n [(center.x, center.y - size), (center.x, center.y + size)],\n ],\n color=color,\n thickness=thickness,\n )\n else:\n draw_circle(src, center[0], center[1], size, color=color)", "def update(self, pos = 0, msg = \"\"):\n if self.print_indicator and self.indicator and not self.video_model == None:\n C=pyqtgraph.hsvColor(1)\n pen=pyqtgraph.mkPen(color=C,width=1)\n data = np.zeros(10)\n\n pos = int(self.video_model.get_pos(datatype = self.model.get_datatype()))\n self.indicator.setData([pos,pos],[self.indicator_min,self.indicator_max]) #= self.plot_item.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def updateIndicator(self):\n\t\tnewIndicatorX = self.getPosFromPitch(self.listener.pitch)\n\t\t\n\t\tself.triTip = (newIndicatorX, self.triTip[1])\n\t\tself.triLeft = (self.triTip[0] - self.width*0.01, self.height*.3)\n\t\tself.triRight = (self.triTip[0] + self.width*0.01, self.height*.3)\n\t\tself.indicatorCoords = ( self.triLeft, self.triTip, self.triRight)\n\t\tself.indicator.points = self.indicatorCoords\n\t\tself.indicator.fill = self.indicatorColor[self.inTune]", "def update_icon(self, _widget, _callback_data):\n\t\t\n\t\tprint \"in update_icon for \", self.name\n\t\tself.icon = self.__window.get_icon()\n\t\tself.icon.save(self.imgpath, \"png\")\n\t\tif not self.pile is None:\n\t\t\tself.pile.update_child_icon(self)\n\t\treturn", "def _add_pos_cb(self, cb, pos, only_once=True):\n obj = self.get_pvobj(\"readback\")\n queue = Queue.Queue()\n def pos_cb(e):\n if e is None and self.at_pos(pos):\n cb()\n if only_once:\n obj.del_monitor_callback(queue.get())\n id = obj.add_monitor_callback(pos_cb)\n queue.put(id)\n return True", "def __showIndicator(self, view, pos):\n hit = view.page().hitTestContent(pos)\n \n if hit.isContentEditable() or not hit.linkUrl().isEmpty():\n return False\n \n jsSource = \"\"\"\n var out = {\n vertical:\n window.innerWidth > document.documentElement.clientWidth,\n horizontal:\n window.innerHeight > document.documentElement.clientHeight\n };\n out;\"\"\"\n \n res = view.page().execJavaScript(jsSource)\n if res is None:\n return False\n \n vertical = res[\"vertical\"]\n horizontal = res[\"horizontal\"]\n if not vertical and not horizontal:\n return False\n \n if vertical and horizontal:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollAll.png\"))\n elif vertical:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollVertical.png\"))\n else:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollHorizontal.png\"))\n \n self.__view = view\n p = QPoint(\n pos.x() - self.__indicator.pixmap().width() // 2,\n pos.y() - self.__indicator.pixmap().height() // 2\n )\n \n self.__indicator.setParent(self.__view)\n self.__indicator.move(p)\n self.__indicator.show()\n \n self.__scroller.setPage(view.page())\n \n self.__view.inputWidget().grabMouse()\n QApplication.setOverrideCursor(Qt.ArrowCursor)\n \n return True", "def sli(self, indicator=0):\n self.indicator = indicator\n return indicator", "def _draw_center_indicator(\n self, src, color=(0, 
0, 255), shape=\"crosshairs\", size=10, thickness=1\n ):\n cpt = self._get_frame_center(src)\n self._draw_indicator(\n src,\n new_point(*cpt),\n shape=shape,\n color=color,\n size=size,\n thickness=thickness,\n )", "def on_stateico_clicked(self, *a):\n\t\tself.window1.set_property('visible', True)\n\t\tself.stateico.set_visible(False)\n\t\tself.window1.present()", "def show(self):\n stroke(*self.status.value)\n fill(*self.status.value)\n circle((self.position.x, self.position.y), radius = 7)", "def _drawstatus(self):\n (y, x) = self.chatscreen.getmaxyx()\n\n fillchar = '*' if self.busy > 0 else '-'\n form = '{:'+ fillchar +'^' + str(x - 1) + '}'\n\n self.chatscreen.addstr(y-1, 0, form.format('%s' % self.status()))", "def _setindicator(self, index: int, value: bool) -> None:\n bitmask = 1 << (index + 1)\n current = self._get_buffer(0x04)\n if value:\n self._set_buffer(0x04, current | bitmask)\n else:\n self._set_buffer(0x04, current & ~bitmask)\n if self._auto_write:\n self.show()", "def icon(self):\n _LOGGER.info(\"icon for {}\".format(self._sensor_type))\n if self._sensor_type == ATTR_STATUS:\n if not self._state:\n return None\n return DEVICE_MAP[self._sensor_type][DEVICE_MAP_INDEX.index('ICON_INDEX')][self._state]\n else:\n return DEVICE_MAP[self._sensor_type][DEVICE_MAP_INDEX.index('ICON_INDEX')]", "def __init__(\r\n self,\r\n icon: Surface,\r\n active_icon: Surface = None,\r\n disabled_icon=None,\r\n state: int = Button.INACTIVE,\r\n ) -> None:\r\n super().__init__(state)\r\n if active_icon is None:\r\n active_icon = icon\r\n if disabled_icon is None:\r\n disabled_icon = icon\r\n self._icons = [icon, active_icon, disabled_icon]\r\n if active_icon.get_rect() != icon.get_rect():\r\n raise ValueError(\r\n \"Both the icon and active_icon must have the same dimensions\"\r\n )\r\n self.rect = icon.get_rect()\r\n self.image = self._icons[state]", "def draw_next_amino(self, amino_type = None, prev_coordinat_x = 0, prev_coordinat_y = 0, size = 0, action = 0):\n\n\t\tif amino_type == 1: \n\t\t\tif action == 0:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_up(amino_type = 1, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telif action == 1:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_left(amino_type = 1, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telif action == 2:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_right(amino_type = 1, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telse:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_down(amino_type = 1, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\n\t\telif amino_type == 2 :\n\t\t\tif action == 0:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_up(amino_type = 2, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telif action == 1:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_left(amino_type = 2, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telif action == 2:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = 
self.draw_next_right(amino_type = 2, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img\n\t\t\telse:\n\t\t\t\tnew_amino_position_x, new_amino_position_y, img = self.draw_next_down(amino_type = 2, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)\n\t\t\t\treturn new_amino_position_x, new_amino_position_y, img", "def set_indicator(self, name, value):\n return self.display_table.set_indicator((self.display_table_root,name),value)", "def indicator(position, size):\n\n # Internal indicator function implementation\n def _indicator(position, size, x):\n return float(np.all(abs(position - x) < size / 2))\n\n return partial(_indicator, position, size)", "def initIndicator(self):\n\t\tself.triTip = (self.width / 2, self.height * 0.35)\n\t\tself.triLeft = (self.triTip[0] - self.width*0.01, self.height*.3)\n\t\tself.triRight = (self.triTip[0] + self.width*0.01, self.height*.3)\n\t\tself.indicatorCoords = ( self.triLeft, self.triTip, self.triRight)\n\t\tself.indicator = self.createPolygon( self.indicatorCoords )\n\t\tself.indicator.fill = self.indicatorColor[self.inTune]\n\t\tself.indicator.line = self.indicator.fill * .5", "def show(self):\n if not self.shown and not self.flag:\n self.shown = True\n self.configure(image=Tile.images[self.count])\n return -1 if self.mine else 1\n return 0", "def icon(self):", "def _on_gui_event(self): \n pos = self.last_gui_position\n button = self.get_object_id(pos)\n next_state = self.sm.state \n \n if button == self.buttons.BARCODE:\n self.barcode = \"\"\n self.set_active_entry(self.buttons.BARCODE)\n self._request_redraw()\n next_state = self.states.BARCODE\n\n if button == self.buttons.DESCRIPTION:\n self.description = \"\"\n self.set_active_entry(self.buttons.DESCRIPTION)\n self._request_redraw()\n next_state = self.states.DESCRIPTION\n\n if button == self.buttons.PRICE:\n self.price = 0\n self.set_active_entry(self.buttons.PRICE)\n self._request_redraw()\n next_state = self.states.PRICE\n\n if button == self.buttons.DONE:\n if self.data_ready():\n self.add_product()\n next_state = self.states.ADDING\n else:\n self.set_banner_with_timeout(\"One or more entries not valid!\", 4, Colours.WARN, self._banner_timeout)\n self._request_redraw()\n next_state = self.states.WARNING\n\n if button == self.buttons.CANCEL:\n self._exit()\n next_state = self.states.BARCODE\n\n #No GUI object hit:\n return next_state", "def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0.05, 0.05, 0.05)\n cr.paint()\n for pos in self.next_piece.occupying():\n self.paint_square(tuple_add(pos, (-1, 1)),\n self.next_piece.color, cr)", "def test_changing_to_icon_mode(self):\n iface = self.create(InterfaceItem, UML.Interface)\n iface.drawing_style = iface.DRAW_ICON\n\n assert iface.DRAW_ICON == iface.drawing_style\n\n # default folded mode is provided\n self.assertTrue(iface.FOLDED_PROVIDED, iface.folded)\n\n # check if style information changed\n self.assertTrue(iface._name.style.text_outside)\n\n # handles are not movable anymore\n for h in iface.handles():\n assert not h.movable\n\n # name is visible\n assert iface._name.is_visible()", "def on_gui_event(self, pos):\n if self.active:\n self.last_gui_position = pos\n self.sm.on_state_event(self.events.GUIEVENT)", "def update_indicator(self, i_key, color):\n if self._myIndicatorsManager.get_line_type(i_key) < 2:\n # horizontal or vertical\n canvas_line_index = self._myIndicatorsManager.get_canvas_line_index(i_key)\n 
self._myCanvas.updateLine(ikey=canvas_line_index, vecx=None, vecy=None, linecolor=color)\n else:\n # 2-way\n canvas_line_index_h, canvas_line_index_v = self._myIndicatorsManager.get_canvas_line_index(i_key)\n # h_vec_set, v_vec_set = self._myIndicatorsManager.get_2way_data(i_key)\n\n self._myCanvas.updateLine(ikey=canvas_line_index_h, vecx=None, vecy=None, linecolor=color)\n self._myCanvas.updateLine(ikey=canvas_line_index_v, vecx=None, vecy=None, linecolor=color)\n\n return", "def DrawIcon(self, dc, rect, pane): \r\n \r\n # Draw the icon centered vertically \r\n if pane.icon.Ok():\r\n if pane.HasCaptionLeft():\r\n bmp = wx.ImageFromBitmap(pane.icon).Rotate90(clockwise=False)\r\n dc.DrawBitmap(bmp.ConvertToBitmap(), rect.x+(rect.width-pane.icon.GetWidth())/2, rect.y+rect.height-2-pane.icon.GetHeight(), True)\r\n else:\r\n dc.DrawBitmap(pane.icon, rect.x+2, rect.y+(rect.height-pane.icon.GetHeight())/2, True)", "def DrawIcon(self, dc):\r\n\r\n rect = wx.Rect(*self.GetClientRect())\r\n point = wx.Point()\r\n length = 0\r\n\r\n rect.Deflate(4, 4)\r\n dc.SetPen(wx.Pen(colourIconBorder))\r\n dc.SetBrush(wx.Brush(colourIconBackground))\r\n dc.DrawRectangleRect(rect)\r\n\r\n right1 = rect.GetRight() + 1\r\n bottom1 = rect.GetBottom() + 1\r\n\r\n dc.SetPen(wx.Pen(colourIconShadow))\r\n dc.DrawLine(rect.x + 1, bottom1, right1 + 1, bottom1)\r\n dc.DrawLine(right1, rect.y + 1, right1, bottom1 + 1)\r\n\r\n rect.Deflate(1, 1)\r\n\r\n if self._direction == wx.TOP:\r\n rect.height -= rect.height / 2\r\n point = rect.GetBottomLeft()\r\n length = rect.width\r\n\r\n elif self._direction == wx.LEFT:\r\n rect.width -= rect.width / 2\r\n point = rect.GetTopRight()\r\n length = rect.height\r\n\r\n elif self._direction == wx.RIGHT:\r\n rect.x += rect.width / 2\r\n rect.width -= rect.width / 2\r\n point = rect.GetTopLeft()\r\n length = rect.height\r\n\r\n elif self._direction == wx.BOTTOM:\r\n rect.y += rect.height / 2\r\n rect.height -= rect.height / 2\r\n point = rect.GetTopLeft()\r\n length = rect.width\r\n\r\n elif self._direction == wx.CENTER:\r\n rect.Deflate(1, 1)\r\n point = rect.GetTopLeft()\r\n length = rect.width\r\n\r\n dc.GradientFillLinear(rect, colourIconDockingPart1,\r\n colourIconDockingPart2, self._direction)\r\n\r\n dc.SetPen(wx.Pen(colourIconBorder))\r\n\r\n if self._direction == wx.CENTER: \r\n self.DrawDottedLine(dc, rect.GetTopLeft(), rect.width, False)\r\n self.DrawDottedLine(dc, rect.GetTopLeft(), rect.height, True)\r\n self.DrawDottedLine(dc, rect.GetBottomLeft(), rect.width, False)\r\n self.DrawDottedLine(dc, rect.GetTopRight(), rect.height, True)\r\n \r\n elif self._direction in [wx.TOP, wx.BOTTOM]:\r\n self.DrawDottedLine(dc, point, length, False)\r\n \r\n else:\r\n self.DrawDottedLine(dc, point, length, True)", "def is_indicator():\n return True", "def updateStatus(msg, inpos):\n sector = inpos / CD_FRAMEWORDS\n pos = int((float(sector - startsector) / sectorlen) * pbarlength)\n if pos < 0 or pos > pbarlength - 1:\n print \"position out of bounds: %d (%d of %d+%d)\" \\\n % (pos, sector, startsector, sectorlen)\n return\n \n cur = progressbar[pos]\n if msg == PARANOIA_CB_VERIFY:\n pass\n elif msg == PARANOIA_CB_READ:\n global readsector\n if sector > readsector:\n readsector = sector\n elif msg == PARANOIA_CB_FIXUP_EDGE:\n if cur == ' ':\n cur == '-'\n elif msg == PARANOIA_CB_FIXUP_ATOM:\n if cur == ' ' or cur == '-':\n cur = '+'\n elif msg == PARANOIA_CB_READERR:\n if cur != 'V':\n cur = 'e'\n elif msg == PARANOIA_CB_SKIP:\n cur = 'V'\n elif msg == PARANOIA_CB_FIXUP_DROPPED or msg == 
PARANOIA_CB_FIXUP_DUPED:\n if cur == ' ' or cur == '-' or cur == '+':\n cur = '!'\n progressbar[pos] = cur", "def UpdateDockGuide(self, pos):\r\n\r\n inside = self.GetScreenRect().Contains(pos)\r\n \r\n if inside:\r\n image = self._bmp_focus\r\n else:\r\n image = self._bmp_unfocus\r\n\r\n if image != self._currentImage:\r\n self._currentImage = image\r\n self.Refresh()\r\n self.Update()" ]
[ "0.56549734", "0.55769974", "0.5466077", "0.5435122", "0.5259966", "0.52186924", "0.5168424", "0.5134024", "0.5118653", "0.50938314", "0.5083714", "0.5069448", "0.5061544", "0.5054154", "0.50497353", "0.50464547", "0.50030607", "0.49856985", "0.4973985", "0.4902044", "0.48736304", "0.48681527", "0.48378813", "0.48327887", "0.48236802", "0.48200268", "0.47981936", "0.47908068", "0.47883117", "0.4779908" ]
0.60308945
0
Gets the icon file from the resources directory.
def get_icon():
    icon = Path(__file__).parent.joinpath("resources", "icon.png")

    # We just want the string to the path for PySide.
    return str(icon)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_icon(icon_file): \n img_path = _path.join(\n BASEPATH, _path.join('hallbench', _path.join('resources', 'img')))\n icon_path = _path.join(img_path, icon_file)\n icon = _QIcon()\n icon.addPixmap(\n _QPixmap(icon_path),\n _QIcon.Normal,\n _QIcon.Off)\n return icon", "def icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n os.pardir,\n \"icons\",\n \"review.png\"\n )", "def iconPath(icon):\n return resourcePath(icon, dirname=\"icons\")", "def getQIcon(self, resource_file):\n return QIcon(resource_manager.GetResourceFilePath(resource_file ))", "def get_icon(self):\r\n return get_icon(self.ICON)", "def GetIcon(*args, **kwargs):\n return _gdi_.IconBundle_GetIcon(*args, **kwargs)", "def getIconPath(self):\n try:\n return self.primaryAq().zIcon\n except AttributeError:\n return '/zport/dmd/img/icons/noicon.png'", "def icon(self):\n return self._config.get(CONF_ICON)", "def get_icon(self):\n return self.ICON", "def get_icon(self):\n return self._icon", "def getIcon(self): #$NON-NLS-1$\r\n iconXPath = self._getIconXPath()\r\n icon = self._getExtensionText(iconXPath)\r\n if icon:\r\n return icon\r\n else:\r\n return None", "def getIconPath(self): #$NON-NLS-1$\r\n icon = self.getIcon()\r\n if not icon:\r\n return None\r\n return self.extensionPoint.getPlugin().getResourceRegistry().getImagePath(icon)", "def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)", "def get_icon(self):\n\n return self._icon", "def icon(self):\r\n icon_path = \":/plugins/pdok_services/icon.png\"\r\n icon = QtGui.QIcon(icon_path)\r\n return icon", "def get_icon(self):\n raise NotImplementedError", "def get_image(control):\n file = _icons.get(control.Id)\n if file:\n path = os.path.join(os.path.dirname(__file__), \"icons\", file)\n return pyxll.load_image(path)", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon" ]
[ "0.7267057", "0.7186539", "0.71680295", "0.71423817", "0.7094803", "0.7014663", "0.699875", "0.69923085", "0.6906685", "0.6906226", "0.6885282", "0.68688995", "0.6847708", "0.6815401", "0.6753929", "0.673115", "0.6730291", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965", "0.6727965" ]
0.7683156
0
Gets the style.css file from the resources directory.
def get_css():
    css = Path(__file__).parent.joinpath("resources", "style.css")

    with open(css, "r") as style_file:
        css_data = style_file.read()

    return css_data
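A short usage sketch tying the two resource helpers above into a Qt application. PySide6 is an assumption (the source only says PySide), and the wiring is illustrative rather than taken from the original project.

from PySide6.QtGui import QIcon
from PySide6.QtWidgets import QApplication

app = QApplication([])
app.setStyleSheet(get_css())          # apply resources/style.css application-wide
app.setWindowIcon(QIcon(get_icon()))  # get_icon() returns the path as a plain str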
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_style():\n\n style = os.path.join(os.path.dirname(__file__), \"templates\", \"style.css\")\n with open(style, \"r\") as opencss:\n return opencss.read().strip()", "def load_stylesheet(name):\n with suppress(FileNotFoundError):\n with open(STATIC_PATH / name, 'rt') as stylesheet:\n style = stylesheet.read().replace('@Path', (IMAGES_PATH / settings.value(Key.Theme)).as_posix())\n return style\n return ''", "def css_file(self):\n pass", "def load_style_sheet() -> str:\n return _preprocess_style(_read_text('style.css.template'))", "def jquery_ui_css():\n return static_file(\"jquery-ui.css\", root=os.path.join(BASEDIR, \"css\"))", "def loadStyleSheet(self, filename):\n try:\n self.cssfile = \"gui/\" + filename\n with open(self.cssfile, \"r\") as f:\n self.setStyleSheet(f.read())\n except IOError:\n logger.error('No style sheet found!')", "def get_stylesheet():\n\n return \"{static_url}/code_pygments/css/{theme}.css\".format(\n static_url=core_config['ASSETS_URL'],\n theme=module_config['PYGMENTS_THEME'])", "def load_QtCSS_StyleSheet(path):\n with open(path, \"rt\") as f:\n lines = f.read()\n return lines", "def loadcss(*args):\n return render(settings, 'CSS_FILES', 'staticloader/load_css.html', *args)", "def css(results_dir):\n local_css_dir = os.path.join(results_dir, \"css\")\n if not os.path.exists(local_css_dir):\n os.makedirs(local_css_dir)\n\n return local_css_dir", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def send_css(path):\n return send_from_directory('templates/css', path)", "def css(self):\n css = urllib2.urlopen(self.cssfonturl)\n return css.read()", "def loadStyleSheet(self, styleFile=None):\n #Read the default file\n file = QtCore.QFile(\"resources/styles/default.css\")\n if not file.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text) is True :\n raise IOError(\"Can't load the style file.\")\n stylesheet = file.readAll()\n\n #Conversion from QByteArray to Unicode String\n codec = QtCore.QTextCodec.codecForName(\"KOI8-R\")\n string = codec.toUnicode(stylesheet)\n\n #Apply the style to the whole application\n self.setStyleSheet(string)", "def bootstrap_css_url():\n return css_url()", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'", "def propeller_css_url():\n return css_url()", "def get_resource(self, rsc_path):\n\n\t\ttry:\n\t\t\tfrom pkg_resources import resource_filename\n\t\t\treturn resource_filename(__name__, rsc_path)\n\t\texcept ImportError:\n\t\t\treturn os.path.join(os.path.dirname(__file__), rsc_path)", "def css_view(request):\n\n stylesheets = []\n for css_file in request.registry.settings['spline.plugins.stylesheets']:\n stylesheets.append(render(\"/css/%s\" % css_file, {}, request=request))\n\n response = request.response\n response.content_type = 'text/css'\n response.charset = 'utf-8'\n response.text = u'\\n'.join(stylesheets)\n return response", "def get_default_stylesheet(css=None):\n # delayed import to speed up time\n from sphinx.builders.html import Stylesheet\n rel = \"_static/\" + style_figure_notebook[0]\n res = [Stylesheet(rel=\"stylesheet\", filename=rel)]\n if css is not None:\n for cs in css:\n res.append(Stylesheet(rel=\"stylesheet\", filename=cs))\n return res", "def _get_custom_css(self) -> str:\n paths = []\n\n for scss_path in self.custom_scss:\n # Simplify the path to be only the relative path, if they've included the whole thing.\n 
relative_path = scss_path.split(\"static/\", 1)[-1]\n\n # Check that we can find this file with one of the other finders.\n absolute_path = None\n for finder in self.other_finders:\n if absolute_path := finder.find(relative_path):\n break\n\n # Raise an error if we can't find it.\n if absolute_path is None:\n raise ValueError(\n f\"Unable to locate the SCSS file \\\"{scss_path}\\\". Make sure the file exists, \"\n \"and ensure that one of the other configured Finders are able to locate it. \\n\"\n \"See https://docs.djangoproject.com/en/3.2/ref/contrib/staticfiles/ for more \"\n \"information about how static files are discovered.\"\n )\n\n # Prepare the paths. SASS wants forwardslash string, the rest needs a Path.\n absolute_path = str(absolute_path).replace(\"\\\\\", \"/\")\n relative_path = Path(relative_path)\n\n # Now load up the scss file\n scss_string = f'@import \"{absolute_path}\";'\n\n # Store this as a css file - we don't check and raise here because it would have\n # already happened earlier, during the Bulma compilation\n css_string = sass.compile(string=scss_string, output_style=self.output_style)\n\n css_path = simple_bulma_path / relative_path.parent\n css_path.mkdir(parents=True, exist_ok=True)\n css_path = f\"{css_path}/{relative_path.stem}.css\"\n\n with open(css_path, \"w\") as css_file:\n css_file.write(css_string)\n\n paths.append(f\"{relative_path.parent}/{relative_path.stem}.css\")\n\n return paths", "def getResource(self, file_name):\n path = os.path.join(os.path.dirname(__file__), \"resource\", file_name)\n return open(path)", "def editor_css():\n return format_html('<link rel=\"stylesheet\" href=\"' \\\n + settings.STATIC_URL \\\n + 'css/editor.css\">')", "def GetStyleSheet():\n styles = []\n for locale in translation.LOCALES:\n styles.append(\"\"\"\n .goofy-label-{locale} {{\n display: none;\n }}\n .goofy-locale-{locale} .goofy-label-{locale} {{\n display: inline;\n }}\"\"\".format(locale=locale))\n return '\\n'.join(styles)", "def update_theme_css():\n\n theme_file = os.path.join('app', 'static', 'app', 'css', 'theme.scss')\n try:\n Path(theme_file).touch()\n logger.info(\"刷新{}缓存\".format(theme_file))\n except:\n logger.warning(\"无法访问{}\".format(theme_file))", "def __css_path(self, css_path=None):\n return (\n \"\"\"<link rel=\"stylesheet\" type=\"text/css\" href='\"\"\" + css_path + \"\"\"'>\"\"\"\n if css_path\n else \"\"\n )", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def getCss(app):\n\n aContext = app.context\n appCss = aContext.css\n\n cssPath = f\"{dirNm(dirNm(abspath(__file__)))}\" f\"{SERVER_DISPLAY_BASE}\"\n cssPath = normpath(cssPath)\n genericCss = \"\"\n for cssFile in SERVER_DISPLAY:\n with open(f\"{cssPath}/{cssFile}\", encoding=\"utf8\") as fh:\n genericCss += fh.read()\n\n tableCss = (\n \"tr.tf.ltr, td.tf.ltr, th.tf.ltr { text-align: left ! important;}\\n\"\n \"tr.tf.rtl, td.tf.rtl, th.tf.rtl { text-align: right ! important;}\\n\"\n )\n return f\"<style>{tableCss}{genericCss}{appCss}</style>\"", "def load_style():\n display(HTML(Path('bhsa.css').read_text()))" ]
[ "0.7298122", "0.67225504", "0.63319784", "0.62525135", "0.6076459", "0.59549505", "0.59542996", "0.57827294", "0.577367", "0.5773628", "0.5684894", "0.56523114", "0.56331784", "0.56084", "0.5472248", "0.5466866", "0.54628026", "0.5364383", "0.534764", "0.5338111", "0.5276471", "0.5256504", "0.5240087", "0.52233636", "0.52212036", "0.52192265", "0.5217668", "0.5177966", "0.51564705", "0.51545954" ]
0.7843269
0
Retrieve metadata from COG asset
def _load_metadata_from_asset():

    with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',
                      GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):
        with rasterio.open(href) as src:
            # Retrieve metadata stored in COG file
            metadata = src.profile
            metadata.update(src.tags())
            metadata['shape'] = src.shape

            # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be
            # accessed via the .crs method. If this occurs assume it is in WGS84.
            # All COGs in AWS appear to be projected in WGS84.
            if src.crs is None:
                metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)
            else:
                metadata['crs'] = src.crs

            # Compute bounding box, image footprint, and gsd
            bbox, footprint, metadata = _get_geometries(src, metadata)

            # Derive some additional metadata from the filename
            fname = os.path.basename(href)
            metadata = _parse_filename(fname, metadata)

    return metadata, bbox, footprint
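A standalone sketch of the same rasterio pattern for an arbitrary COG. The href below is a placeholder, `_get_geometries` is not reproduced, and only rasterio calls that appear above (profile, tags, crs, bounds) are used.

import rasterio

href = "https://example.com/some-cog.tif"  # placeholder URL, not a real asset
with rasterio.Env(AWS_NO_SIGN_REQUEST="YES"):
    with rasterio.open(href) as src:
        profile = dict(src.profile)
        profile.update(src.tags())
        crs = src.crs or rasterio.crs.CRS.from_epsg(4326)  # same WGS84 fallback
        bounds = src.bounds  # (left, bottom, right, top), basis for a bbox/footprint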
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)", "def GetMetadata(self):\n return self.dict['meta']", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def get_meta(self, asset):\n return self.get_name_and_meta(asset)[1]", "def get_metadata (self, name):\n return self.metadata.get(name)", "def metadata(self): # -> None:\n ...", "def get_metadata(self):\n return gdal.Open(self.filename).GetMetadata()", "def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata", "def get_metadata_v3(session):\n LOG.debug(\"Exporting metadata for SFS augur build\")\n\n metadata = datastore.fetch_rows_from_table(session, (\"shipping\", \"metadata_for_augur_build_v3\"))\n\n return Response((row[0] + '\\n' for row in metadata), mimetype=\"application/x-ndjson\")", "def getInfo(self):\n doc = minidom.parse(urllib.urlopen(serverString + \"/rest/asset/\" + self.id))\n self._getInfoFromNode(doc.getElementsByTagName(\"asset\")[0])", "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta", "def get_object_metadata(self, key):\n obj = self.client.get_object(Bucket=self.bucket, Key=key)\n return obj.get('Metadata', {})", "def get_metadata_v2(session):\n LOG.debug(\"Exporting metadata for SFS augur build\")\n\n metadata = datastore.fetch_rows_from_table(session, (\"shipping\", \"metadata_for_augur_build_v2\"))\n\n return Response((row[0] + '\\n' for row in metadata), mimetype=\"application/x-ndjson\")", "def read_metadata(self):\n return self.parent.controller.get_tag_metadata()", "def get_metadata_body(self):\n key = self.build_s3_key('datapackage.json')\n return self.get_s3_object(key)", "def met(r):\n image_url = r.get(\"image\")\n if image_url is None:\n if r.get(\"source\") is not None:\n image_url = r.get(\"source\").get(\"href\")\n image_name = r.get(\"name\")\n image_artist = r.get(\"Who\")\n return image_url, image_name, image_artist", "def get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def metadata(self): # -> list[Unknown]:\n ...", "def metadata(self): # -> list[Unknown]:\n ...", "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def get_metadata(key=''):\n response, content = httplib2.Http().request(\n '%s/%s' % (METADATA_BASE_URL, key),\n headers={'Metadata-Flavor': 'Google'},\n method='GET',\n )\n if response['status'] == '404':\n raise NotFoundError(response, content)\n return content", "def metadata(self):\n return parse_metadata(self.metadata_path())", "def get_metadata(\n self,\n digest: Optional[Digest] = None,\n ignore_errors: bool = True,\n ) -> BareAsset:\n ...", 
"def metadata(self) -> global___SummaryMetadata:", "def metadata(self):\r\n return resources.Metadata(self)", "def get_metadata(self):\n url = 'https://www150.statcan.gc.ca/t1/wds/rest/getCubeMetadata'\n payload = [{'productId': int(self.productId)}]\n print('Retreiving metadata for Product ID: ' + self.productId)\n req = requests.post(\n url,\n json=payload\n )\n response = req.json()\n if (response[0]['status'] == \"SUCCESS\"):\n return(response[0]['object'])\n else:\n self.errors = response\n print('ERROR: Metadata for Product ID ' + self.productId + ' could not be loaded.')\n print('ERROR: see Product.errors() for more info')", "def metadata(self):\n return self.meta.metadata", "def _exif_data(self):\n return exif.get_metadata(self._filename)", "def getAssetInfo(self):\n return self._AssetInfo" ]
[ "0.70809305", "0.68785983", "0.6787916", "0.6582544", "0.65211964", "0.6488768", "0.64880824", "0.6478007", "0.64755267", "0.64237744", "0.6389924", "0.63689715", "0.6299962", "0.6248687", "0.62399286", "0.62228495", "0.6218916", "0.6177658", "0.6171238", "0.6171238", "0.6155376", "0.61543214", "0.6148661", "0.61175585", "0.61028785", "0.60943145", "0.6088904", "0.60827976", "0.6057598", "0.60495347" ]
0.81800365
0
Parse metadata from the SAR cog filename
def _parse_filename(filename, metadata):

    file_noext = os.path.splitext(filename)[0]
    fname = file_noext.split("_")

    metadata["scene_id"] = fname[1]
    metadata["beam_mode"] = sat_properties.radarsat_product_characteristics[
        fname[2]]
    metadata["product_type"] = fname[-1]
    try:
        metadata["product_description"] = sat_properties.radarsat_1_data_products[
            fname[-1][:3]]['description']
    except Exception:
        metadata["product_description"] = ""

    metadata["scene_mean_time"] = datetime.datetime.strptime(
        fname[3] + fname[4], "%Y%m%d%H%M%S")

    return metadata
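A made-up RADARSAT-1-style filename, shown only to illustrate which underscore-separated fields the parser above reads; it is not a real product name, and the sat_properties lookup tables are not reproduced here.

import datetime

fname = "RS1_A0123456_SWB_20040510_093015_HH_SGF".split("_")

scene_id = fname[1]                   # "A0123456"
beam_mode_key = fname[2]              # key into radarsat_product_characteristics
product_type = fname[-1]              # "SGF"; its first three chars index the product table
scene_mean_time = datetime.datetime.strptime(fname[3] + fname[4], "%Y%m%d%H%M%S")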
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def parse_metadata_file(self, file):\n \n file_keys = list(file.keys())\n \n if 'labelAnnotations' in file_keys:\n #file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.5)]\n file_annots = file['labelAnnotations'][:]\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n \n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\n \n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n \n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n \n return df_metadata", "def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 
Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data", "def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata", "def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get 
info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()", "def parse_metadata_file(self, file):\n\n file_keys = list(file.keys())\n\n if 'labelAnnotations' in file_keys:\n file_annots = file['labelAnnotations']\n file_top_score = np.asarray(\n [x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n\n file_colors = file['imagePropertiesAnnotation']['dominantColors'][\n 'colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray(\n [x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray(\n [x['confidence'] for x in file_crops]).mean()\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray(\n [x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n\n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n\n return df_metadata", "def parser(filename):\n\n regex = re.compile(\n # prolog\n 
r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n\n for md_file in self.metadata:\n tree = xml.etree.ElementTree.ElementTree ( file=md_file ).getroot()\n dirname = os.path.dirname ( md_file )\n try:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S\") )\n except:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S.%f\") )\n self.atcorr_refl.append(\n os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append( float ( tree[4][10][0].text ) )\n self.sza.append( float ( tree[4][10][1].text ) )\n self.vaa.append( float ( tree[4][10][2].text ) )\n self.vza.append( float ( tree[4][10][3].text ) )\n self.res.append( float ( tree[2][1].text ) )\n self._mask.append( os.path.join ( dirname, tree[1][5].text ) )", "def parse_meta_file(fname):\n flds = {}\n basename = re.match('(^.+?)\\..+', os.path.basename(fname)).groups()[0]\n flds['basename'] = basename\n with open(fname) as f:\n text = f.read()\n # split into items\n for item in re.split(';', text):\n # remove whitespace at beginning\n item = re.sub('^\\s+', '', item)\n match = re.match('(\\w+) = (\\[|\\{)(.*)(\\]|\\})', item, re.DOTALL)\n if match:\n key, _, value, _ = match.groups()\n # remove more whitespace\n value = re.sub('^\\s+', '', value)\n value = re.sub('\\s+$', '', value)\n # print key,':', value\n flds[key] = value\n # now check the needed things are there\n needed_keys = ['dimList', 'nDims', 'nrecords', 'dataprec']\n for k in needed_keys:\n assert k in flds\n # transform datatypes\n flds['nDims'] = int(flds['nDims'])\n flds['nrecords'] = int(flds['nrecords'])\n # endianness is set by _read_mds\n flds['dataprec'] = np.dtype(re.sub(\"'\", '', flds['dataprec']))\n flds['dimList'] = [[int(h) for h in\n re.split(',', g)] for g in\n re.split(',\\n', flds['dimList'])]\n if 'fldList' in flds:\n flds['fldList'] = [re.match(\"'*(\\w+)\", g).groups()[0] for g in\n re.split(\"'\\s+'\", flds['fldList'])]\n assert flds['nrecords'] == len(flds['fldList'])\n return flds", "def parse_metadata(metadata):\n id_to_classes_recount = {}\n with open(metadata, \"r\") as file:\n header = next(file)\n for line in file:\n try:\n splitted_line = line.split(\"\\n\")[0].split(\"\\t\")\n file_id = splitted_line[22]\n project = splitted_line[77]\n sample_type = splitted_line[107]\n if project == \"TCGA-LIHC\":\n if sample_type == 'Primary Tumor':\n id_to_classes_recount[file_id] = 1\n elif sample_type == 'Solid Tissue Normal':\n id_to_classes_recount[file_id] = 0\n elif sample_type == 'Recurrent Tumor':\n id_to_classes_recount[file_id] = 1\n else:\n print(sample_type)\n except:\n pass\n return id_to_classes_recount", "def _parse_file(cls, 
filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)", "def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):", "def extract_metadata(name):\n seps = name.count(\" - \")\n artist = title = None\n\n if seps == 1:\n\n pos = name.find(\" - \")\n artist = name[:pos].strip()\n title = name[pos + 3:].strip()\n\n else:\n title = name.strip()\n\n return dict(artist=artist, title=title)", "def get_scan_from_metadata(meta):\n with open(meta, 'r') as f:\n md = json.load(f)\n\n scan_name = None\n\n if 'lemnatec_measurement_metadata' in md:\n if 'gantry_system_variable_metadata' in md['lemnatec_measurement_metadata']:\n if 'Script copy path on FTP server' in md['lemnatec_measurement_metadata']['gantry_system_variable_metadata']:\n ftp = md['lemnatec_measurement_metadata']['gantry_system_variable_metadata']['Script copy path on FTP server']\n scan_name = os.path.basename(ftp).replace(\".cs\", \"\").lower()\n\n return scan_name", "def find_meta(filename, source_directory):\n metafile = os.path.join(source_directory, filename + '_Metadata.csv')\n metadf = pd.read_csv(metafile)\n metadf = metadf.rename(str.lower, axis='columns')\n\n schfile = metadf['schedule_file_name'][0].split('\\\\')[-1].split('.sdu')[0].split('-')[1]\n param = schfile.replace('_', '.')\n\n return param", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. 
This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def _parse_cvcfile(self, cvcfilepath):\n cvcfilename = os.path.basename(cvcfilepath)\n (Ymd, HMS, cvcextrest) = cvcfilename.split('_', 2)\n datatype, restdat = cvcextrest[0:3], cvcextrest[3:]\n (rest, _datstr) = restdat.split('.')\n _nr512 = 512\n if datatype == 'acc':\n rest = rest.lstrip('_')\n (_nr512, nrrcus0, nrrcus1) = map(int, rest.split('x'))\n filenamedatetime = datetime.datetime.strptime(Ymd + 'T' + HMS,\n '%Y%m%dT%H%M%S')\n # NOTE: For ACC, filename is last obstime, while for XST, it is first.\n if datatype == 'acc':\n filebegindatetime = filenamedatetime - datetime.timedelta(\n seconds=_nr512)\n else:\n filebegindatetime = filenamedatetime\n return datatype, filebegindatetime", "def load_meta(path= \"~/data/LJSpeech-1.0\", filename= \"metadata.csv\", sep= \"|\", normalize= normalize):\n names, texts = [], []\n with open(join(expanduser(path), filename)) as file:\n for line in file:\n name, _, text = line.split(sep)\n names.append(name)\n texts.append(normalize(text))\n return np.array(names), np.array(texts)", "def parse_dist_meta():\n\n re_meta = re.compile(r\"__(\\w+?)__\\s*=\\s*(.*)\")\n re_doc = re.compile(r'^\"\"\"(.+?)\"\"\"')\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, NAME, \"__init__.py\")) as meta_fh:\n distmeta = {}\n for line in meta_fh:\n if line.strip() == \"# -eof meta-\":\n break\n match = re_meta.match(line.strip())\n if match:\n distmeta.update(_add_default(match))\n return distmeta", "def extract(self, fname, quality=0.5, decoder=None):\n fname = safe_unicode(fname)\n if not fname:\n print('UNICODE FAILED: %s' % fname)\n return {}\n\n filename, real_filename = fname, fname\n\n (f, ext) = os.path.splitext(fname)\n ext = ext.lower()[1:]\n\n # Create parser\n try:\n if decoder:\n tags = None\n tags = [ (\"id\", decoder), None ]\n else:\n tags = None\n parser = None\n parser = hachoir_parser.createParser(fname, real_filename=real_filename, tags=tags)\n except hachoir_core.stream.InputStreamError, err:\n print('Failed to create parser for %s' % fname)\n print(err)\n return False\n if not parser:\n print('No parser found for %s' % fname)\n return False\n\n # Extract metadata\n 
results = None\n try:\n results = hachoir_metadata.extractMetadata(parser, quality)\n except hachoir_core.error.HachoirError, err:\n print('Failed to extract metadata for %s' % fname)\n print(err)\n return False\n if not results:\n return False\n\n # Convert metadata to dictionary\n meta = None\n meta = {\n 'unknown': {}\n }\n\n prefix = ''\n\n default_cat = None\n stream_id = None\n\n for line in str(results).split('\\n'):\n line = line.strip()\n #print('LINE: \\'%s\\'' % line)\n\n if line[0] in string.ascii_letters:\n (default_cat, stream_id) = self.parse_category(line)\n\n if default_cat not in meta.keys():\n if default_cat in ['audio', 'video']:\n meta[default_cat] = [{'stream_id': stream_id}]\n else:\n meta[default_cat] = {}\n else:\n if default_cat in ['audio', 'video']:\n meta[default_cat][stream_id] = {'stream_id': stream_id}\n\n continue\n\n line = safe_unicode(line)[2:]\n if not ': ' in line:\n continue\n\n tokens = line.split(': ')\n key = tokens[0]\n value = ': '.join(tokens[1:])\n\n #print(\"K: %s; V: %s; DC: %s; ID: %s\" % (key, value, default_cat, stream_id))\n\n\n if key in self._ignored_keys:\n continue\n\n if key in self._key_remapper.keys():\n key = self._key_remapper[key]\n\n if default_cat is 'unknown' and key in self._key_categories.keys():\n if not self._key_categories[key] in meta.keys():\n meta[self._key_categories[key]] = {}\n default_cat = self._key_categories[key]\n\n if key in self._int_fields:\n value = self.parse_int(value)\n\n elif key in self._float_fields:\n value = self.parse_float(value)\n\n elif key in self._bitrate_fields:\n bitrate_meta = self.parse_bitrate(value)\n if not bitrate_meta:\n continue\n if 'vbr' in bitrate_meta.keys() and default_cat in ['audio', 'video']:\n meta[default_cat][stream_id]['vbr'] = True\n value = bitrate_meta['bitrate']\n\n elif key in self._duration_fields:\n value = self.parse_duration(value)\n\n elif key in self._endianness_fields:\n value = self.parse_endianness(value)\n\n elif key in self._samplerate_fields:\n value = self.parse_samplerate(value)\n\n elif key in self._channel_fields:\n value = self.parse_channel(value)\n\n if default_cat in ['audio', 'video']:\n meta[default_cat][stream_id][key] = value\n else:\n meta[default_cat][key] = value\n\n for category in ['unknown']:\n if len(meta[category]) == 0:\n del(meta[category])\n\n return meta", "def update_metadata(self):\n parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n meta['top_fn'] = sorted(glob('{}/e*/structure.prmtop'.format(self.input_folder)))\n self.meta = meta", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n for md_file in self.metadata:\n # This is required to get rid of the namespace cruft\n it = xml.etree.ElementTree.iterparse ( md_file )\n for _, el in it:\n el.tag = el.tag.split('}', 1)[1] # strip all namespaces\n tree = it.root\n\n dirname = os.path.dirname ( md_file )\n\n self.date.append( datetime.datetime.strptime(\n tree.find(\"global_metadata/acquisition_date\").text,\n \"%Y-%m-%d\") )\n\n for c in tree.findall (\"global_metadata/corner\"):\n if c.attrib['location'] == \"UL\":\n ulx = float ( c.attrib['longitude'] )\n uly = float ( c.attrib['latitude'] )\n else:\n lrx = float ( c.attrib['longitude'] )\n lry = 
float ( c.attrib['latitude'] )\n\n self.vaa.append ( get_vaa ( lrx, lry, ulx, uly ) )\n\n #self.atcorr_refl.append( os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['azimuth'] ) )\n self.sza.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['zenith'] ) )\n self.vza.append( 0.0 ) # Note that LDCM can look sideways a bit!\n self.res.append( 30. ) # 30m\n\n images = []\n mask = []\n for b in tree.findall(\"bands/band\"):\n if b.attrib['product'] == \"toa_refl\":\n fname = b.find(\"file_name\").text\n if fname.find ( \"qa.tif\" ) < 0:\n images.append ( os.path.join ( dirname, fname ) )\n elif b.attrib['product'] == \"cfmask\":\n mask = os.path.join ( dirname, fname )\n # Create VRT?\n subprocess.call ([\"gdalbuildvrt\", \"-overwrite\", \"-separate\",\n os.path.join ( dirname, md_file.replace(\".xml\", \"_crop.vrt\" )) ] + images )\n self.atcorr_refl.append ( os.path.join ( dirname,\n md_file.replace(\".xml\", \"_crop.vrt\" )) )\n self._mask.append( mask )", "def extract_metadata(rawfile,codeversions={}):\r\n import datetime\r\n add_standard_metadata(rawfile)\r\n # get monochromator-related information\r\n mom = average_metadata(rawfile['$entry/instrument/crystal/omega'])\r\n tk_angle = average_metadata(rawfile['$entry/instrument/crystal/takeoff_angle'])\r\n # get the date\r\n date_form = datetime.datetime.strptime(str(rawfile['$entry/start_time']),\"%Y-%m-%d %H:%M:%S\")\r\n mono_change = datetime.datetime(2009,04,01)\r\n if date_form < mono_change:\r\n monotype = \"115\"\r\n else:\r\n monotype = \"335\"\r\n hklval = pick_hkl(mom - tk_angle/2.0,monotype)\r\n if len(hklval)==3: # i.e. h,k,l found\r\n rawfile.add_metadata(\"_pd_instr_monochr_pre_spec\",\r\n hklval + \" reflection from Ge crystal, \"+monotype+\" cut\",tag=\"CIF\")\r\n wavelength = calc_wavelength(hklval,tk_angle)\r\n rawfile.add_metadata(\"_diffrn_radiation_wavelength\",\"%.3f\" % wavelength,tag=\"CIF\")\r\n rawfile.add_metadata(\"_[local]_diffrn_radiation_wavelength_determination\",\r\n \"Wavelength is calculated from monochromator hkl and takeoff angle and is therefore approximate\",\r\n tag=\"CIF\")\r\n # The following is changed later if the primary collimator is found to be inserted\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"%.3f\" % (0.099*2.0*wavelength),tag=\"CIF\")\r\n # Do some logic to obtain collimator positions\r\n pcr = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_rotation\"])\r\n pcx = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_translation\"])\r\n if pcx > 120:\r\n if abs(pcr-360.0)<5 or abs(pcr) < 5: # 5' collimator\r\n coll_string = \"A 5' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.0833\",tag=\"CIF\")\r\n else:\r\n coll_string = \"A 10' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.1667\",tag=\"CIF\")\r\n else: coll_string = \"No primary monochromator \"\r\n try:\r\n scr = average_metadata(rawfile['$entry/sample/secondary_collimator'])\r\n if scr>0.5:\r\n coll_string += \" and a 10' secondary collimator post-monochromator.\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_mono/spec\",\"0.1667\",tag=\"CIF\")\r\n else:\r\n coll_string += \" and no secondary collimator.\"\r\n rawfile.add_metadata(\"_diffrn_radiation_collimation\",coll_string,tag=\"CIF\")\r\n except AttributeError: #some early files are missing secondary collimator\r\n pass\r\n # 
These values were in the CIF writing area of the Java routines, best put here\r\n try:\r\n program_release = str(rawfile[\"$entry/program_revision\"])\r\n except AttributeError:\r\n program_release = str(rawfile[\"$entry/sics_release\"])\r\n rawfile.add_metadata(\"_computing_data_collection\",str(rawfile[\"$entry/program_name\"]) + \" \" + \\\r\n program_release,\"CIF\")\r\n # List the code versions used for data reduction\r\n codelist = \"\"\r\n for key in codeversions.keys():\r\n codelist += \"%-20s: %s\\n\" % (key,codeversions[key])\r\n rawfile.add_metadata(\"_computing_data_reduction\", str(\"Gumtree Echidna/Python routines, Git versions:\\n\" + codelist),\"CIF\")\r\n rawfile.add_metadata(\"_pd_spec_special_details\",sanitize(str(rawfile[\"$entry/sample/name\"])),\"CIF\")\r\n rawfile.add_metadata(\"_[local]_data_collection_description\",str(rawfile[\"$entry/sample/description\"]),\"CIF\")\r\n start_time = str(rawfile[\"$entry/start_time\"]).replace(\" \",\"T\")\r\n end_time = str(rawfile[\"$entry/end_time\"]).replace(\" \",\"T\")\r\n rawfile.add_metadata(\"_pd_meas_datetime_initiated\", start_time,\"CIF\")\r\n rawfile.add_metadata(\"_[local]_datetime_completed\", end_time,\"CIF\")\r\n try:\r\n username = str(rawfile[\"user_name\"])\r\n except:\r\n username = \"?\"\r\n rawfile.add_metadata(\"_pd_meas_info_author_name\", sanitize(username),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_email\", str(rawfile[ \"$entry/user/email\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_phone\", str(rawfile[ \"$entry/user/phone\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_2theta_monochr_pre\",\"%.3f\" % tk_angle,\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_dist_mono/spec\", \"%.1f\" % average_metadata(rawfile[ \"$entry/sample/mono_sample_mm\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_dist_spec/detc\",\"%.1f\" % average_metadata(rawfile[\"$entry/instrument/detector/radius\"]),\"CIF\")\r\n try:\r\n rawfile.add_metadata(\"_diffrn_source_power\", \"%.2f\" % (average_metadata(rawfile[\"$entry/instrument/source/power\"])*1000),\"CIF\")\r\n except AttributeError: #sometimes source power is missing\r\n pass\r\n # imgCIF information about geometry\r\n # axis loop\r\n names = (('_axis.id','_axis.type','_axis.equipment','_axis.depends_on'),)\r\n values = [['source','gravity','stth','horizontal','vertical'],\r\n ['.','.','rotation','rotation','translation'],\r\n ['source','gravity','detector','detector','detector'],\r\n ['.','.','.','stth','stth']]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n radius = rawfile.__dict__['ms'][\"_pd_instr_dist_spec/detc\"]\r\n # add the vectors:\r\n \"\"\"\r\n source 0 0 1 . . .\r\n gravity -1 0 0 . . .\r\n stth 1 0 0 . . .\r\n horizontal 1 0 0 . . 
.\r\n vertical 1 0 0 0 0 -728\r\n \"\"\"\r\n vector_dict = {\"_axis.vector[1]\":['0','-1','1','1','1'],\r\n \"_axis.vector[2]\":['0','0','0','0','0'],\r\n \"_axis.vector[3]\":['1','0','0','0','0'],\r\n \"_axis.offset[1]\":['.','.','.','.','.'],\r\n \"_axis.offset[2]\":['.','.','.','.','.'],\r\n \"_axis.offset[3]\":['1','0','0','0',\"-\"+radius]}\r\n rawfile.__dict__['ms'].AddToLoop('_axis.id',vector_dict)\r\n # Add information about the stth positions for later use\r\n rawfile.add_metadata(\"_diffrn_scan.id\",\"1\",\"CIF\")\r\n rawfile.add_metadata(\"_diffrn_scan.frames\",rawfile.shape[0],\"CIF\")\r\n frame_ids = map(lambda a:\"%d\" % a,range(rawfile.shape[0]))\r\n stths = rawfile.stth[:]\r\n names = ((\"_diffrn_scan_frame.frame_id\",\"_diffrn_scan_frame.frame_number\"),)\r\n values = [frame_ids,range(1,rawfile.shape[0]+1)] #Spec says start from 1\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n names = ((\"_diffrn_scan_frame_axis.frame_id\",\"_diffrn_scan_frame_axis.axis_id\",\r\n \"_diffrn_scan_frame_axis.angle\"),)\r\n values = [frame_ids,['stth']*rawfile.shape[0],map(float,stths)]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n return rawfile", "def parse_metadata_file(filename,\n logger,\n study_id=None,\n genome_name=None,\n case_list=False):\n \n logger.debug('Starting validation of meta file', extra={'filename_': filename})\n \n metaDictionary = {}\n with open(filename, 'rU') as metafile:\n for line_index, line in enumerate(metafile):\n # skip empty lines:\n if line.strip() == '':\n continue\n if ':' not in line:\n logger.error(\n \"Invalid %s file entry, no ':' found\",\n {True: 'case list', False: 'meta'}[case_list],\n extra={'filename_': filename,\n 'line_number': line_index + 1})\n meta_file_type = None\n return metaDictionary, meta_file_type\n key_value = line.split(':', 1)\n if len(key_value) == 2:\n metaDictionary[key_value[0]] = key_value[1].strip()\n\n if case_list:\n meta_file_type = MetaFileTypes.CASE_LIST\n else:\n meta_file_type = get_meta_file_type(metaDictionary, logger, filename)\n # if type could not be inferred, no further validations are possible\n if meta_file_type is None:\n return metaDictionary, meta_file_type\n\n missing_fields = []\n for field in META_FIELD_MAP[meta_file_type]:\n mandatory = META_FIELD_MAP[meta_file_type][field]\n if field not in metaDictionary and mandatory:\n logger.error(\"Missing field '%s' in %s file\",\n field,\n {True: 'case list', False: 'meta'}[case_list],\n extra={'filename_': filename})\n missing_fields.append(field)\n\n if missing_fields:\n meta_file_type = None\n # all further checks would depend on these fields being present\n return metaDictionary, meta_file_type\n\n # validate genetic_alteration_type, datatype, stable_id\n stable_id_mandatory = META_FIELD_MAP[meta_file_type].get('stable_id',\n False)\n if stable_id_mandatory:\n valid_types_and_id = validate_types_and_id(metaDictionary, logger, filename)\n if not valid_types_and_id:\n # invalid meta file type\n meta_file_type = None\n return metaDictionary, meta_file_type\n\n for field in metaDictionary:\n if field not in META_FIELD_MAP[meta_file_type]:\n logger.warning(\n 'Unrecognized field in %s file',\n {True: 'case list', False: 'meta'}[case_list],\n extra={'filename_': filename,\n 'cause': field})\n\n # check that cancer study identifiers across files so far are consistent.\n if (\n study_id is not None and\n 'cancer_study_identifier' in metaDictionary and\n study_id != metaDictionary['cancer_study_identifier']):\n logger.error(\n \"Cancer study 
identifier is not consistent across \"\n \"files, expected '%s'\",\n study_id,\n extra={'filename_': filename,\n 'cause': metaDictionary['cancer_study_identifier']})\n # not a valid meta file in this study\n meta_file_type = None\n return metaDictionary, meta_file_type\n\n # type-specific validations\n if meta_file_type in (MetaFileTypes.SEG, MetaFileTypes.GISTIC_GENES):\n if genome_name is not None and metaDictionary['reference_genome_id'] != genome_name:\n logger.error(\n 'Reference_genome_id is not %s',\n genome_name,\n extra={'filename_': filename,\n 'cause': metaDictionary['reference_genome_id']})\n meta_file_type = None\n if meta_file_type == MetaFileTypes.MUTATION:\n if ('swissprot_identifier' in metaDictionary and\n metaDictionary['swissprot_identifier'] not in ('name',\n 'accession')):\n logger.error(\n \"Invalid swissprot_identifier specification, must be either \"\n \"'name' or 'accession'\",\n extra={'filename_': filename,\n 'cause': metaDictionary['swissprot_identifier']})\n meta_file_type = None\n\n logger.info('Validation of meta file complete', extra={'filename_': filename})\n return metaDictionary, meta_file_type", "def identify_contents_metadata(cube, filename):\n metadata = {}\n\n try:\n # This could be None if cube.var_name isn't defined\n metadata['var_name'] = cube.var_name\n metadata['units'] = str(cube.units)\n metadata['long_name'] = cube.long_name\n metadata['standard_name'] = cube.standard_name\n metadata['time_units'] = cube.coord('time').units.origin\n metadata['calendar'] = cube.coord('time').units.calendar\n # CMIP5 doesn't have an activity id and so supply a default\n metadata['activity_id'] = cube.attributes.get('activity_id',\n 'HighResMIP')\n try:\n metadata['institute'] = cube.attributes['institution_id']\n except KeyError:\n # CMIP5 uses institute_id but we should not be processing CMIP5\n # data but handle it just in case\n metadata['institute'] = cube.attributes['institute_id']\n except Exception as exc:\n msg = ('Unable to extract metadata from the contents of file {}\\n{}'.\n format(filename, exc.__str__()))\n raise FileValidationError(msg)\n\n return metadata", "def extract_metadata(info_str):\n info = [i.strip() for i in info_str.split(';')\n if i.strip().startswith('gene_id')\n or i.strip().startswith('gene_type')]\n assert len(info) == 2, '{0}'.format(info)\n gene_id, gene_type = extract_gene_data(info)\n return gene_id, gene_type", "def Parse(filename):\n\n f = open(filename, 'r')\n\n metadata = Metadata()\n data = [] # array of dataset\n dataset = None\n\n for num, line in enumerate(f):\n try:\n line = line.strip()\n if not line: continue\n\n if not metadata.complete:\n metadata.Parse(line)\n continue\n\n if re.match('[a-z_]', line):\n continue\n\n if line.startswith('# StopWatch'): # Start of a new dataset\n if dataset:\n if dataset.summary:\n metadata.UpdateWith(dataset)\n else:\n data.append(dataset)\n\n dataset = DataSet(line)\n continue\n\n if line.startswith('#'):\n continue\n\n # must be data at this stage\n try:\n (time, value) = line.split(None, 1)\n except ValueError:\n print 'skipping line %d: %s' % (num, line)\n continue\n\n if dataset and not dataset.summary:\n dataset.Add(float(time), float(value))\n\n except Exception:\n print 'Error parsing line %d' % num, sys.exc_info()[0]\n raise\n data.append(dataset)\n if not metadata.complete:\n print \"\"\"Error missing metadata. 
Did you mount debugfs?\n [adb shell mount -t debugfs none /sys/kernel/debug]\"\"\"\n sys.exit(1)\n return (metadata, data)", "def LoadMetadata(filename):\r\n## print filename\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.zvi'))\r\n if globbed:\r\n return LoadZVIMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.xml'))\r\n if globbed:\r\n return LoadAxioVisionXMLMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'metadata.txt'))\r\n if globbed:\r\n return LoadMMMetaData(globbed[0])\r\n return None\r\n #no further valid options, crash horribly\r", "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed" ]
[ "0.6328707", "0.61443377", "0.60601145", "0.6055962", "0.6050261", "0.6024006", "0.60018224", "0.5953646", "0.5923734", "0.5902934", "0.58560735", "0.58287627", "0.5821959", "0.5799036", "0.5752124", "0.5747356", "0.57307374", "0.5726997", "0.5683347", "0.56630814", "0.5662278", "0.5657891", "0.5641203", "0.5632358", "0.56289476", "0.5626663", "0.56243324", "0.56158245", "0.5613351", "0.5587915" ]
0.7258536
0
Shits out a simple dockerfile for cakephp projects based upon a .travis.yml
def main(): extensions = os.getenv('EXTENSIONS', DEFAULT_EXTENSIONS).split(',') extensions.sort() docker_contents = [] contents = travis_contents() data = yaml.safe_load(contents) # set the version php_versions = data.get('php', [DEFAULT_VERSION]) php_version = php_versions[0] docker_contents.append('FROM php:{0}'.format(php_version)) # ensure all the php shit exists # LC_ALL=en_US.UTF-8 docker_contents.append('ENV DEBIAN_FRONTEND=noninteractive LC_ALL=C DOCKER=1') # noqa docker_contents.append('RUN apt-get update') docker_contents.append('RUN apt-get -qq install -qq -y php5-cli php-pear') # for composer docker_contents.append('RUN apt-get -qq install -qq -y git-core') # for curl docker_contents.append('RUN apt-get -qq install -qq -y libcurl4-openssl-dev') # for intl docker_contents.append('RUN apt-get -qq install -qq -y libicu-dev') # installs user-specified packages packages = os.getenv('PACKAGES', '') if len(os.getenv('PACKAGES', '')) > 0: packages = packages.split(',') docker_contents.append('RUN apt-get -qq install -qq -y {0}'.format( ' '.join(packages) )) for extension in extensions: if extension in available_extensions: docker_contents.append('RUN docker-php-ext-install {0}'.format( extension )) else: docker_contents.append('RUN apt-get -qq install -qq -y php5-{0} && pecl install -o -f {0} && \\'.format(extension)) docker_contents.append(' rm -rf /tmp/pear && \\') if extension in ZEND_EXTENSIONS: docker_contents.append(' echo "zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-{0}/xdebug.so" > /usr/local/etc/php/conf.d/{1}.ini'.format( phpextension_paths[php_version], extension )) else: docker_contents.append(' echo "extension={0}.so" > /usr/local/etc/php/conf.d/{0}.ini'.format(extension)) # ensure we have all the proper php testing stuff docker_contents.append('RUN \\') docker_contents.append(' curl -sSL https://phar.phpunit.de/phpunit-old.phar > phpunit.phar && \\') docker_contents.append(' curl -sS https://getcomposer.org/installer | php && \\') docker_contents.append(' mv composer.phar /usr/local/bin/composer && \\') docker_contents.append(' mv phpunit.phar /usr/local/bin/phpunit && \\') docker_contents.append(' chmod +x /usr/local/bin/composer /usr/local/bin/phpunit && \\') docker_contents.append(' phpunit --version') # set the environment environments = data.get('env', {'matrix': 'CI=1'}).get('matrix', []) docker_env = environments[0] docker_contents.append('ENV {0}'.format(docker_env)) docker_contents.append('ADD composer.json /app/composer.json') docker_contents.append('WORKDIR /app') docker_contents.append('RUN echo "date.timezone = UTC" > /usr/local/etc/php/conf.d/timezone.ini') # noqa for script in data.get('before_script', []): docker_contents.append('RUN {0}'.format(script)) docker_contents.append('ADD . /app') # HACK docker_contents.append('ENV COVERALLS=1 DEFAULT=1 PHPCS=1') for script in data.get('script', []): docker_contents.append('RUN {0}'.format(script)) with open('{0}/Dockerfile'.format(os.getcwd()), 'w') as f: for line in docker_contents: f.write("{0}\n\n".format(line))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check():\n cmake('tests')\n docker('./{build}/tests', build=BUILD)", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def docker_build(c):\n cli_tasks.docker_build.run(c)", "def test_docker_build(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"src/BUILD\": \"docker_image(name='test-image', image_tags=['1.0'])\",\n \"src/Dockerfile\": \"FROM python:3.8\",\n }\n )\n target = rule_runner.get_target(Address(\"src\", target_name=\"test-image\"))\n result = run_docker(rule_runner, target)\n assert len(result.artifacts) == 1\n assert len(result.artifacts[0].extra_log_lines) == 2\n assert \"Built docker image: test-image:1.0\" == result.artifacts[0].extra_log_lines[0]\n assert \"Docker image ID:\" in result.artifacts[0].extra_log_lines[1]\n assert \"<unknown>\" not in result.artifacts[0].extra_log_lines[1]", "def build_base():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/requirements.txt')\n\n with cd('/srv/build'):\n run('docker build -t {base_image_name} .'.format(\n base_image_name=env.base_image_name,\n ))", "def dockerfile() -> co.Exec:\n image = co.Image(dockerfile=\"./docker/Dockerfile.simple\")\n return co.Exec(\n f\"python -c '{pretty_table_script}'\", image=image, doc=co.util.magic_doc()\n )", "def main():\n\n if not os.environ.get('TRAVIS_PULL_REQUEST', 'false') == 'false':\n return\n\n git_config_setup()\n populate_source()\n build_and_deploy()", "def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. 
\"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )", "def docker_test(c, rebuild_venv=False):\n cli_tasks.docker_test.run(c, rebuild_venv)", "def main(repo):\n print(subprocess.call(['make', 'setup']))\n with Docker('doppins') as docker:\n print(docker.run('git clone {repo} cloned'.format(repo=repo)).out)", "def run(cont, util, shell, argv=list()):\n\n cache_dir = cont.named_cache_dir(\"travis_container_downloads\",\n ephemeral=False)\n cache_dir_key = \"_POLYSQUARE_TRAVIS_CONTAINER_TEST_CACHE_DIR\"\n shell.overwrite_environment_variable(cache_dir_key, cache_dir)\n\n cont.fetch_and_import(\"setup/python/setup.py\").run(cont, util, shell, argv)\n\n config_python = \"setup/project/configure_python.py\"\n py_ver = util.language_version(\"python3\")\n py_cont = cont.fetch_and_import(config_python).get(cont,\n util,\n shell,\n py_ver)\n\n with py_cont.activated(util):\n with util.Task(\"\"\"Downloading all distributions\"\"\"):\n os.environ[cache_dir_key] = cache_dir\n util.execute(cont,\n util.long_running_suppressed_output(),\n util.which(\"python\"),\n \"download-all-distros-to.py\")", "def main():\n parser = argparse.ArgumentParser(\n epilog=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-d\", \"--dry-run\", action=\"store_true\", default=0, help=\"Dry run mode.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Verbosity. Default is WARNING level.\",\n )\n\n subparsers = parser.add_subparsers(help=\"Sub commands\", dest=\"subparser\")\n subparsers.required = True\n\n build_parser = subparsers.add_parser(\n \"build\",\n description=\"Build an image from Dockerfile, caching image hierarchy\",\n help=\"Build an image from a Dockerfile\",\n )\n build_parser.add_argument(\n \"path\", metavar=\"PATH\", help=\"The build context directory\"\n )\n build_parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Name of the Dockerfile. If not provided, \"\n \"will use config.DOCKERFILE_PATH_PATTERN to compute. \",\n )\n build_parser.add_argument(\n \"-v\",\n \"--git-sha\",\n required=True,\n help=\"The version of code to build against, \" \"will pass as GIT_SHA variable\",\n )\n build_parser.add_argument(\n \"-n\", \"--name\", required=True, help=\"The name of the image to build\"\n )\n build_parser.add_argument(\n \"--build-arg\",\n metavar=\"ARG=VALUE\",\n nargs=\"*\",\n default=[],\n help=\"Set extra build-time variables. 
GIT_SHA, TIMESTAMP will be passed by default.\",\n )\n build_parser.add_argument(\n \"-r\",\n \"--raw\",\n action=\"store_true\",\n help=\"Whether to use raw docker build command to build, skipping caching logic\",\n )\n build_parser.add_argument(\n \"--registry\",\n default=config.DOCKER_REGISTRY,\n help=\"Docker registry use to determine the image identity, \"\n \"can be set via IMAGE_BUILDER_DOCKER_REGISTRY environment variable, \"\n 'or set DOCKER_REGISTRY in config.py. Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-t\",\n \"--tag-pattern\",\n default=config.GIT_SHA_TAG_PATTERN,\n help=\"Tag pattern, can only include one `{git_sha}` placeholder, \"\n 'such as \"{git_sha}-new\". If the tag exists, we won\\'t rebuild it. '\n 'Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-e\",\n \"--extra-tag\",\n nargs=\"*\",\n default=[],\n help=\"Extra tags to tag to the final images\",\n )\n build_parser.add_argument(\n \"--extra-name\",\n nargs=\"*\",\n default=[],\n help=\"Extra name and optionally with a tag in the 'name:tag' format\",\n )\n build_parser.add_argument(\n \"-o\", \"--output-hash\", help=\"The output filename of the files hash log.\"\n )\n build_parser.set_defaults(func=build)\n\n args = parser.parse_args()\n if args.dry_run:\n # DRY_RUN env will be read in image_builder.libs.process\n os.environ[\"DRY_RUN\"] = \"1\"\n\n if args.func == build:\n args.path = expand_path(args.path)\n if args.output_hash:\n args.output_hash = expand_path(args.output_hash)\n\n args.file = args.file or locate_dockerfile(args.name)\n args.file = expand_path(args.file)\n # set environ for main dockerfile for possibly retrieving later\n os.environ[\n config.DOCKERFILE_ENV_PATTERN.format(image_name=args.name)\n ] = args.file\n\n # change CWD to PATH\n os.chdir(args.path)\n\n if not args.registry:\n parser.error(\n \"--registry should be provied \"\n \"or specified by IMAGE_BUILDER_DOCKER_REGISTRY environment variable or set DOCKER_REGISTRY in config.py\"\n )\n if not all(\"=\" in kv for kv in args.build_arg):\n parser.error(\"--build_arg must be in ARG=VALUE format\")\n\n # set git_sha_tag\n try:\n args.git_sha_tag = args.tag_pattern.format(git_sha=args.git_sha)\n except KeyError:\n parser.error(\n 'Wrong --tag-pattern provided. 
Can only include one `{git_sha}` placeholder, such as \"{git_sha}-new\"'\n )\n\n # setup logging\n level = logging.WARNING - args.verbose * 10\n logging.basicConfig(\n level=level, format=\"%(asctime)s %(name)s %(levelname)s %(message)s\"\n )\n\n if args.output_hash:\n h = logging.FileHandler(args.output_hash)\n h.setLevel(logging.DEBUG)\n h.setFormatter(logging.Formatter(\"%(message)s\"))\n hash_logger.addHandler(h)\n\n # Suppress warning when we don't verify ssl\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n return args.func(args)", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def dockerfile_with_copy() -> co.Exec:\n image = co.Image(dockerfile=\"./docker/Dockerfile.copy\", context=\".\")\n return co.Exec(\"python /root/code/test.py\", image=image, doc=co.util.magic_doc())", "def _get_dockerfiles_for_test() -> str:\n project_root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n dockerfiles_dir = os.path.join(project_root_dir, \"dockerfiles\")\n if sys.version_info[0:2] == (3, 6):\n return os.path.join(dockerfiles_dir, \"centos7.Dockerfile\")\n elif sys.version_info[0:2] == (3, 9):\n return os.path.join(dockerfiles_dir, \"rocky8.Dockerfile\")\n else:\n raise Exception(\n \"Running the tests with INMANTA_TEST_INFRA_SETUP=true is only supported using a python3.6 or python3.9 venv\"\n )", "def dockerfile_with_path_map() -> co.Exec:\n path_map = {\"./code\": \"/root/code\"}\n image = co.Image(\n dockerfile=\"./docker/Dockerfile.copy\", context=\".\", path_map=path_map\n )\n return co.Exec(\"python /root/code/test.py\", image=image, doc=co.util.magic_doc())", "def test_defaults_centos(self):\n c = catalyst()\n self.assertEqual(str(c),\nr'''# ParaView Catalyst version 5.6.1\nRUN yum install -y \\\n git \\\n gzip \\\n libICE-devel \\\n libSM-devel \\\n libX11-devel \\\n libXau-devel \\\n libXext-devel \\\n libXt-devel \\\n libglvnd-devel \\\n make \\\n mesa-libGL-devel \\\n tar \\\n wget \\\n which && \\\n rm -rf /var/cache/yum/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\\&version=v5.6\\&type=catalyst\\&os=Sources\\&downloadFile=Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -C /var/tmp -z && \\\n mkdir -p /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && cd /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base && \\\n cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target all -- -j$(nproc) && \\\n cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target install -- -j$(nproc) && \\\n rm -rf /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base\nENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \\\n PATH=/usr/local/catalyst/bin:$PATH''')", "def 
main(params):\n version_map = parse_version_map(params.version_map)\n\n # The app cannot specify it's own Dockerfile when building with\n # the aspnetcore image, the builder is the one that has to build\n # it. To avoid any confusion the builder will fail with this\n # error.\n if os.path.isfile(DOCKERFILE_NAME):\n print ('A Dockerfile already exists in the workspace, this Dockerfile ' +\n 'cannot be used with the aspnetcore runtime.')\n sys.exit(1)\n\n deps_path = get_deps_path(params.root)\n if deps_path is None:\n print 'No .deps.json file found for the app'\n sys.exit(1)\n\n minor_version = get_runtime_minor_version(deps_path)\n if minor_version is None:\n print ('No valid .NET Core runtime version found for the app or it is not a ' +\n 'supported app.')\n sys.exit(1)\n\n base_image = get_base_image(version_map, minor_version)\n if base_image is None:\n print ('The app requires .NET Core runtime version {0} which is not supported at ' +\n 'this time.').format(minor_version)\n sys.exit(1)\n\n project_name = get_project_assembly_name(deps_path)\n assembly_name = ASSEMBLY_NAME_TEMPLATE.format(project_name)\n if not os.path.isfile(os.path.join(params.root, assembly_name)):\n print 'Cannot find entry point assembly {0} for ASP.NET Core project'.format(assembly_name)\n sys.exit(1)\n\n contents = DOCKERFILE_CONTENTS.format(runtime_image=base_image.image, dll_name=project_name)\n with open(params.output, 'wt') as out:\n out.write(contents)", "def test_defaults_ubuntu(self):\n c = catalyst()\n self.assertEqual(str(c),\nr'''# ParaView Catalyst version 5.6.1\nRUN apt-get update -y && \\\n DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \\\n git \\\n gzip \\\n libgl1-mesa-dev \\\n libice-dev \\\n libsm-dev \\\n libx11-dev \\\n libxau-dev \\\n libxext-dev \\\n libxt-dev \\\n make \\\n tar \\\n wget && \\\n rm -rf /var/lib/apt/lists/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\\&version=v5.6\\&type=catalyst\\&os=Sources\\&downloadFile=Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -C /var/tmp -z && \\\n mkdir -p /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && cd /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base && \\\n cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target all -- -j$(nproc) && \\\n cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target install -- -j$(nproc) && \\\n rm -rf /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base\nENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \\\n PATH=/usr/local/catalyst/bin:$PATH''')", "def run_sagemaker_pytest_cmd(image):\n pytest_command, path, tag = generate_sagemaker_pytest_cmd(image)\n\n context = Context()\n with context.cd(path):\n context.run(f\"virtualenv {tag}\")\n with 
context.prefix(f\"source {tag}/bin/activate\"):\n context.run(\"pip install -r requirements.txt\", warn=True)\n context.run(pytest_command)", "def build_and_push_3p_images():\n\n log.info(\n \"Building Lambda Stack docker images (these are not on DockerHub). \"\n \"FMI see https://github.com/lambdal/lambda-stack-dockerfiles\")\n run_cmd(\"\"\"\n cd /tmp &&\n (rm -rf lambda-stack-dockerfiles || true) &&\n git clone https://github.com/lambdal/lambda-stack-dockerfiles &&\n cd lambda-stack-dockerfiles &&\n git checkout d762400d61636c074533416674426a84cc4d8992 &&\n docker build -t oarphpy/lambda-stack:22.04 -f Dockerfile.jammy . &&\n docker push oarphpy/lambda-stack:22.04\n \"\"\")", "def main():\n # Packaging tools expects either README.txt, README, or README.rst.\n # Convert the README markdown file to ReStructured text.\n try:\n run(\"pandoc README.md -f markdown -t rst -o README.rst\".split(\" \"), check=True)\n except CalledProcessError:\n print(\"pandoc does not appear to be installed. Get it from http://pandoc.org/\")\n exit(1)\n\n copy_metadata()\n\n for item in (\"sdist\", \"bdist_wheel\"):\n run([\"python\", \"setup.py\", item], check=True)", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def run(args):\n docker(' '.join(args))", "def build_docker(params) -> None:\n print(\"Building docker image...\")\n cmd = \"cd bg_changer && docker build --tag bg_changer . >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")", "def dockerize_test(ctx, binary, skip_cleanup=False):\n import docker\n\n client = docker.from_env()\n temp_folder = tempfile.mkdtemp(prefix=\"ddtest-\")\n\n ctx.run(\"cp %s %s/test.bin\" % (binary, temp_folder))\n\n with open(\"%s/Dockerfile\" % temp_folder, 'w') as stream:\n stream.write(\n \"\"\"FROM debian:stretch-slim\nENV DOCKER_DD_AGENT=yes\nWORKDIR /\nADD https://github.com/docker/compose/releases/download/1.16.1/docker-compose-Linux-x86_64 /bin/docker-compose\nRUN echo \"1804b0ce6596efe707b9cab05d74b161833ed503f0535a937dd5d17bea8fc50a /bin/docker-compose\" > sum && \\\n sha256sum -c sum && \\\n chmod +x /bin/docker-compose\nCMD /test.bin\nCOPY test.bin /test.bin\n\"\"\"\n )\n # Handle optional testdata folder\n if os.path.isdir(\"./testdata\"):\n ctx.run(\"cp -R testdata %s\" % temp_folder)\n stream.write(\"COPY testdata /testdata\")\n\n test_image, _ = client.images.build(path=temp_folder, rm=True)\n\n scratch_volume = client.volumes.create()\n\n test_container = client.containers.run(\n test_image.id,\n detach=True,\n pid_mode=\"host\", # For origin detection\n environment=[\"SCRATCH_VOLUME_NAME=\" + scratch_volume.name, \"SCRATCH_VOLUME_PATH=/tmp/scratch\",],\n volumes={\n '/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'ro'},\n '/proc': {'bind': '/host/proc', 'mode': 'ro'},\n '/sys/fs/cgroup': {'bind': '/host/sys/fs/cgroup', 'mode': 'ro'},\n scratch_volume.name: {'bind': '/tmp/scratch', 'mode': 'rw'},\n },\n )\n\n exit_code = test_container.wait()['StatusCode']\n\n print(test_container.logs(stdout=True, stderr=False, stream=False))\n\n sys.stderr.write(test_container.logs(stdout=False, stderr=True, stream=False).decode(sys.stderr.encoding))\n\n if not skip_cleanup:\n shutil.rmtree(temp_folder)\n test_container.remove(v=True, force=True)\n scratch_volume.remove(force=True)\n client.images.remove(test_image.id)\n\n if exit_code != 0:\n raise Exit(code=exit_code)", "def create(dockerfile):\n\n path = 
os.path.dirname(dockerfile)\n\n container_name = input('Enter container name: ')\n port = input('Enter port number to map TCP port 5000 in the container, to a port on the Docker host: ')\n\n try:\n image = CLIENT.images.build(path=path, dockerfile=dockerfile, tag=\"my_app_image\")\n # Run a container and map TCP port 5000 in the container to a given port on the Docker host.\n container = CLIENT.containers.run('my_app_image', detach=True, ports={'5000/tcp': port},\n name=container_name)\n click.secho(\"Container created with name: {}. App is running \"\n \"on http://0.0.0.0:{}/ on the host.\"\n .format(container_name, port), bg='blue', fg='white')\n except (docker.errors.APIError, TypeError, OSError) as err:\n print(err)", "def quickstart():\n if not os.path.exists(\"./fabric_factory/ve\"):\n bootstrap()\n else:\n print \"No need to create virtualenv, 've' already exists\"\n install_requirements()\n project_linkage()", "def make(tag_masks: str = \"*\", poetry_version: str = \"master\"):\n tags = requests.get(\n \"https://registry.hub.docker.com/v1/repositories/python/tags\"\n ).json()\n\n def match_tag(tag) -> bool:\n tag_name = tag[\"name\"]\n return [\n tag_mask\n for tag_mask in tag_masks\n if tag_mask == \"*\" or fnmatch.fnmatch(tag_name, tag_mask)\n ]\n\n tags = list(filter(match_tag, tags))\n\n click.echo(f\"Found {len(tags)} tags.\")\n click.echo(\"Generating \", nl=False)\n\n docker_3_template = Path(\"./Dockerfile-3.template\").read_text(\"utf8\")\n docker_2_template = Path(\"./Dockerfile-2.template\").read_text(\"utf8\")\n\n for tag in tags:\n tag_name = tag[\"name\"]\n\n docker_template = docker_3_template\n\n try:\n tag_major_version = int(tag_name[0])\n tag_major_path = Path(str(tag_major_version))\n try:\n tag_major_path.mkdir()\n except FileExistsError:\n pass\n tag_path = tag_major_path / Path(tag_name)\n if tag_major_version == 2:\n docker_template = docker_2_template\n except ValueError:\n tag_path = Path(tag_name)\n\n try:\n tag_path.mkdir()\n except FileExistsError:\n pass\n\n (tag_path / \"Dockerfile\").write_text(\n docker_template.format(python_tag=tag_name, poetry_version=poetry_version)\n )\n click.echo(\".\", nl=False)\n click.echo(\" Done.\")", "def build():\n local('pelican -o {} -s pelicanconf.py'.format(env.deploy_path))" ]
[ "0.66781193", "0.6571562", "0.6565537", "0.6506989", "0.6320813", "0.6300796", "0.6141792", "0.60754395", "0.5996922", "0.5919745", "0.58479375", "0.58338064", "0.57864416", "0.57376957", "0.5730911", "0.57271516", "0.57252514", "0.5641273", "0.5628882", "0.5624112", "0.5603273", "0.5582554", "0.55815184", "0.5555498", "0.5513121", "0.5440082", "0.5415026", "0.5410543", "0.5405821", "0.54012024" ]
0.7650627
0
Calculate MAPs, in regards to K
def calc_maps_k(self, qBX, qBY, rBX, rBY, qLX, qLY, rLX, rLY, k): mapi2t = calc_map_k(qBX, rBY, qLX, rLY, k) mapt2i = calc_map_k(qBY, rBX, qLY, rLX, k) mapi2i = calc_map_k(qBX, rBX, qLX, rLX, k) mapt2t = calc_map_k(qBY, rBY, qLY, rLY, k) avg = (mapi2t.item() + mapt2i.item() + mapi2i.item() + mapt2t.item()) * 0.25 mapi2t, mapt2i, mapi2i, mapt2t, mapavg = mapi2t.item(), mapt2i.item(), mapi2i.item(), mapt2t.item(), avg s = 'Valid: mAP@{}, avg: {:3.3f}, i->t: {:3.3f}, t->i: {:3.3f}, i->i: {:3.3f}, t->t: {:3.3f}' self.logger.info(s.format(k, mapavg, mapi2t, mapt2i, mapi2i, mapt2t)) return mapi2t, mapt2i, mapi2i, mapt2t, mapavg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def mapk(actual, predicted, k):\n return round(np.mean([apk(a,p,k) for a,p in zip(actual, predicted)]), 4) * 100", "def compute_map(ranks, gnd, kappas=[]):\n\n map = 0.\n nq = len(gnd) # number of queries\n aps = np.zeros(nq)\n pr = np.zeros(len(kappas))\n prs = np.zeros((nq, len(kappas)))\n nempty = 0\n\n for i in np.arange(nq):\n qgnd = np.array(gnd[i]['ok'])\n\n # no positive images, skip from the average\n if qgnd.shape[0] == 0:\n aps[i] = float('nan')\n prs[i, :] = float('nan')\n nempty += 1\n continue\n\n try:\n qgndj = np.array(gnd[i]['junk'])\n except:\n qgndj = np.empty(0)\n\n # sorted positions of positive and junk images (0 based)\n pos = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgnd)]\n junk = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgndj)]\n\n k = 0;\n ij = 0;\n if len(junk):\n # decrease positions of positives based on the number of\n # junk images appearing before them\n ip = 0\n while (ip < len(pos)):\n while (ij < len(junk) and pos[ip] > junk[ij]):\n k += 1\n ij += 1\n pos[ip] = pos[ip] - k\n ip += 1\n\n # compute ap\n ap = compute_ap(pos, len(qgnd))\n map = map + ap\n aps[i] = ap\n\n # compute precision @ k\n pos += 1 # get it to 1-based\n for j in np.arange(len(kappas)):\n kq = min(max(pos), kappas[j]); \n prs[i, j] = (pos <= kq).sum() / kq\n pr = pr + prs[i, :]\n\n map = map / (nq - nempty)\n pr = pr / (nq - nempty)\n\n return map, aps, pr, prs", "def mapk(actual, predicted, k=3):\n outs = [apk(a, p, k) for a, p in zip(actual, predicted)]\n return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)]), outs", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res", "def calculate_MAP(self):\n testing_images = open('./digitdata/testimages', 'r')\n with testing_images as ti:\n data = list(csv.reader(ti))\n data = [i for i in data if i]\n count = 0\n #loop through all the test images\n for j in range(0,1000):\n classification_dict = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0} \n for l in range(0,28):\n coord = count + l\n for w in range(0,28):\n if data[coord][0][w] == \"+\":\n #iterate through each class. 
z is the class [0-9]\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][0]) \n elif data[coord][0][w] == \"#\":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][1])\n elif data[coord][0][w] == \" \":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][2])\n count += 28\n self.solutions.append(max(classification_dict, key=classification_dict.get))", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def compute_kappa_map(lens_vec, size, size_map):\n\n par_file_name = \"kappa_map.par\"\n fit_file_name = \"kappa_map.fits\"\n z_source = 2.0\n size_map = size_map * 1.05\n\n file_map = open(par_file_name, 'w')\n\n conv_lens_vec(lens_vec)\n\n file_map.write(\"runmode\\n\" )\n file_map.write(\" reference 3 0 0\\n\")\n file_map.write(\" verbose 0\\n\" )\n file_map.write(\" mass 3 \" + str(size) + \" \" + \\\n str(lens_vec[0][\"z_lens\"]) + \" \" + fit_file_name + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"source\\n\")\n file_map.write(\" z_source \" + str(z_source) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"grille\\n\")\n file_map.write(\" nombre 128\\n\")\n file_map.write(\" nlens 4\\n\")\n file_map.write(\" nlens_crit 1\\n\")\n file_map.write(\" nlens_opt 0\\n\")\n file_map.write(\" polaire 1\\n\")\n file_map.write(\" end\\n\")\n\n\n for i in range(len(lens_vec)):\n string_out = 'potential ' + str(i) + '\\n'\n file_map.write(string_out)\n #print string_out,\n for keys in lens_vec[i].keys():\n string_out = ' ' + keys + ' ' + str(lens_vec[i][keys]) + \\\n '\\n'\n #print string_out,\n file_map.write(string_out)\n file_map.write(' end\\n')\n\n file_map.write(\"cosmology\\n\")\n file_map.write(\" H0 70.0\\n\")\n file_map.write(\" omega 0.3\\n\")\n file_map.write(\" lambda 0.7\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"champ\\n\")\n file_map.write(\" xmin -101\\n\")\n file_map.write(\" xmax 100\\n\")\n file_map.write(\" ymin -101\\n\")\n file_map.write(\" ymax 100\\n\")\n file_map.write(\" dmax \" + str(size_map) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"fini\\n\")\n\n file_map.close()", "def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)", "def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, 
a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()", "def mapk(y_true, y_pred, k):\n \n # initialize empty list for apk values \n apk_values = []\n \n # loop over all samples\n for i in range(len(y_true)):\n # store apk values for every sample\n apk_values.append(\n apk(y_true[i], y_pred[i], k=k)\n )\n \n # return mean of apk values list\n return sum(apk_values) / len(apk_values)", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def kolmomap(xx,yy,amp,wavelength,angle,phase):\n sinemap=sine2d(xx,yy,amp[0],wavelength[0],angle[0]/180.*pi,phase[0])*0.\n for counter in range(len(amp)):\n sinemap=sinemap+sine2d(xx,yy,amp[counter],wavelength[counter],angle[counter]/180.*pi,phase[counter])\n return sinemap", "def build_map(model: str, n: int, kwc: int) -> Map:\n PKWS.clear()\n fited = cluster(n, model)\n return Map(\n cats=list(map(\"c-{}\".format, range(1, n + 1))),\n kws=list(\n map(\n lambda c: \", \".join(\n map(\n lambda x: x[0],\n count_it(\n Counter(\n chain.from_iterable(\n map(\n lambda ie: model == \"bert\"\n and SS_BERT.get(YS[model][ie[0]], [])\n or model == \"glove\"\n and SS_TFIDF[ie[0]]\n or SS_GLOVE[ie[0]],\n filter(\n lambda ie: ie[1] == c,\n enumerate(fited),\n ),\n ),\n )\n ),\n kwc,\n ),\n )\n ),\n range(n),\n )\n ),\n points=list(\n map(\n lambda y, x_y, x: Point(\n question=y, x=x_y[0], y=x_y[1], catagory=x,\n ),\n YS[model],\n XY[model],\n fited,\n )\n ),\n )", "def cemap_cal(y_pred,y_true):\r\n nTest = y_true.shape[0]\r\n nLabel = y_true.shape[1]\r\n ap = np.zeros(nTest)\r\n for i in range(0,nTest):\r\n for j in range(0,nLabel):\r\n R = np.sum(y_true[i,:])\r\n if y_true[i,j]==1:\r\n r = np.sum(y_pred[i,:]>=y_pred[i,j])\r\n rb = np.sum(y_pred[i,np.nonzero(y_true[i,:])] >= y_pred[i,j])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n imap = np.nanmean(ap)\r\n\r\n ap = np.zeros(nLabel)\r\n for i in range(0,nLabel):\r\n for j in range(0,nTest):\r\n R = np.sum(y_true[:,i])\r\n if y_true[j,i]==1:\r\n r = np.sum(y_pred[:,i] >= y_pred[j,i])\r\n rb = np.sum(y_pred[np.nonzero(y_true[:,i]),i] >= y_pred[j,i])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n lmap = np.nanmean(ap)\r\n\r\n return lmap,imap", 
"def kmerHashMap(reads, k):\n kmers_dict = {}\n # loop through all reads\n for i in range(len(reads)):\n # loop read's bases, except for the last k, to obtain its kmers\n for j in range(1+len(reads[i])-k):\n kmer = reads[i][j:k+j]\n if kmers_dict.has_key(kmer):\n kmers_dict[kmer].add(i)\n else:\n kmers_dict[kmer] = set([i])\n \n return kmers_dict", "def mapping(s, t, s_new, k,c):\n n, s_dim = s.shape\n t_dim = t.shape[1]\n n_new = s_new.shape[0]\n # 1. determine nearest neighbors\n dist = np.sum((s[np.newaxis] - s_new[:,np.newaxis])**2,-1)\n nn_ids = np.argsort(dist)[:,:k] # change to [:,:k]\n nns = np.row_stack([s[nn_ids[:,ki]] for ki in range(k)])\n nns = nns.reshape((n_new, k, s_dim), order='F')\n # 2 determine gram matris; \n dif = s_new[:,np.newaxis] - nns\n G = np.tensordot(dif,dif,axes=([2],[2]))\n G = G[np.arange(n_new),:,np.arange(n_new)]\n # 3. determine weights not worth vectorizing this \n weights = np.zeros((n_new, k))\n for i_n in range(n_new): \n weights[i_n] = np.linalg.inv(G[i_n]+c*np.eye(k)).dot(np.ones((k,)))\n weights /= np.sum(weights, -1, keepdims=True)\n # 4. compute coordinates\n t_nns = np.row_stack([t[nn_ids[:,ki]] for ki in range(k)])\n t_nns = t_nns.reshape((n_new,k, t_dim), order='F')\n t_new = np.dot(weights, t_nns)\n t_new = t_new[np.arange(n_new), np.arange(n_new)]\n return t_new", "def mapk(y_pred, y, k=10):\n return np.mean([apk(a, p, k) for a, p in zip(y, y_pred)])", "def calculate_mapping(self, mask):\n K, F, _ = mask.shape\n\n # (K, F, T)\n features = mask / np.linalg.norm(mask, axis=-1, keepdims=True)\n\n mapping = np.repeat(np.arange(K)[:, None], F, axis=1)\n\n for iterations, start, end in self.alignment_plan:\n for _ in range(iterations):\n # (K, T)\n centroid = np.sum(features[:, start:end, :], axis=1)\n centroid /= np.linalg.norm(centroid, axis=-1, keepdims=True)\n\n break_flag = False\n for f in range(start, end):\n reverse_permutation = self._align_segment(\n features[:, f, :], centroid,\n )\n if not (reverse_permutation == list(range(K))).all():\n break_flag = True\n features[:, f, :] = features[reverse_permutation, f, :]\n mapping[:, f] = mapping[reverse_permutation, f]\n if break_flag:\n break\n\n return mapping", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def calc_precision_map(self, output_filenm=\"\"):\n logger.info(\"Calculating precision map\")\n success_map, total_map = {}, {} # map from query r to a dict of path and ratio of success\n # not sure why I am getting RuntimeError: dictionary changed size during iteration.\n train_map = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]\n for ((e1, r), e2_list) in tqdm(train_map):\n c = self.args.cluster_assignments[self.entity_vocab[e1]]\n if c not in success_map:\n success_map[c] = {}\n if c not in total_map:\n total_map[c] = {}\n if r not in success_map[c]:\n success_map[c][r] = {}\n if r not in total_map[c]:\n total_map[c][r] = {}\n paths_for_this_relation = self.args.path_prior_map_per_relation[c][r]\n for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):\n ans = self.execute_one_program(e1, path, depth=0, max_branch=100)\n if len(ans) == 0:\n continue\n # execute the path get answer\n if path not in success_map[c][r]:\n success_map[c][r][path] = 0\n if path not in total_map[c][r]:\n total_map[c][r][path] = 0\n for a in ans:\n if a in e2_list:\n success_map[c][r][path] += 1\n total_map[c][r][path] += 1\n\n precision_map = {}\n for c, _ in 
success_map.items():\n for r, _ in success_map[c].items():\n if c not in precision_map:\n precision_map[c] = {}\n if r not in precision_map[c]:\n precision_map[c][r] = {}\n for path, s_c in success_map[c][r].items():\n precision_map[c][r][path] = s_c / total_map[c][r][path]\n\n if not output_filenm:\n dir_name = os.path.join(args.data_dir, \"data\", self.args.dataset_name, \"linkage={}\".format(self.args.linkage))\n output_filenm = os.path.join(dir_name, \"precision_map.pkl\")\n logger.info(\"Dumping precision map at {}\".format(output_filenm))\n with open(output_filenm, \"wb\") as fout:\n pickle.dump(precision_map, fout)\n logger.info(\"Done...\")", "def map():", "def phase_derivative_var_map(image, k):\n dx_phase = delta_x(image)\n dy_phase = delta_y(image)\n\n ny, nx = dx_phase.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an uneven integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n zmn = np.zeros((N,N))\n \n \n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n \n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (all_coords.shape[1], 1)).T, np.tile(avg_y, (all_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(inside, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n\n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (top_coords.shape[1], 1)).T, np.tile(avg_y, (top_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(top, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n## sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n## 
sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n## psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n## filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (bot_coords.shape[1], 1)).T, np.tile(avg_y, (bot_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(bot, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (left_coords.shape[1], 1)).T, np.tile(avg_y, (left_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(left, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (right_coords.shape[1], 1)).T, np.tile(avg_y, (right_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(right, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n return zmn", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def spamrisk_map(spam_wc, not_spam_wc, total_wc):\n risk_map = dict()\n spam_length = 0\n for w, v in spam_wc.iteritems():\n spam_length += v\n not_spam_length = 0\n for w, v in not_spam_wc.iteritems():\n not_spam_length += v\n total_length = not_spam_length + spam_length\n\n for word, value in total_wc.iteritems():\n\n if word not in spam_wc and word in not_spam_wc:\n risk_map[word] = 0.01\n elif word in spam_wc and word not in not_spam_wc:\n risk_map[word] = 
0.99\n else:\n g = float(not_spam_wc[word] * 2)\n b = float(spam_wc[word])\n risk_map[word] = ( b / spam_length ) / ( ( g / not_spam_length) +(b / spam_length) ) \n\n return risk_map", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def make_map(theta, phi, data, NSIDE):\n assert len(theta) == len(phi) == len(data)\n num_pix = hp.nside2npix(NSIDE)\n e1map = np.full(num_pix, hp.UNSEEN, dtype=np.float)\n existance = np.full(num_pix, False, dtype=np.bool)\n counts = np.ones(num_pix, dtype=np.int)\n theta_new = np.zeros(num_pix)\n phi_new = np.zeros(num_pix)\n\n for i, k in enumerate(data):\n index = hp.ang2pix(NSIDE, theta[i], phi[i])\n theta_new[index], phi_new[index] = hp.pix2ang(NSIDE, index)\n if not existance[index]:\n e1map[index] = 0\n counts[index] = 0\n existance[index] = True\n e1map[index] += k\n counts[index] += 1\n return e1map/counts, existance, theta_new, phi_new", "def kmeans(in_points: List[Point], in_k: int) -> Dict[Barycenter, List[Point]]:\n\n def _choose_barycenters(_in_points: List[Point], _in_k: int) -> List[Barycenter]:\n \"\"\"\n Choose K barycenters within a set of in_points.\n :param _in_points: the set of in_points.\n :param _in_k: the number of barycenters to choose.\n :return: a list of K barycenters.\n \"\"\"\n _result: List[Barycenter] = []\n selected: Dict[Abscissa, Dict[Ordinate, None]] = {}\n x_min, x_max = Point.x_min_max(_in_points)\n y_min, y_max = Point.y_min_max(_in_points)\n while True:\n x = Abscissa(round(uniform(x_min, x_max), 1))\n y = Ordinate(round(uniform(y_min, y_max), 1))\n if y in selected.get(x, {}):\n continue\n if x not in selected:\n selected[x] = {}\n selected[x][y] = None\n _result.append(Barycenter(Point(x, y)))\n if len(_result) == _in_k:\n return _result\n\n def _assign_barycenters(_in_points: List[Point], _in_barycenters: List[Barycenter]) -> Dict[Point, Barycenter]:\n \"\"\"\n Assign one (closest) barycenter to each point.\n :param _in_points: the list of in_points.\n :param _in_barycenters: the list of barycenters.\n :return: a dictionary that associates one barycenter to one point.\n \"\"\"\n distance = NewType(\"distance\", float)\n distances: Dict[Point, Dict[Barycenter, distance]] = {}\n # For each point: calculate the distance between the point and (all) the barycenters.\n for _point in _in_points:\n distances[_point] = {}\n for _barycenter in _in_barycenters:\n distances[_point][Barycenter(_barycenter)] = distance(_point.distance(_barycenter))\n result: Dict[Point, _point_barycenter] = {}\n for _point, dist in distances.items():\n result[_point] = min(dist, key=dist.get)\n return result\n\n def _find_barycenter(_in_points: List[Point]) -> Barycenter:\n \"\"\"\n Given a list of in_points, find the barycenter.\n :param _in_points: the list of in_points.\n :return: the barycenter.\n \"\"\"\n return Barycenter(Point(sum([p.x for p in _in_points]) / len(_in_points), sum([p.y for p in _in_points]) / len(_in_points)))\n\n def _find_barycenters(_in_barycenter_points: Dict[Barycenter, List[Point]]) -> \\\n Tuple[bool, Dict[Barycenter, List[Point]]]:\n \"\"\"\n Given associations between \"barycenter candidates\" and lists of in_points, calculate the \"real\" barycenter\n and test whether the candidates are valid or not.\n :param _in_barycenter_points: associations between \"barycenter candidates\" and lists of in_points.\n :return: the function returns 2 values.\n - The first value tells whether all the \"barycenters candidates\" are valid or not.\n - The second is a set of associations between \"real 
barycenters\" and lists of in_points.\n \"\"\"\n result: Dict[_point_barycenter, List[Point]] = {}\n _changed = False\n for b, pts in _in_barycenter_points.items():\n new_b = _find_barycenter(pts)\n if b != new_b:\n _changed = True\n result[Barycenter(new_b)] = pts\n return _changed, result\n\n barycenters: List[Barycenter] = _choose_barycenters(in_points, in_k)\n while True:\n # Assign one barycenter to each point. The assigned barycenter is the closest one to the point.\n _point_barycenter: Dict[Point, _point_barycenter] = _assign_barycenters(in_points, barycenters)\n # Group the in_points that have the same barycenter.\n _barycenter_points: Dict[Barycenter, List[Point]] = {n: [k for k in _point_barycenter.keys()\n if _point_barycenter[k] == n]\n for n in set(_point_barycenter.values())}\n print(\"[1] \" + \"-\" * 30)\n for _barycenter, _points in _barycenter_points.items():\n print('[{}]:{}'.format(\", \".join([str(p) for p in _points]), _barycenter), flush=True)\n\n # Calculate the (real) barycenters of the previously formed groups.\n _barycenter_points: Dict[Barycenter, List[Point]]\n changed, _barycenter_points = _find_barycenters(_barycenter_points)\n\n print(\"[2] \" + \"-\" * 30)\n for _barycenter, _points in _barycenter_points.items():\n print('[{}]:{}'.format(\", \".join([str(p) for p in _points]), _barycenter), flush=True)\n print('Changed: {}'.format('yes' if changed else 'no'))\n if not changed:\n break\n barycenters = list(_barycenter_points.keys())\n return _barycenter_points" ]
[ "0.7097635", "0.6405305", "0.6382673", "0.6310537", "0.6246306", "0.6165255", "0.61539656", "0.6136181", "0.6090143", "0.6069749", "0.6035733", "0.5974822", "0.5948159", "0.5946194", "0.5929079", "0.59160817", "0.58489734", "0.58091116", "0.5785186", "0.57669145", "0.5763431", "0.5763118", "0.5754729", "0.57360965", "0.57322043", "0.571684", "0.57089174", "0.5687846", "0.5677778", "0.56714845" ]
0.6901691
1
Creates an R2 plugin with the given arguments.
def create_r2plugin(self, **kwargs):
    return self.create_tool(cls=R2Plugin, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def do_plugin_create(cc, args):\n\n field_list = ['name', 'code', 'callable', 'public', 'extra']\n\n fields = dict((k, v) for (k, v) in vars(args).items()\n if k in field_list and not (v is None))\n\n fields = utils.args_array_to_dict(fields, 'extra')\n\n fl = fields['code']\n with open(fl, 'r') as fil:\n fields['code'] = fil.read()\n\n if args.params:\n fields['parameters'] = utils.json_from_file(args.params)\n\n plugin = cc.plugin.create(**fields)\n\n data = dict([(f, getattr(plugin, f, '')) for f in\n res_fields.PLUGIN_DETAILED_RESOURCE.fields])\n\n cliutils.print_dict(data, wrap=72, json_flag=args.json)", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def createObject(self, *args):\n return _libsbml.CompSBasePlugin_createObject(self, *args)", "def new_plugin(ctx, **defaults):\n from .quickstart import plugin_quickstart\n\n project = ctx.get_project(silent=True)\n plugin_quickstart(defaults, project=project)", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkShotNoiseImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, args):\n ClientPlugin.__init__(self)\n self.args = args", "def New(*args, **kargs):\n obj = itkSubtractImageFilterID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def setup_arguments_parser():\n\tglobal parser\n\n\tparser.plugin_add_argument('-a', '--args', dest = 'plugin_args', \n\t\tdefault = \"\",\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tArguments to send to the plugin. For a list of arguments to send to a specific plugin, use \"-l [#|name]\". Use \"-l all\" to list all plugins.\n\n\t\t\tEx: \n\t\t\tif the plugin takes the arguments...\n\n\t\t\t'-p [SOME NUMBER]' and '-u [SOME TEXT]'\n\n\t\t\tyou would input: -a \"-p 123 -u foo\"''')\n\n\tparser.plugin_add_argument('-p', '--plugin', dest = 'plugin_name', \n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\t-p [#|name], or --plugin [#|name] \n\n\t\t\tName of plugin to use. Use the flag -l all or --list all to view a list of all available site plugins.''')\n\n\tparser.plugin_add_argument('-l', '--list', dest = 'plugin_list',\n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tProvide a numbered listing of all plugins found. \n\n\t\t\tValid Parameters for PLUGIN_LIST:\n\t\t\t * all: for a list of all plugins.\n\n\t\t\t * #|name: to see more information on a specific plugin. You may use either the plugin # or the plugin name (if any) that is shown via \"-l all\"''')\n\n\tparser.plugin_add_argument('-o', '--out', dest = 'output_path',\n\t\tdefault = os.getcwd(),\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tPath to output to. 
If the path does not exist, it will be created. By default, this is the CWD.''')\n\n\tparser.plugin_add_argument('-w', '--wget', dest = 'wget_bin',\n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tPath to wget executable, if available.''')", "def New(*args, **kargs):\n obj = itkShotNoiseImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def createObject(self, *args):\n return _libsbml.MultiASTPlugin_createObject(self, *args)", "def New(*args, **kargs):\n obj = itkShotNoiseImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSpeckleNoiseImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSubtractImageFilterID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def createObject(self, *args):\n return _libsbml.MultiSpeciesPlugin_createObject(self, *args)", "def test_plugin_initialize_from_args(self):\n sys.argv.append('-t')\n p = PluginCustom()\n self.assertEqual('yourah', p.toto)", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkClosingByReconstructionImageFilterIUC2IUC2SE2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSpeckleNoiseImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkShotNoiseImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def _add_arguments(parser):\n parser.add_argument(\n \"command\",\n help='The plugin to run. e.g. \"shell\".',\n choices=sorted(registry.get_command_keys()),\n )\n\n parser.add_argument(\n \"-x\",\n \"--maximum-repositories\",\n default=sys.maxsize,\n type=int,\n help='If a value of `2` is used, it means \"Only search 2 repositories '\n 'for Rez packages to run on, at most\".',\n )\n\n parser.add_argument(\n \"-z\",\n \"--maximum-rez-packages\",\n default=sys.maxsize,\n type=int,\n help='If a value of `2` is used, it means \"Only search for 2 Rez packages '\n 'to run some comm on, at most\".',\n )\n\n parser.add_argument(\n \"-p\",\n \"--packages-path\",\n default=[config.release_packages_path], # pylint: disable=no-member\n help=\"A `{os.pathsep}` separated list of paths that report/run will be run on. \"\n \"If not defined, `rez.config.config.release_packages_path` is used, instead.\".format(\n os=os\n ),\n )\n\n parser.add_argument(\n \"-s\",\n \"--search-packages-path\",\n default=[config.release_packages_path], # pylint: disable=no-member\n help=\"A `{os.pathsep}` separated list of paths to search for Rez package dependencies. 
\"\n \"If not defined, `rez.config.config.release_packages_path` is used, instead.\".format(\n os=os\n ),\n )\n\n parser.add_argument(\n \"-i\",\n \"--ignore-patterns\",\n default=[],\n nargs=\"*\",\n help=\"A set of glob expressions or a file to a set of glob expressions. \"\n \"If a Rez package name matches one of \"\n \"these, it will not be run on.\",\n )\n\n parser.add_argument(\n \"-k\",\n \"--keep-temporary-files\",\n action=\"store_true\",\n help=\"If added, do not delete any temporary files that are generated during this run.\",\n )\n\n parser.add_argument(\n \"-r\",\n \"--rez-packages\",\n default=set(),\n nargs=\"+\",\n help=\"The names of Rez packages to process. If no names are given, \"\n \"every Rez package that is found will be processed.\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--temporary-directory\",\n help=\"A folder on-disk that will be used to clone git repositories.\",\n )", "def New(*args, **kargs):\n obj = itkScalarImageToRunLengthFeaturesFilterIUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSquaredDifferenceImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
[ "0.6534158", "0.6163332", "0.5920514", "0.5799215", "0.57468385", "0.56910276", "0.5681927", "0.56453353", "0.56410426", "0.56301457", "0.56010586", "0.5585494", "0.5585222", "0.55543864", "0.55391985", "0.5502906", "0.5500424", "0.54910374", "0.5490024", "0.5473118", "0.54696083", "0.5464263", "0.5443342", "0.543092", "0.5410256", "0.539954", "0.5362245", "0.5353175", "0.53493685", "0.5343449" ]
0.8725646
0
The 'set' method for the Stack(dict) It 'sets' the value in it's correct place in the Stack AND applies a 'stack_pos' value depending on WHERE in the stack the value is being placed.
def __setitem__(self, key, val):
    super(Stack, self).__setitem__(key, val)

    # The 'meta' portion of the stack is a standar dict (not Stack)
    try:
        if isinstance(val, Stack) and val.stack_pos is "stack_root":
            val.parent = self
            val.key = key

            # This needs to be compacted and simplified.
            if self.stack_pos is "stack_root":
                val.stack_pos = "data_root"
            elif self.stack_pos is "data_root":
                val.stack_pos = "filter"
            elif self.stack_pos is "filter":
                val.stack_pos = "x"

    except AttributeError:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __set__(self, stack: \"stack.Stack\", value: Any):\n with self._lock:\n self.assign_value_to_stack(stack, value)", "def assign_value_to_stack(self, stack: \"stack.Stack\", value: Any):\n pass", "def assign_value_to_stack(self, stack: \"stack.Stack\", value: Union[dict, list]):\n cloned = self._clone_container_with_resolvers(value, stack)\n setattr(stack, self.name, cloned)", "def stack_update(self, tree: Keyvalues) -> None:\n self._stack_update = tree", "def assign_value_to_stack(self, stack: \"stack.Stack\", value: Any):\n if isinstance(value, Resolver):\n value = self.get_setup_resolver_for_stack(stack, value)\n setattr(stack, self.name, value)", "def write_stack(self, offset, value):\n self.validate_stack_offset(offset)\n self.stack[offset] = value", "def set_current_stack(self, stack):\n self.current_stack = stack", "def _set_stack(self, val):\n return [\"@SP\", \"A=M\", \"M={v}\".format(v=val)]", "def __setitem__(self, index, value):\n self.position[index] = value", "def __setitem__(self, key, value):\n self.tree[key] = value", "def set(self, key, value):\n #try to lock the tree. If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))", "def stack_push(self, value):\n self.stack.append(value)", "def __setitem__(self, key, value) -> None:\n # Allows value modification only in __init__.\n caller_method = inspect.getouterframes(inspect.currentframe(), 2)[1][3]\n if caller_method != \"__init__\":\n raise AttributeError\n\n self.__stash[key] = value", "def stack_start(self, tree: Keyvalues) -> None:\n self._stack_start = tree", "def __setitem__(self, pos, val):\n self._coords[pos] = val", "def setPosition(position):", "def _set_x_and_y_keys(self, data_key, x, y):\r\n if self.stack_pos == 'stack_root':\r\n self[data_key].__set_x_key(x)\r\n self[data_key].__set_y_key(y)\r\n else:\r\n raise KeyError(\"set_x_keys can only be called from a stack at root level. Current level is '{0}'\".format(self.stack_pos))", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def set_cell(self, pos, value):\n\t\tpos = Point(pos)\n\t\tif not self.valid(pos):\n\t\t\traise KeyError('Invalid cell position: {0}'.format(pos))\n\t\tself.data[pos.x + pos.y * self.dims.width] = value", "def set_pos(self, p: tuple) -> None:\n self.pos = p" ]
[ "0.7720888", "0.7439317", "0.6881211", "0.6826017", "0.6686697", "0.6470291", "0.64564764", "0.63827455", "0.6122969", "0.6057367", "0.593939", "0.5896225", "0.5853789", "0.5846537", "0.58212984", "0.5709334", "0.563116", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5625634", "0.5624676", "0.5613284" ]
0.7885178
0
Sets the data_key into the stack, optionally mapping data sources it.
def add_data(self, data_key, data=None, meta=None, ):
    self._verify_key_types(name='data', keys=data_key)

    if data_key in self.keys():
        raise UserWarning("You have chosen to overwrite the source data and meta for Stack['%s']")

    if data is not None:
        if isinstance(data, pd.DataFrame):
            if meta is None:
                # To do: infer meta from DataFrame
                meta = {'info': None, 'lib': None, 'sets': None,
                        'columns': None, 'masks': None}
            # Add a special column of 1s
            data['@1'] = np.ones(len(data.index))
        else:
            raise TypeError(
                "The 'data' given to Stack.add_data() must be one of the following types: "
                "<pandas.DataFrame>"
            )

    if not meta is None:
        if isinstance(meta, (dict, OrderedDict)):
            # To do: verify incoming meta
            pass
        else:
            raise TypeError(
                "The 'meta' given to Stack.add_data() must be one of the following types: "
                "<dict>, <collections.OrderedDict>."
            )

    # Add the data key to the stack
    # self[data_key] = {}

    # Add the meta and data to the data_key position in the stack
    self[data_key].meta = meta
    self[data_key].data = data
    self[data_key]['no_filter'].data = self[data_key].data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_x_and_y_keys(self, data_key, x, y):\r\n if self.stack_pos == 'stack_root':\r\n self[data_key].__set_x_key(x)\r\n self[data_key].__set_y_key(y)\r\n else:\r\n raise KeyError(\"set_x_keys can only be called from a stack at root level. Current level is '{0}'\".format(self.stack_pos))", "def SetUserData(self, key, data):\n self._userdata[key] = data", "def set_data_source(self, source_id):\n self.data_source = source_id", "def data_source_name(self, data_source_name):\n\n self._data_source_name = data_source_name", "def data_source(self, data_source):\n\n self._data_source = data_source", "def set_provenance(self, ksf: str, data: Dict):\n if ksf not in data.keys():\n if ksf in self.mapping and not isinstance(self.mapping[ksf], dict):\n data[ksf] = self.mapping[ksf]()\n else:\n # if unknown ksf or is an inapplicable pattern\n # dictionary, then just set the value to the default\n data[ksf] = [self.default_provenance]\n else:\n # If data is s a non-string iterable then, coerce into a simple list of sources\n if isinstance(data[ksf], (list, set, tuple)):\n sources = list(data[ksf])\n else:\n # wraps knowledge sources that are multivalued in a list even if single valued\n # in ingest data\n if column_types[ksf] == list:\n sources = [data[ksf]]\n else:\n sources = data[ksf]\n if ksf in self.mapping:\n log.debug(\"self.mapping[ksf]\", self.mapping[ksf])\n if isinstance(self.mapping[ksf], dict):\n log.debug(\"self.mapping[ksf].keys()\", self.mapping[ksf].keys())\n for pattern in self.mapping[ksf].keys():\n log.debug(\"pattern\", pattern)\n for source in sources:\n log.debug(\"source\", source)\n if re.compile(pattern).match(source):\n index_of_source = data[ksf].index(source)\n del data[ksf][index_of_source]\n data[ksf] = data[ksf] + self.mapping[ksf][pattern]([source])\n else:\n if source not in data[ksf] and source not in self.mapping[ksf].keys():\n data[ksf].append(source)\n log.debug(\"data[ksf]\", data[ksf])\n else:\n data[ksf] = self.mapping[ksf](sources)\n else: # leave data intact if no mapping found\n data[ksf] = sources\n\n # ignore if still empty at this point\n if not data[ksf]:\n data.pop(ksf)", "def set(self, cls, name, value, data_source):\n # if cls not in self._cache:\n # self._cache[cls] = {}\n # self._cache[cls][name] = value\n #ds = self._default_DataSource if data_source is None else data_source\n if data_source is None:\n self._cache.setdefault(cls, {})[name] = value\n elif isinstance(data_source, models.Node):\n self._cache.setdefault(data_source._id, {}).setdefault(cls, {})[name] = value\n elif isinstance(data_source, str) and data_source.startswith('#'):\n self._cache.setdefault(data_source, {}).setdefault(cls, {})[name] = value\n else:\n raise ValueError('data_source specification unknown.')", "def __init__(__self__, *,\n key_data: pulumi.Input[str]):\n pulumi.set(__self__, \"key_data\", key_data)", "def __setitem__(self, key, value):\n self.default_dataset[key] = value", "def set_data(version, key, value):\n if key not in ALLOWED_KEYS:\n raise Exception('The key is not allowed')\n if len(value) == 1:\n value = value[0]\n save_data(load_data(), version, key, value)", "def setData(key, value):\n #only string keys are accepted\n if ( type(key) != str ): return None\n \n Co8PersistentData.__dataDict[key] = value", "def set(self, key, value):\n self._data[key] = value", "def set(self, key, value):\n self._data[key] = value", "def __setitem__(self, key, val):\r\n super(Stack, self).__setitem__(key, val)\r\n\r\n # The 'meta' portion of the stack is a standar dict (not 
Stack)\r\n try:\r\n if isinstance(val, Stack) and val.stack_pos is \"stack_root\":\r\n val.parent = self\r\n val.key = key\r\n\r\n # This needs to be compacted and simplified.\r\n if self.stack_pos is \"stack_root\":\r\n val.stack_pos = \"data_root\"\r\n elif self.stack_pos is \"data_root\":\r\n val.stack_pos = \"filter\"\r\n elif self.stack_pos is \"filter\":\r\n val.stack_pos = \"x\"\r\n\r\n except AttributeError:\r\n pass", "def __setattr__(self, key, value):\n if key != 'json_data':\n self.get_data()[key] = value\n else:\n super(BaseJsonEncodableObject, self).__setattr__(key, value)", "def setCustomData( self, key, value ):\n self._customData[str(key)] = value", "def set_data(self, data):\n\n pass", "def from_stack(self, stack, data_key=None, dk_filter=None, reset=True):\n if data_key is None and len(list(stack.keys())) > 1:\n msg = 'Please specify a data_key, the Stack contains more than one.'\n raise ValueError(msg)\n elif data_key is None:\n data_key = list(stack.keys())[0]\n elif not data_key in list(stack.keys()):\n msg = \"data_key '{}' does not exist.\".format(data_key)\n raise KeyError(msg)\n\n if not dk_filter:\n dk_f = 'no_filter'\n elif dk_filter in list(stack[data_key].keys()):\n msg = 'Please pass an existing filter of the Stack:\\n{}'.format(\n list(stack[data_key].keys()))\n raise KeyError(msg)\n\n meta = stack[data_key].meta\n data = stack[data_key][dk_f].data\n self.name = data_key\n self.filtered = dk_f\n self.from_components(data, meta, reset=reset)\n\n return None", "def __setitem__(cls, data_id, value):\n\n cls._data[data_id] = value\n\n if data_id not in cls._defaults:\n cls._defaults[data_id] = value", "def __setitem__(self, key, value):\n self.data[key] = value", "def __setitem__(self, key, value):\n self.data[key] = value", "def set_data_dest(self, destination_id):\n self.data_dest = destination_id", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def data_source_uuid(self, data_source_uuid):\n\n self._data_source_uuid = data_source_uuid", "def setResourceApplicationDataEntry(self, authenticationToken, guid, key, value):\r\n pass", "def __setitem__(self, key, value):\r\n self.data[key] = value", "def set_data(self, table_name, key, data,\n range_key=None, pickled=True, overwrite=True, transform_time=None):\n if pickled:\n data = pickle.dumps(data)\n table = self.get_table(table_name)\n if not table:\n # this shouldn't happened,\n table = self.create_table(table_name)\n try:\n data = {self.hash_key_name: key, self.data_property: data}\n if range_key:\n data.update({self.range_key_name: range_key})\n\n item = table.put_item(data=data, overwrite=overwrite)\n except ValidationException as e:\n raise DynamoDBError(e)\n except JSONResponseError as e:\n raise DynamoDBError(_TABLE_DOES_NOT_EXIST + \": '%s'. %s\" % (table_name, e))\n return item", "def set_key(self, key):\n self.key = key", "def SetEventDataIdentifier(self, event_data_identifier):\n self._event_data_identifier = event_data_identifier", "def set_data(self, data):\n self.data = data" ]
[ "0.6472338", "0.6135069", "0.61180633", "0.6014003", "0.6006666", "0.58484524", "0.583034", "0.5807248", "0.5780128", "0.57284236", "0.57120955", "0.5705826", "0.5705826", "0.5679936", "0.56008524", "0.5600356", "0.559809", "0.5547312", "0.5539231", "0.5510886", "0.5510886", "0.55015904", "0.5501212", "0.54943156", "0.54758984", "0.54736495", "0.5463805", "0.54598844", "0.5454081", "0.5450185" ]
0.6149559
1
Group variables by data types found in the meta.
def variable_types(self, data_key, only_type=None):
    if self[data_key].meta['columns'] is None:
        return 'No meta attached to data_key: %s' %(data_key)
    else:
        types = {
            'int': [],
            'float': [],
            'single': [],
            'delimited set': [],
            'string': [],
            'date': [],
            'time': [],
            'array': []
        }
        not_found = []
        for col in self[data_key].data.columns:
            if not col in ['@1', 'id_L1', 'id_L1.1']:
                try:
                    types[
                        self[data_key].meta['columns'][col]['type']
                    ].append(col)
                except:
                    not_found.append(col)
        for mask in self[data_key].meta['masks'].keys():
            types[self[data_key].meta['masks'][mask]['type']].append(mask)
        if not_found:
            print '%s not found in meta file. Ignored.' %(not_found)
        if only_type:
            return types[only_type]
        else:
            return types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _all_meta(self):\n\t\treturn {meta.key: self.type_cast(meta.value) for meta in self.meta_set.all()}", "def get_varmeta(self):\n\n if self.ref_ds is not None:\n ref_meta = (self.ref_ds.id, self.ref_ds._names_from_attrs('all'))\n else:\n ref_meta = None\n if self.other_dss is not None:\n dss_meta = [(ds.id, ds._names_from_attrs('all')) for ds in self.other_dss]\n else:\n dss_meta = None\n if self.metric_ds is not None:\n mds_meta = (self.metric_ds.id, self.metric_ds._names_from_attrs('all'))\n else:\n mds_meta = None\n\n return ref_meta, dss_meta, mds_meta", "def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict", "def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types", "def _variable_types(self):\n return self._variable_single_types + self._variable_array_types", "def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()", "def preprocess_vars(flat_vars):\n proc_vars = {}\n\n for var in flat_vars:\n v_type = var[\"type\"]\n if (v_type not in proc_vars):\n proc_vars[v_type] = [var]\n else:\n proc_vars[v_type].append(var)\n\n return proc_vars", "def get_variable_groups(all_inputs):\n row_length = len(all_inputs[0])\n for single_input in all_inputs[1:]:\n if len(single_input) != row_length:\n raise ValueError(\n \"Please make sure the length is the same if you want to input multiple values when the type of variables is t_array or t_mapping\")\n\n final_groups = list()\n row_length = len(all_inputs[0])\n col_length = len(all_inputs)\n for i in range(1, row_length):\n temp_list = list()\n for j in range(col_length):\n temp_list.append((all_inputs[j][0], all_inputs[j][i]))\n final_groups.append(temp_list)\n return final_groups", "def getVar(inmeta):\n meta = AutoVivification()\n with open(inmeta) as fp:\n for line in fp:\n cols=line.split(',')\n varname=cols[0].strip()\n meta[varname]['agg'] = cols[1].strip()\n meta[varname]['dtyp'] = cols[2].strip()\n meta[varname]['long_name'] = cols[3].strip()\n meta[varname]['units'] = cols[4].strip()\n return meta", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def variables(self, setname='data file', numeric=True, string=True,\n 
date=True, boolean=True, blacklist=None):\n varlist = []\n except_list = []\n dsvars = self._variables_from_set(setname)\n if not numeric: except_list.extend(['int', 'float'])\n if not string: except_list.append('string')\n if not date: except_list.append('date')\n if not boolean: except_list.append('boolean')\n for dsvar in dsvars:\n if self._get_type(dsvar) in except_list: continue\n if dsvar in blacklist: continue\n varlist.append(dsvar)\n return varlist", "def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types", "def output_meta_types(self, inputs=None):\n raise NotImplementedError()", "def data_grouping(self):\n group_container, film_container, plank_container = [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)]\n\n for i in self.data_labels:\n group = int(i[:-1])\n group_container[group - 1].append(i)\n film_container[group - 1].append(self.film_count[self.data_labels.index(i)])\n plank_container[group - 1].append(self.plank_count[self.data_labels.index(i)])\n\n return group_container, film_container, plank_container", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def variables(self):\n return {u for u in self if u.type == 'var'}", "def get_dtypes_for_group_annots(header: List, annot_types: List):\n group_dtypes = {}\n for annotation, annot_type in zip(header, annot_types):\n if annot_type != \"numeric\":\n group_dtypes[annotation] = np.str\n return group_dtypes", "def _parse_groupped_data(self):\n for i, val in enumerate(self.values.keys()):\n xy = self.values[val]\n self._set_and_get(\"x_\", val, xy[:, 0])\n self._set_and_get(\"y_\", val, xy[:, 1])", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def regenerate_variables(self):\n\n # Let us not forget to remove fields that might be empty by now\n if hasattr(self, '_var_kinds'):\n for k in self._var_kinds:\n attrname = camel2underscores(k)\n try:\n delattr(self, attrname)\n except AttributeError:\n pass # The attribute may not have been set up yet\n\n _var_kinds = defaultdict(DictList)\n for k, v in self._var_dict.items():\n _var_kinds[v.__class__.__name__].append(v)\n\n for k in _var_kinds:\n attrname = camel2underscores(k)\n setattr(self, attrname, _var_kinds[k])\n\n self._var_kinds = _var_kinds", "def globvardimvals(tmpl, valuesdict,sufs=['.001.001.meta', '.meta']):\n # remove formats: {xx:yy} -> {xx}\n tmpl = re.sub(r'{([^:}]*)(:[^}]*)?}', r'{\\1}', tmpl)\n\n fields = list(set(re.findall(r'{([^}]*)}', tmpl)))\n vardims = [k for k in fields if k.startswith('v')]\n vardims.sort()\n knownvars = dict((k,v) for k,v in valuesdict.items() if k in vardims)\n knownvardims = [ k for k in vardims if k in knownvars ]\n knownvarvals = [ knownvars[k] for k in knownvardims ]\n knownvarlens = [ len(v) for v in knownvarvals ]\n unknownvardims = [ k for k in vardims if not k in knownvars ]\n\n fixdims = [k for k in fields if not k.startswith('v')]\n fixdims.sort()\n\n # just pick actual fields\n known = dict((k,v) for k,v in valuesdict.items() if k in fields)\n 
knowndims = dict((k,v) for k,v in known.items() if k not in vardims)\n # first known value for each field\n firstdims = dict((k,v[0]) for k,v in knowndims.items())\n\n if 'vars' in valuesdict:\n # list of variable value tuples\n # must be all variables; will ignore other v0=... settings\n varvals = valuesdict['vars']\n else:\n knownvarindices = np.indices(knownvarlens)\n varvals = []\n for vi in zip(*[x.flat for x in knownvarindices]):\n varval = tuple(v[i] for v,i in zip(knownvarvals,vi))\n varvals.append(varval)\n\n dimvals = {}\n\n unknown = set(fields) - set(known)\n if unknown:\n replaceknown = dict((k,'{'+k+'}') for k in fields)\n for k,v in firstdims.items():\n replaceknown[k] = v\n\n for knownvarval in varvals:\n vars = dict(zip(knownvardims, knownvarval))\n replaceknown.update(vars)\n\n unknowntmpl = tmpl.format(**replaceknown)\n\n globpatt = re.sub(r'{[^}]*}', '*', unknowntmpl)\n for suf in sufs:\n metafiles = glob(globpatt + suf)\n if len(metafiles):\n break\n else:\n raise IOError(globpatt + suf)\n\n unknowndims = [k for k in unknown if not k.startswith('v')]\n regexp,parts,keys = format2re(unknowntmpl + suf)\n vals = {}\n for metafile in metafiles:\n g = re.match(regexp,metafile).groups()\n d = dict(zip(keys,g))\n varval = tuple(d[k] for k in unknownvardims)\n if varval not in vals:\n vals[varval] = dict((k,set()) for k in unknowndims)\n for k,v in zip(keys,g):\n if not k.startswith('v'):\n vals[varval][k].add(v)\n\n for unknownvarvals,vs in vals.items():\n unknownvars = dict(zip(unknownvardims,unknownvarvals))\n vars.update(unknownvars)\n varval = tuple(vars[k] for k in vardims)\n dimvals[varval] = dict((k,sorted(list(s))) for k,s in vs.items())\n dimvals[varval].update(knowndims)\n else:\n dimvals = dict.fromkeys(varvals, knowndims)\n \n # res: (v0,v1) -> {'d0':['a','b','c'], 'd1':[0,1,2], ...}\n return vardims,fixdims,dimvals", "def _parse_metadata(self, meta):\r\n output = {}\r\n for name, value in meta.items():\r\n name = name.lower()\r\n if name == \"summary\":\r\n # handle summary metadata as markdown\r\n # summary metadata is special case and join all list values\r\n summary_values = \"\\n\".join(value)\r\n # reset the markdown instance to clear any state\r\n self._md.reset()\r\n summary = self._md.convert(summary_values)\r\n output[name] = self.process_metadata(name, summary)\r\n elif len(value) > 1:\r\n # handle list metadata as list of string\r\n output[name] = self.process_metadata(name, value)\r\n else:\r\n # otherwise, handle metadata as single string\r\n output[name] = self.process_metadata(name, value[0])\r\n return output", "def get_variables_of_type(self, variable_type):\n if isinstance(variable_type,str):\n variable_key = variable_type\n else:\n #it is a class\n variable_key = variable_type.__name__\n return self._var_kinds[variable_key]", "def var_metadata(self, index):\n if index is not None:\n metadata = []\n for m in self.primary_header['variables'][index]['metadata']:\n meta = {\n 'value': m['Value'] / 10**m['Value precision'],\n 'code': m['Variable-specific code'],\n }\n if 'iMeta' in m:\n meta['iMeta'] = m['iMeta']\n else:\n meta['iMeta'] = 0\n metadata.append(meta)\n return metadata\n else:\n return None", "def test_group_by_fields(self):\r\n t = [\r\n ['#sample', 'loc', 'age', 'mal'],\r\n ['a', 'US', '5', 'n'],\r\n ['b', 'US', '10', 'n'],\r\n ['c', 'Mal', '5', 'y'],\r\n ['d', 'Mal', '10', 'n'],\r\n ['e', 'Mal', '5', 'y'],\r\n ]\r\n self.assertEqual(group_by_fields(t, ['age', 'loc']),\r\n {('5', 'US'): ['a'], ('10', 'US'): ['b'], ('5', 'Mal'): ['c', 
'e'],\r\n ('10', 'Mal'): ['d']})", "def describe(self, var=None, only_type=None, text_key=None, axis_edit=None):\n if text_key is None: text_key = self.text_key\n if var is not None:\n return self._get_meta(var, only_type, text_key, axis_edit)\n if self._meta['columns'] is None:\n return 'No meta attached to data_key: %s' %(data_key)\n else:\n types = {\n 'int': [],\n 'float': [],\n 'single': [],\n 'delimited set': [],\n 'string': [],\n 'date': [],\n 'time': [],\n 'array': [],\n 'N/A': []\n }\n not_found = []\n for col in self._data.columns:\n if not col in ['@1', 'id_L1', 'id_L1.1']:\n try:\n types[\n self._meta['columns'][col]['type']\n ].append(col)\n except:\n types['N/A'].append(col)\n for mask in list(self._meta['masks'].keys()):\n types[self._meta['masks'][mask]['type']].append(mask)\n idx_len = max([len(t) for t in list(types.values())])\n for t in list(types.keys()):\n typ_padded = types[t] + [''] * (idx_len - len(types[t]))\n types[t] = typ_padded\n types = pd.DataFrame(types)\n if only_type:\n if not isinstance(only_type, list): only_type = [only_type]\n types = types[only_type]\n types = types.replace('', np.NaN).dropna(how='all')\n else:\n types = types[['single', 'delimited set', 'array', 'int',\n 'float', 'string', 'date', 'time', 'N/A']]\n types.columns.name = 'size: {}'.format(len(self._data))\n return types", "def _parse_output_variables(self):\n self._output_variables_by_name = {}\n self._output_variables_by_type = {}\n for ov in self._output_variables:\n # parse the variable to get individual parts\n parsed_variable = self.parse_variable(ov)\n variable_name = parsed_variable.get('name')\n variable_type = parsed_variable.get('type')\n\n # store the variables in dict by name (e.g. \"status_code\")\n self._output_variables_by_name[variable_name] = {'variable': ov}\n\n # store the variables in dict by name-type (e.g. \"status_code-String\")\n self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}", "def data_types(self):", "def _get_field_details(self, data, fields):\n fields_metadata = dict()\n for field in fields:\n dtype = data[field].dtype\n field_template = self._FIELD_TEMPLATES.get(dtype.kind)\n if not field_template:\n raise ValueError('Unsupported dtype {} in column {}'.format(dtype, field))\n\n field_details = copy.deepcopy(field_template)\n fields_metadata[field] = field_details\n\n return fields_metadata", "def _make_category_groups(data_struct):\n groups = {}\n for cat in set(data_struct[\"Objects\"]): \n \n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n for dn in data_names:\n for idx in indices:\n groups[cat] = data_struct[dn][idx]\n return(groups)" ]
[ "0.59063804", "0.58795136", "0.5873593", "0.57685095", "0.5711241", "0.55855113", "0.5570443", "0.5503071", "0.5463241", "0.5416724", "0.5404935", "0.5290731", "0.5261287", "0.52095056", "0.52092326", "0.5185386", "0.50565064", "0.5050169", "0.5047163", "0.5043952", "0.50173277", "0.5008008", "0.5005571", "0.5003432", "0.49776724", "0.49645534", "0.49642453", "0.49605185", "0.49553165", "0.49394986" ]
0.65671605
0
Construct a "chain" shaped subset of Links and their Views from the Stack. A chain is a onetoone or onetomany relation with an orientation that defines from which axis (x or y) it is build.
def get_chain(self, name=None, data_keys=None, filters=None, x=None, y=None, views=None, post_process=True, orient_on=None, select=None): #Make sure all the given keys are in lists data_keys = self._force_key_as_list(data_keys) # filters = self._force_key_as_list(filters) views = self._force_key_as_list(views) if orient_on: if x is None: x = self.describe()['x'].drop_duplicates().values.tolist() if y is None: y = self.describe()['y'].drop_duplicates().values.tolist() if views is None: views = self._Stack__view_keys views = [v for v in views if '|default|' not in v] return self.__get_chains(name=name, data_keys=data_keys, filters=filters, x=x, y=y, views=views, post_process=post_process, orientation=orient_on, select=select) else: chain = Chain(name) found_views = [] missed_views = [] #Make sure all the given keys are in lists x = self._force_key_as_list(x) y = self._force_key_as_list(y) if data_keys is None: # Apply lazy data_keys if none given data_keys = self.keys() the_filter = "no_filter" if filters is None else filters if self.__has_list(data_keys): for key in data_keys: # Use describe method to get x keys if not supplied. if x is None: x_keys = self.describe()['x'].drop_duplicates().values.tolist() else: x_keys = x # Use describe method to get y keys if not supplied. if y is None: y_keys = self.describe()['y'].drop_duplicates().values.tolist() else: y_keys = y # Use describe method to get view keys if not supplied. if views is None: v_keys = self.describe()['view'].drop_duplicates().values.tolist() v_keys = [v_key for v_key in v_keys if '|default|' not in v_key] else: v_keys = views chain._validate_x_y_combination(x_keys, y_keys, orient_on) chain._derive_attributes(key,the_filter,x_keys,y_keys,views) # Apply lazy name if none given if name is None: chain._lazy_name() for x_key in x_keys: for y_key in y_keys: if views is None: chain[key][the_filter][x_key][y_key] = self[key][the_filter][x_key][y_key] else: for view in views: try: chain[key][the_filter][x_key][y_key][view] = self[key][the_filter][x_key][y_key][view] if view not in found_views: found_views.append(view) except KeyError: if view not in missed_views: missed_views.append(view) else: raise ValueError('One or more of your data_keys ({data_keys}) is not in the stack ({stack_keys})'.format(data_keys=data_keys, stack_keys=self.keys())) if found_views: chain.views = [view for view in chain.views if view in found_views] for view in missed_views: if view in found_views: missed_views.remove(view) if post_process: chain._post_process_shapes(self[chain.data_key].meta) if select is not None: for view in chain[key][the_filter][x_key][y_key]: df = chain[key][the_filter][x_key][y_key][view].dataframe levels = df.index.levels selection = {} for var in select: level = functions.find_variable_level(levels, var) if level is not None: selection[var] = level #Don't do anything if the selection doesnt produce a result if selection: # selection = {var: functions.find_variable_level(levels, var) for var in select} list_of_dfs = [df.xs(var, level=selection[var]) for var in selection.keys()] new_df = pd.concat(list_of_dfs) # Reconstruct the index new_df.index= pd.MultiIndex.from_product([levels[0],selection.keys()], names=df.index.names) chain[key][the_filter][x_key][y_key][view].dataframe = new_df return chain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.a[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.d[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def chain(self,slices):\n for proj in self.projs[::-1]:\n slices = proj(slices)\n return", "def rich_chain(self):\n chain = self\n\n result = []\n while chain.prev_fragment:\n result.append(chain)\n chain = chain.prev_fragment\n result.append(chain)\n result.reverse()\n\n return result", "def trip_chain(self):\n pass", "def metro_alg(N):\n\n chain = np.zeros(N) # start with x_0 = 0\n chain_removed = np.array([0])\n j = 0\n for i in range(N-1):\n\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain[i + 1] = y\n else:\n chain[i + 1] = chain[i]\n\n if next_chain_link(chain_removed[j], y):\n chain_removed = np.append(chain_removed, y) # append creates new array, does not change array argument\n j += 1\n\n return chain, chain_removed", "def plot_chain(chain):\n\n\tlabels = ['a', 'b']\n\tplt.figure(figsize=(20,6))\n\tfor i_dim in range(2):\n\t\tplt.subplot(2,1,i_dim+1)\n\t\tplt.ylabel(labels[i_dim])\n\n\t\tfor i in range(100):\n\t\t\tplt.plot(chain[i,:,i_dim],color='black', alpha=0.5)\n \n\tplt.show()", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def chain_edges(edges):\n if not isinstance(edges, np.ndarray):\n edges = np.array(edges, dtype=int)\n return PyMesh.chain_edges(edges)", "def explode_chained(self):\n\n words = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n\n for i, image in enumerate(self.images):\n for j, chain in enumerate(list(self.traces[i])):\n\n # initialize new lists to keep track of another chain\n explode_image_chain = list()\n explode_trace_chain = list()\n\n for k, link in enumerate(chain):\n\n if k is 0:\n explode_image = image[:]\n explode_image = np.append(explode_image, [\"zero\", 0, 0])\n \n if k is not len(chain) - 1:\n next_image = explode_trace = explode_image[:-3]\n\n word = words[k]\n x_coor = link[0]\n y_coor = link[1]\n\n np.append(explode_trace, [word, x_coor, y_coor])\n\n # Label the image with the action of the teacher given the image as input\n explode_image_chain.append(explode_image)\n explode_trace_chain.append(explode_trace)\n explode_image = next_image\n else:\n explode_trace = explode_image\n explode_image_chain.append(explode_image)\n explode_trace_chain.append(explode_trace)\n\n # Add entire counting sequence grouped together from start to finish\n self.explode_traces.append(explode_trace_chain)\n self.explode_images.append(explode_image_chain)\n\n self.explode_length = len(self.explode_images)", "def indirect(stack):\n g = nx.Graph(stack)\n for group in nx.connected_components(g):\n yield from map(frozenset, combinations(group, 2))", "def 
chain(self):\n return self._chain", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def metro_alg(N):\n\n chain = []\n chain_removed = []\n chain.append(0)\n chain_removed.append(0)\n\n for i in range(N):\n j = 0\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain.append(y)\n else:\n chain.append(chain[i])\n\n if next_chain_link(chain_removed[j], y):\n chain_removed.append(y)\n j += 1\n\n return chain, chain_removed", "def create_relation_superset(self):\n # trace = [a, b, c]\n # trace x trace = [(a, a), (a, b), ..., (c, a), (c, b), (c, c)]\n return itertools.product(self.activities, self.activities)", "def _build_chain(G, u, v, visited):\n while v not in visited:\n yield u, v\n visited.add(v)\n u, v = v, G.nodes[v]['parent']\n yield u, v", "def _interconnect(self):\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + 
fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,\n SE=False, expansion=3, stride=1):\n\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n\n # if stride > 1\n # or if block input planes != block output planes (only possible for first block in stack)\n # downsamples skip connection by 1x1-conv filter\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(\n conv1x1(inplanes, outplanes, stride=stride),\n norm_layer(outplanes)\n )\n\n layers = []\n\n # first block in stack can have stride > 1\n layers.append(block(inplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=stride, dropout=self._dropout, downsample=downsample,\n norm_layer=norm_layer, act_layer=act_layer))\n\n # other layers in stack\n # for each layer: inplanes = outplanes, stride=1, downsample=None\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=1, dropout=self._dropout, norm_layer=norm_layer,\n act_layer=act_layer))\n\n return nn.Sequential(*layers)", "def compile_links(cls, board):\n\n # compute 1 row width\n width = len(board[0])\n\n # flatten board to a 1d list\n flat_board = list(itertools.chain(*board))\n\n # compute total board length\n board_width = len(flat_board)\n\n # allocate a frame of 0s with proper columns and index\n df = pd.DataFrame(0, columns=flat_board, index=flat_board)\n\n # form links, one full loop of the board\n for y in range(board_width - 1):\n\n # 2 main skipping chains\n df.ix[y][y + 1] = df.ix[y + 1][y] = (y + 1) % width\n\n try:\n # 2 solid side chains\n df.ix[y][y + width] = df.ix[y + width][y] = y + width < board_width\n except IndexError:\n pass\n\n # make sure we cast any ints to bool on exit\n return df.astype(bool)", "def chain_cmd(ctx):\n pass", "def stack(self):\n # Fetch the zeroth layer data, which is the original input\n \tdata = self.data_container[0]\n # Initialize network that will contain the stack\n \tself.init_stacked_net(data)\n # Add the weights layer by layer from the individual networks.\n \t# The weights container has [(I_1,O_1),(I_2,O_2),...(I_n,O_n)],\n \t# you need to unfold it as I_1,I_2...I_n:O_n,...O_2,O_1\n \tself.stacked_net.weights = [a[0] for a \\\n in self.weights_container] + [a[1] for a \\\n in self.weights_container][::-1]\n \tself.stacked_net.biases = [a[0] for a in self.bias_container]\\\n + [a[1] for a in self.bias_container][::-1]", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def chain(self, chain):\n\n self._chain = chain", "def basestack(p, i, j, k, l):\n return _RNAstructure_wrap.basestack(p, i, j, k, l)", "def Chain(self, chain, **kwargs):\n\n from trulens_eval.tru_chain import TruChain\n\n return TruChain(tru=self, app=chain, **kwargs)", "def _add_links_from_mergers(self):\n for i, node_name in enumerate(self.node_list):\n self.builder.addDirectedLink(node_name, self, islot=i)", "def __view(self, top, view):\n\t\tresult = []\n\t\tdepth = -1\n\t\tfor seeking in reversed(view):\n\t\t\twhile depth > seeking:\n\t\t\t\tdepth -= 1\n\t\t\t\ttop = top[self.NODE_PRIOR]\n\t\t\tresult.append(top[self.NODE_SEMANTIC])\n\t\tresult.reverse()\n\t\treturn result", "def _get_transformation_chain(self, 
moving_slice_index):\n\n i = moving_slice_index\n s, e, r = tuple(self.options.sliceRange)\n\n # Calculate shortest paths between individual slices\n slice_paths = nx.all_pairs_dijkstra_path(self.G)\n\n # Get the shortest path linking given moving slice with the reference\n # slice.\n path = list(reversed(slice_paths[r][i]))\n chain = []\n\n # In case we hit a reference slice :)\n if i == r:\n chain.append((r, r))\n\n # For all the other cases collect partial transforms.\n for step in range(len(path) - 1):\n chain.append((path[step], path[step + 1]))\n\n return chain" ]
[ "0.5949611", "0.5761146", "0.5523492", "0.5483067", "0.53988105", "0.52840245", "0.51865804", "0.51337653", "0.50848824", "0.50766546", "0.5058167", "0.50423664", "0.50234085", "0.50112087", "0.5005778", "0.49760357", "0.49735498", "0.49703103", "0.49459764", "0.49457243", "0.49347624", "0.49345878", "0.48823467", "0.48823467", "0.487165", "0.48705792", "0.48612934", "0.48447213", "0.4801638", "0.478355" ]
0.58844775
1
Save Stack instance to .stack file.
def save(self, path_stack, compression="gzip"): protocol = cPickle.HIGHEST_PROTOCOL if not path_stack.endswith('.stack'): raise ValueError( "To avoid ambiguity, when using Stack.save() you must provide the full path to " "the stack file you want to create, including the file extension. For example: " "stack.save(path_stack='./output/MyStack.stack'). Your call looks like this: " "stack.save(path_stack='%s', ...)" % (path_stack) ) if compression is None: f = open(path_stack, 'wb') cPickle.dump(self, f, protocol) elif compression.lower() == "lzma": f = open(path_stack, 'wb') cPickle.dump(pylzma.compress(bytes(self)), f, protocol) else: f = gzip.open(path_stack, 'wb') cPickle.dump(self, f, protocol) f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_stack(stack):\n path = os.path.join(STACK_DIRECTORY, '%s.%s' % (stack.module, stack.caller))\n with open(path, 'w+') as f:\n dill.dump(stack, f)", "def save_game(self, path):\n try:\n file = open(path, \"wb\")\n for i in self.state_stack.states:\n i.on_save()\n pic.dump(self.state_stack, file)\n for i in self.state_stack.states:\n i.on_load()\n except IOError or pic.PicklingError as e:\n print(\"Game save error: {}\".format(e))", "def saveImage(self):\n\t\tself.getStackView().saveImage()", "def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def saveMovie(self):\n\t\tself.getStackView().saveStackMovie()", "def save(self, filename: str):\n dump(self, filename)", "def save(self, filename:str):\n dump(self, filename=filename)", "def save(self):\n\n if not self.revertable:\n return\n\n state = {}\n for x in self.toSave:\n state[x] = deepcopy(self.toSave[x]())\n\n #made a new model, reparent it so it displays\n state[\"model\"].reparentTo(base.render)\n\n #add it to the stack\n self.stack.append(state)\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,-THING_REVERT_DISTANCE))", "def save(self, pretty=True):\n self.endInstance()\n if pretty:\n _indent(self.root, whitespace=self._whiteSpace)\n tree = ET.ElementTree(self.root)\n tree.write(self.path, encoding=\"utf-8\", method='xml', xml_declaration=True)\n if self.logger:\n self.logger.info(\"Writing %s\", self.path)", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def save(self, filename):\n pickle.dump(self, open(filename + '.p', 'wb'), 2)", "def save(self, fp):\n fp.write(self.dump())", "def save(self, filename):\n with gzip.open(filename, \"w\") as f:\n f.write(pickle.dumps(self))", "def _save(self):\n if not os.path.exists(gitrepo.DEFAULT_REPOSITORY_PATH):\n # there is no data yet --> nothing to save\n return\n\n self.stack.serialize(DEFAULT_STACK)\n self.backlog.serialize(DEFAULT_QUEUE)\n # self.blocked.serialize(DEFAULT_LIMBO)\n self.sleeping.serialize(DEFAULT_DORM)", "def SaveStackH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_h5(filepath, self.data_struct) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return", "def write_to_disk(self):\n text_file = open(self.file_path, \"w\")\n text_file.write(str(self))\n text_file.close()\n # dump to pickle\n pickle.dump(self.blockchain, open(self.pickle_path, \"wb\"))", "def save(self, filename, **kwargs):\n with open(filename, 'wb') as fin:\n pickle.dump(self, fin, **kwargs)", "def _save_object_stack(self, folder, basename, img_stack, slices, labels=None):\n if labels is None:\n labels = range(slices)\n for lab, sl in zip(labels, slices):\n if sl is None:\n pass\n x = sl[0].start\n y = sl[1].start\n\n exsl = tuple([np.s_[:]] + [s for s in sl])\n\n fn = os.path.join(\n folder,\n basename\n + \"_l\"\n + str(lab + 1)\n + \"_x\"\n + str(x)\n + \"_y\"\n + str(y)\n + \".tiff\",\n )\n timg = img_stack[exsl]\n skimage.io.imsave(fn, timg, plugin=\"tifffile\", imagej=True)", 
"def save(self,filename):\n with open(filename,'wb') as f:\n pickle.dump(self,f)", "def save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save(self, fname):\n with open(fname, \"wb\") as f:\n cloudpickle.dump(self, f)\n # pickle.dump(self, open(fname, 'wb'))", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def save(self, filename):\n if '.pkl' not in filename:\n filename = filename + '.pkl'\n with open(filename, 'wb') as f:\n pickle.dump(self, f)", "def save(self):\n # TODO: save the file", "def saveState(self,filename=None):\n # For now we just use pickle for convenience. In the future, could use np.savez or HDF5 (or FITS)\n if filename is None:\n if self.statefile:\n filename = self.statefile\n else:\n filename = self.filename + '.cysolve.pkl'\n orig_statefile = self.statefile\n orig_ar = self.ar\n self.ar = None\n fh = open(filename,'w')\n cPickle.dump(self,fh,protocol=-1)\n fh.close()\n self.ar = orig_ar\n self.statefile = orig_statefile\n print \"Saved state in:\", filename", "def save(self,filename): \n with open(filename, 'wb') as f:\n pickle.dump(self,f)", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()" ]
[ "0.83591104", "0.68703425", "0.684626", "0.68171114", "0.6795026", "0.67741024", "0.6737266", "0.65906686", "0.6552278", "0.6511375", "0.6493919", "0.6468704", "0.6422137", "0.638314", "0.63795894", "0.6343323", "0.63281953", "0.62846315", "0.6282582", "0.62518305", "0.6233292", "0.62283796", "0.6213348", "0.621034", "0.62093115", "0.6199277", "0.61984", "0.619665", "0.619576", "0.6195488" ]
0.7311077
1
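Illustrative aside (not part of the dataset row above): the positive document for the "Save Stack instance to .stack file." query pickles the object to disk, optionally through gzip. A minimal standalone sketch of that pattern in Python 3 follows; the function name, object and path are hypothetical, and the LZMA branch is omitted.

    import gzip
    import pickle

    def save_object(obj, path, compression="gzip"):
        # mirror the row's convention: require the full path with a .stack extension
        if not path.endswith('.stack'):
            raise ValueError("provide the full path, including the .stack extension")
        opener = gzip.open if compression == "gzip" else open
        with opener(path, 'wb') as f:
            pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

    # usage (hypothetical): save_object(my_stack, './output/MyStack.stack')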
Creates a new stack instance from a .sav file.
def from_sav(data_key, filename, name=None, path=None, ioLocale="en_US.UTF-8", ioUtf8=True): if name is None: name = data_key meta, data = parse_sav_file(filename=filename, path=path, name=name, ioLocale=ioLocale, ioUtf8=ioUtf8) return Stack(add_data={name: {'meta': meta, 'data':data}})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_stack(filename):\n data = np.genfromtxt(filename, skip_header=1)\n index_arr = data[:, 2]\n thickness_arr = data[:, 3] / 1e9\n stack = Stack(index_arr, thickness_arr)\n return stack", "def read_spss(self, path_sav, **kwargs):\n if path_sav.endswith('.sav'): path_sav = path_sav.replace('.sav', '')\n self._meta, self._data = r_spss(path_sav+'.sav', **kwargs)\n self._set_file_info(path_sav)\n self._rename_blacklist_vars()\n return None", "def create_new_sim(save_file):\n sim = simulation.Simulation(save_file, 10)\n return sim", "def from_saves(cls, name : str):\n appdata = os.environ['appdata']\n folder = os.path.join(appdata, '.minecraft', 'saves', name)\n return cls(folder)", "def restore(cls, filename, *a, **kw):\n if not os.path.exists(filename):\n if not a:\n raise NoSuchFileError(\"You attempted to restore and did not supply parameters for andrey.Markov.\")\n return cls(*a, **kw)\n else:\n return cls.fromdict(next(msgpack.Unpacker(open(filename), encoding='utf-8')))", "def load(savename):\n Co8PersistentData.__dataDict.clear()\n try:\n inFile = open(buildPath(savename), \"r\")\n except IOError:\n return\n\n for line in inFile:\n separatorPos = line.index(\"|\")\n #read up to separator\n key = line[:separatorPos]\n #read from separator to -1 to ignore \\n\n value = line[separatorPos + 1:-1]\n #restore lists, tuples, dicts\n if ( value[0] == \"(\" or value[0] == \"[\" or value[0] == \"{\" ):\n exec(\"value = \" + value)\n Co8PersistentData.__dataDict[key] = value\n \n inFile.close()", "def load_model_sav(filename):\n filename = \"{}/models/saved_models/{}.sav\".format(ROOT_DIR, filename)\n loaded_model = pickle.load(open(filename, \"rb\"))\n return loaded_model", "def Load(self, filename):\n\n self.sm['state'] = self.AddState\n self.sm['condition'] = self.AddCondition\n exec(open(filename).read(), self.sm)\n self.name = self.sm['name']\n if not self.name.isalnum():\n raise Exception(\"State machine name must consist of only alphanumeric\"\n \"characters.\")\n self.comment = self.sm['comment']", "def create_stack():\n\n return Stack()", "def from_file(cls, file_location: str) -> EvgProject:\n with open(file_location) as contents:\n return cls(**yaml.safe_load(contents))", "def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_record)\n\t\treturn rec", "def from_laspy_File(cls, f):\n return cls((f.x, f.y, f.z), header=f.header.copy())", "def load(path_stack, compression=\"gzip\"):\r\n\r\n if not path_stack.endswith('.stack'):\r\n raise ValueError(\r\n \"To avoid ambiguity, when using Stack.load() you must provide the full path to \"\r\n \"the stack file you want to create, including the file extension. For example: \"\r\n \"stack.load(path_stack='./output/MyStack.stack'). 
Your call looks like this: \"\r\n \"stack.load(path_stack='%s', ...)\" % (path_stack)\r\n )\r\n\r\n if compression is None:\r\n f = open(path_stack, 'rb')\r\n elif compression.lower() == \"lzma\":\r\n f = pylzma.decompress(open(path_stack, 'rb')) # there seems to be a problem here!\r\n else:\r\n f = gzip.open(path_stack, 'rb')\r\n\r\n new_stack = cPickle.load(f)\r\n f.close()\r\n return new_stack", "def load(cls, from_file):\n with open(from_file) as infile:\n task_list = json.loads(infile.read())\n\n stack = cls()\n for task_id in task_list:\n stack.push(TaskInfo.from_id(task_id))\n\n return stack", "def from_file(cls, filename, SerializerClass=serialize.XMLSerializer):\n # Create a serializer object of class SerializerClass with the \n # structure definition for EGStub\n serializer = SerializerClass(EGStub_serialize_structure_definition)\n \n # Deserialize the EGStub instance from file\n try:\n data = serializer.deserialize_from_file(filename)\n except serialize.InvalidSerializeDataError as e:\n # Convert the exception to an InvalidPloneVoteCryptoFileError\n raise InvalidPloneVoteCryptoFileError(filename, \\\n \"File \\\"%s\\\" does not contain a valid cryptosystem. The \" \\\n \"following error occurred while trying to deserialize the \" \\\n \"file contents: %s\" % (filename, str(e)))\n \n name = data[\"PloneVoteCryptoSystem\"][\"name\"]\n description = data[\"PloneVoteCryptoSystem\"][\"description\"]\n \n inner_elems = data[\"PloneVoteCryptoSystem\"][\"CryptoSystemScheme\"]\n try:\n nbits = int(inner_elems[\"nbits\"])\n prime = int(inner_elems[\"prime\"], 16)\n generator = int(inner_elems[\"generator\"], 16)\n except ValueError as e:\n raise InvalidPloneVoteCryptoFileError(filename, \\\n \"File \\\"%s\\\" does not contain a valid cryptosystem. The \" \\\n \"stored values for nbits, prime and generator are not all \" \\\n \"valid integers in the expected format. 
Inner error message: \" \\\n \"%s\" % (filename, str(e)))\n \n # Create a new EGStub\n return cls(name, description, nbits, prime, generator)", "def __init__(self, fname):\n self.fname = os.path.abspath(fname)\n self.restore()", "def FromFile(cls, path: pathlib.Path, ir_id: int):\n with open(path, \"rb\") as f:\n graph_tuple = pickle.load(f)\n\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)", "def from_file(cls, filename):\n constructor_args = _load_serialized_mesh(filename)\n return cls(*constructor_args)", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def from_ast_file(cls, filename, index=None):\r\n if index is None:\r\n index = Index.create()\r\n\r\n ptr = conf.lib.clang_createTranslationUnit(index, filename)\r\n if ptr is None:\r\n raise TranslationUnitLoadError(filename)\r\n\r\n return cls(ptr=ptr, index=index)", "def from_file(cls, filename):\n\n f = libc.fopen(filename, \"r\")\n if f == 0:\n raise IOError(\"No such file\")\n\n try:\n set_ptr = ipset.ipset_load(f)\n if set_ptr == 0:\n raise IOError(\"Could not read IP set\")\n\n return cls(set_ptr)\n\n finally:\n libc.fclose(f)", "def from_filename(cls, filename, verify=True):\n\n with open(filename, \"r\") as fp:\n contents = pickle.load(fp)\n\n # Contents is: trained attributes, data hash, data trained on\n trained_contents = dict(zip(cls._trained_attributes, contents))\n N = len(trained_contents)\n expected_data_hash = contents[N]\n\n if N + 1 >= len(contents):\n raise TypeError(\"saved model in {} does not include data\".format(\n filename))\n\n # There was data as well.\n if verify and expected_data_hash is not None:\n actual_data_hash = _short_hash(contents[N + 1:])\n if actual_data_hash != expected_data_hash:\n raise ValueError(\"expected data hash ({0}) is different ({1})\"\\\n .format(expected_data_hash, actual_data_hash))\n\n # Create the model by initialising it with the data attributes.\n model = cls(**dict(zip([_[1:] for _ in cls._data_attributes],\n contents[N + 1:])))\n\n # Set the training attributes.\n for k, v in trained_contents.items():\n setattr(model, k, v)\n\n model._trained = True\n return model", "def from_stan_file(\n cls,\n stan_file: str,\n model_data: Optional[str] = None,\n *,\n stanc_args: List[str] = [],\n make_args: List[str] = [],\n seed: int = 1234,\n capture_stan_prints: bool = True,\n ):\n result = compile_model(stan_file, stanc_args=stanc_args, make_args=make_args)\n return cls(\n str(result), model_data, seed=seed, capture_stan_prints=capture_stan_prints\n )", "def _new_from_file(self, address, quiet=False):\n address = self._get_fullpath(address)\n \n version = self._dta_format(address)\n \n if version in (114, 115):\n self._file_to_Dta115(address)\n if not isinstance(self, Dta115):\n if not quiet:\n msg = \"file format is {}, converting to 117\"\n print(msg.format(version))\n self._convert_dta(Dta115)\n else:\n self._file_to_Dta117(address)\n if not isinstance(self, Dta117):\n if not quiet:\n msg = \"file format is {}, converting to 115\"\n print(msg.format(version))\n self._convert_dta(Dta117)\n \n # set self's path and filename\n self._set_path(address)\n \n # set changed to False, since dataset comes directly from file\n self._changed = False\n \n # display data label if in Stata\n if not quiet and IN_STATA and self._data_label.strip() != \"\":\n print(\"{txt}(\" + self._data_label + \"){txt}\")\n \n # set quiet on or off\n self._quiet = bool(quiet)", 
"def load(name, crystalStructure, cOverA=None, groupBy='plane'):\n # try and load from package dir first\n try:\n fileExt = \".txt\"\n packageDir, _ = os.path.split(__file__)\n filepath = f\"{packageDir}/slip_systems/{name}{fileExt}\"\n\n slipSystemFile = open(filepath)\n\n except FileNotFoundError:\n # if it doesn't exist in the package dir, try and load the path\n try:\n filepath = name\n\n slipSystemFile = open(filepath)\n\n except FileNotFoundError:\n raise(FileNotFoundError(\"Couldn't find the slip systems file\"))\n\n slipSystemFile.readline()\n slipTraceColours = slipSystemFile.readline().strip().split(',')\n slipSystemFile.close()\n\n if crystalStructure.name == \"hexagonal\":\n vectSize = 4\n else:\n vectSize = 3\n\n ssData = np.loadtxt(filepath, delimiter='\\t', skiprows=2,\n dtype=np.int8)\n if ssData.shape[1] != 2 * vectSize:\n raise IOError(\"Slip system file not valid\")\n\n # Create list of slip system objects\n slipSystems = []\n for row in ssData:\n slipSystems.append(SlipSystem(\n row[0:vectSize], row[vectSize:2 * vectSize],\n crystalStructure, cOverA=cOverA\n ))\n\n # Group slip systems is required\n if groupBy is not None:\n slipSystems = SlipSystem.group(slipSystems, groupBy)\n\n return slipSystems, slipTraceColours", "def import_model(file):\n file = os.path.expanduser(file)\n obj = IsolationForest()\n metadata = obj._cpp_obj.deserialize_obj(file)\n metadata = json.loads(metadata)\n obj._take_metadata(metadata)\n return obj", "def from_file( cls, filename ):\n with open( filename, 'r' ) as stream:\n data = yaml.load( stream )\n notes = data.get( 'notes' )\n type = data.get( 'type' )\n vaspmeta = VASPMeta( data['title'], \n data['description'], \n data['status'], \n notes=notes, \n type=type )\n return vaspmeta", "def fromFile(cls, filepath):\r\n return cls(values=foamFileFromFile(filepath, cls.__name__))", "def fromfile(cls, file):\n with open(file, 'rb') as fp:\n return pickle.load(fp)" ]
[ "0.6381975", "0.5992964", "0.57952154", "0.5777234", "0.5610841", "0.56030494", "0.5596717", "0.55868554", "0.5555224", "0.5437524", "0.5419869", "0.5418676", "0.54048544", "0.5385061", "0.53795224", "0.5335637", "0.53254545", "0.5320597", "0.5294863", "0.5294863", "0.5275241", "0.52719474", "0.5248755", "0.52450067", "0.52385145", "0.5197593", "0.5185324", "0.51845723", "0.51781714", "0.51769096" ]
0.7607764
0
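Illustrative aside: the row above shows the "alternate constructor from an SPSS .sav file" pattern. A standalone sketch of the same idea using the third-party pyreadstat package (an assumed dependency — the row's document relies on its own parse_sav_file helper instead), with a hypothetical DataBundle class standing in for Stack:

    import pyreadstat  # assumed dependency; not what the row's document uses

    class DataBundle:
        def __init__(self, add_data):
            self.add_data = add_data

        @classmethod
        def from_sav(cls, data_key, filename):
            # pyreadstat returns the case data and the variable metadata separately
            data, meta = pyreadstat.read_sav(filename)
            return cls(add_data={data_key: {'meta': meta, 'data': data}})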
Sets the x_variables and y_variables in the data part of the stack for this data_key, e.g. stack['Jan']. This method can also be used to add to the current lists; it makes sure the lists stay unique.
def _set_x_and_y_keys(self, data_key, x, y): if self.stack_pos == 'stack_root': self[data_key].__set_x_key(x) self[data_key].__set_y_key(y) else: raise KeyError("set_x_keys can only be called from a stack at root level. Current level is '{0}'".format(self.stack_pos))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, **vars):\n self._variable_stack.append(dict(self._variables))\n self.update(**vars)", "def set_data(self, x = None, y = None):\n self.x_axis = x\n self.y_axis = y", "def set_xList(self, *xList):\n assert len(xList) == self.__nx\n self.__x = xList\n self.__xshape = xList[0].shape # Reset the shape of the input.", "def pushFrameVariables(self, frame_variables):\n self.frame_variables_stack.append(frame_variables)\n self.frame_type_descriptions.append(set())", "def insert_variables(self, x):\n if len(self.index_value_pairs) == 0:\n return x\n y = list(x)\n for i in sorted(self.index_value_pairs):\n y.insert(i, self.index_value_pairs[i])\n if not isinstance(x, list):\n y = np.asarray(y) # doubles the necessary time\n return y", "def setData(self, ydata):\n\n newData = []\n for i in range(len(ydata)):\n # Set abstract X data\n newData.append(QtCore.QPointF(i, ydata[i]))\n\n self.replace(newData)\n self.rescale()", "def set_variables(self, new_variables: np.array):\n pass", "def update(self, x_stack, y_stack):\n if self.model_name == \"MLPv1\":\n x_stack = np.reshape(x_stack, [-1, self.input_size])\n else:\n x_stack = np.reshape(x_stack, [-1, self.seq_size, self.input_size, 1])\n return self.session.run([self.loss, self.train], feed_dict={self.x: x_stack, self.y: y_stack})", "def setData(self, Xdata, Ydata, legend=None, nbmark=10):\n plot_var = ['Xdata', 'Ydata', 'legend', 'mark', 'marknb']\n plot_nb = len(self.data['Plots'])\n\n if legend:\n plot_name = legend\n else:\n plot_name = 'plot%s' % (plot_nb)\n\n self.data['Plots']['%s' % (plot_name)] = dict.fromkeys(plot_var)\n\n local_plot = self.data['Plots']['%s' % (plot_name)]\n local_plot['Xdata'] = Xdata\n local_plot['Ydata'] = Ydata\n local_plot['legend'] = legend\n local_plot['marknb'] = nbmark", "def setVariableIndices(self, indicesOfVariables):\n for e in self.children:\n e.setVariableIndices(indicesOfVariables)", "def set_values(self,x):\n for i in range(len(self)):\n self[i].set_value(x[i])", "def calibrateStacks(self, x_stack, y_stack, z_stack):\r\n self.x_stack= x_stack\r\n self.y_stack=y_stack\r\n self.z_stack=z_stack\r\n \r\n self.resetImages()", "def extend(self, data_list):\n self.__stack.extend(data_list)", "def put_coords(self, xCoords, yCoords, zCoords):\n self._f.variables[\"coordx\"][:] = xCoords\n self._f.variables[\"coordy\"][:] = yCoords\n self._f.variables[\"coordz\"][:] = zCoords", "def set(self, x, y=0):\n self.d[x] = y", "def add_data(self, v, m, x, pos=1):\n if x is not None:\n if v in self.variables:\n if m in self.models:\n self.data.update({self.__gen_key(m, v, pos): x})\n self.pos.update({self.__gen_key(m, v, pos): pos})\n else:\n pass\n else:\n pass\n else:\n pass", "def fillup_x(self):\n assert not np.all(self.x == None)\n x_df = pd.DataFrame(self.x, columns=self.x_title)\n self.df = pd.concat([self.df, x_df], axis=1)", "def set_variable_slices(self, variables):\n # Set up y_slices and bounds\n y_slices = defaultdict(list)\n y_slices_explicit = defaultdict(list)\n start = 0\n end = 0\n lower_bounds = []\n upper_bounds = []\n # Iterate through unpacked variables, adding appropriate slices to y_slices\n for variable in variables:\n # Add up the size of all the domains in variable.domain\n if isinstance(variable, pybamm.ConcatenationVariable):\n start_ = start\n spatial_method = self.spatial_methods[variable.domain[0]]\n children = variable.children\n meshes = OrderedDict()\n for child in children:\n meshes[child] = [spatial_method.mesh[dom] for dom in child.domain]\n sec_points = 
spatial_method._get_auxiliary_domain_repeats(\n variable.domains\n )\n for i in range(sec_points):\n for child, mesh in meshes.items():\n for domain_mesh in mesh:\n end += domain_mesh.npts_for_broadcast_to_nodes\n # Add to slices\n y_slices[child].append(slice(start_, end))\n y_slices_explicit[child].append(slice(start_, end))\n # Increment start_\n start_ = end\n else:\n end += self._get_variable_size(variable)\n\n # Add to slices\n y_slices[variable].append(slice(start, end))\n y_slices_explicit[variable].append(slice(start, end))\n\n # Add to bounds\n def evaluate_bound(bound, side):\n if bound.has_symbol_of_classes(pybamm.InputParameter):\n if side == \"lower\":\n return -np.inf\n elif side == \"upper\":\n return np.inf\n else:\n return bound.evaluate()\n\n lower_bounds.extend(\n [evaluate_bound(variable.bounds[0], \"lower\")] * (end - start)\n )\n upper_bounds.extend(\n [evaluate_bound(variable.bounds[1], \"upper\")] * (end - start)\n )\n # Increment start\n start = end\n\n # Convert y_slices back to normal dictionary\n self.y_slices = dict(y_slices)\n # Also keep a record of what the y_slices are, to be stored in the model\n self.y_slices_explicit = dict(y_slices_explicit)\n\n # Also keep a record of bounds\n self.bounds = (np.array(lower_bounds), np.array(upper_bounds))\n\n # reset discretised_symbols\n self._discretised_symbols = {}", "def SetXData(self,entity,xdataPairs):\n\t\txdataType=[]\n\t\txdataValue=[]\n\t\tfor i,j in xdataPairs:\n\t\t\txdataType.append(i)\n\t\t\txdataValue.append(j)\n\t\tentity.SetXData(FilterType(xdataType),FilterData(xdataValue))", "def push(self, number_of_names):\n self.local_variables = EnvironmentLevel(self.local_variables)\n self.local_types = EnvironmentLevel(self.local_types)", "def setVariableIndices(self, indicesOfVariables):\n if self.name in indicesOfVariables:\n self.variableId = indicesOfVariables[self.name]", "def update(self, x_stack, y_stack):\n feed = {\n self._X: x_stack,\n self._Y: y_stack\n }\n return self.session.run([self._loss, self._train], feed)", "def set_variables(self, variables):\n self.variables = variables", "def set_metric_variables(self, metric_variable_values: List[Any]) -> None:\n with self._lock:\n self._set_metric_variables(metric_variable_values)", "def set_variables(self, new_variables: np.array):\n self.m, self.c = new_variables", "def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, t):\n if axis == 0:\n self.x_traj = traj_s\n self.x_ranking = rank_s\n self.x_scores = traj_s[-1]\n self.inverse_y_traj = traj_o\n self.inverse_y_ranking = rank_o\n self.inverse_y_scores = traj_o[-1]\n if axis == 1:\n self.y_traj = traj_s\n self.y_ranking = rank_s\n self.y_scores = traj_s[-1]\n self.inverse_x_traj = traj_o\n self.inverse_x_ranking = rank_o\n self.inverse_x_scores = traj_o[-1]", "def set_data(self, y: Iterable[torch.Tensor]):\n self._y = y\n\n return self", "def update_vars(self, point=None):\n \n which = self.thread_list.curselection()\n which = int(which[0])\n id = self.thread_ids[which]\n self.client.send_msg(\"get_locals\", id)\n self.locals = self.client.recv_msg()[0]\n \n self.locals_list.delete(0, END)\n \n for k, v in self.locals:\n self.locals_list.insert(END, str(k) + \" = \" + str(v))\n \n self.client.send_msg(\"get_stack\", id)\n stack = self.client.recv_msg()[0]\n self.stack_text.delete(1.0, END)\n self.stack_text.insert(END, \"\".join(stack))", "def setx(self, inputs):\n result = fixangles(self.n, inputs['azimuth'], inputs['elevation'])\n self.x[:, 0] = inputs['finAngle']\n self.x[:, 1] = 
result[0]\n self.x[:, 2] = result[1]", "def stack_update(self, tree: Keyvalues) -> None:\n self._stack_update = tree" ]
[ "0.58428204", "0.5507232", "0.54659504", "0.5432805", "0.54303056", "0.54111904", "0.5294803", "0.5270163", "0.5206912", "0.52056944", "0.5143031", "0.51415336", "0.51316047", "0.51277274", "0.5122735", "0.5116576", "0.5107302", "0.50981134", "0.50929874", "0.5043019", "0.5041647", "0.5028371", "0.50074476", "0.49863893", "0.49806315", "0.4966879", "0.4966339", "0.49478117", "0.49162117", "0.49089688" ]
0.6777685
0
Generate keys from a list (or tuple).
def __generate_key_from_list_of(self, list_of_keys): list_of_keys = list(list_of_keys) list_of_keys.sort() return ",".join(list_of_keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_decomp_keys(self, decomp_list):\n for key in decomp_list:\n if isinstance(key, tuple) or isinstance(key, list):\n yield key[0]\n else:\n yield key", "def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key", "def gen_keys():", "def generateDictKeys(string, n,step=1):\n if type(string) != str or type(n) != int:\n raise ValueError('Please input string and integer for first and second argument')\n elif step == 1:\n keylist = [string+str(i) for i in range(n)]\n return keylist\n else:\n keylist = [string+str(i) for i in range(0, n*step, step)]\n return keylist", "def __generate_tuple_keys(self, data):\n if len(data) < self.order:\n return\n\n for i in range(len(data) - self.order):\n yield [tuple(data[i:i+self.order]), data[i+self.order]]", "def get_identifiers(args_list, valid_keys):\n # ignore keys which have no variation among results\n identifiers = []\n for args in args_list:\n identifier = ''\n for key in valid_keys:\n if key in args:\n identifier += '{}={},'.format(key, args[key])\n identifiers.append(identifier)\n return identifiers", "def list_to_dict_keys(list):\n dictionary = defaultdict(list)\n for item in list:\n dictionary[item] = ''\n return dictionary", "def get_generator(strains, reference_id, start, end):\n primary_keys = []\n vals = range(start, end+1)\n for val in vals:\n for strain in strains:\n primary_keys.append(strain+\"_\"+reference_id+\"_\"+str(val))\n return primary_keys", "def _make_key(args, kwds, typed,\r\n kwd_mark = (object(),),\r\n fasttypes = {int, str, frozenset, type(None)},\r\n tuple=tuple, type=type, len=len):\r\n # All of code below relies on kwds preserving the order input by the user.\r\n # Formerly, we sorted() the kwds before looping. 
The new way is *much*\r\n # faster; however, it means that f(x=1, y=2) will now be treated as a\r\n # distinct call from f(y=2, x=1) which will be cached separately.\r\n key = args\r\n if kwds:\r\n key += kwd_mark\r\n for item in kwds.items():\r\n key += item\r\n if typed:\r\n key += tuple(type(v) for v in args)\r\n if kwds:\r\n key += tuple(type(v) for v in kwds.values())\r\n elif len(key) == 1 and type(key[0]) in fasttypes:\r\n return key[0]\r\n return _HashedSeq(key)", "def keys(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return itertools.imap(ITEMGETTER_1,\n self.pairs(args, lo, hi, reverse, max, include, txn))", "def args_to_key(args, kwargs, separator=STAR):\n\t# type: (tuple, dict, Any) -> tuple\n\n\tkey = [] # type: List[tuple]\n\tif args:\n\t\tkey.extend(args)\n\tif kwargs:\n\t\tkey.append(separator)\n\t\tkey.extend(sorted(kwargs.items()))\n\n\treturn tuple(key)", "def key(cls, *args, **kwargs):\n\n items = [cls]\n if args:\n items.append(tuple(args))\n if kwargs:\n items.append(FrozenDict(kwargs))\n return tuple(items)", "def _get_keys(self, listOfKeys):\n return self._keys", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def key(self, x):\r\n return tuple(x)", "def get_random_inchikeys(inchikey_list, train_val_test_split_fractions):\n random.shuffle(inchikey_list)\n\n train_num = int(train_val_test_split_fractions.train * len(inchikey_list))\n val_num = int(train_val_test_split_fractions.validation * len(inchikey_list))\n\n return TrainValTestInchikeys(inchikey_list[:train_num],\n inchikey_list[train_num:train_num + val_num],\n inchikey_list[train_num + val_num:])", "def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys", "def make_key(*args, **kwargs) -> Hashable:\n if len(args) == 1 and isinstance(args[0], (int, str)):\n return args[0]\n if kwargs:\n args = sum(kwargs.items(), (*args, _KWD_MARK))\n return _HashedSeq(args)", "def keys(self) -> tuple[Hashable, ...]:\n return tuple([self._hashify(item = c) for c in self.contents])", "def natural_keys(_tuple):\n return [atoi(c) for c in re.split(r'(\\d+)', _tuple[0])]", "def keyby(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return {iteratee(value): value for value in seq}", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def _get_keys_prefixes(li):\n keys = [x for x in li if isinstance(x, boto.s3.key.Key)]\n prefixes = [x for x in li if not isinstance(x, boto.s3.key.Key)]\n return (keys, prefixes)", "def generate_valid_keys():\n valid_keys = []\n for minimum, maximum in RANGES:\n for i in range(ord(minimum), ord(maximum) + 1):\n valid_keys.append(chr(i))\n return valid_keys", "def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < 
max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def combinations(self, key_list, lst=None):\n lst = self.filtered(key_list, lst)\n tups = [tuple([d[INDEP].get(k, d[DEP].get(k)) for k in key_list]) for d in lst]\n s = set(tups)\n l = list(s)\n l.sort()\n return [{k: v for k, v in zip(key_list, vals)} for vals in l]", "def key_arr(x):\n b = [x & bit_mask(32)]\n x >>= 32\n while x > 0:\n b.insert(0, x & bit_mask(32))\n x >>= 32\n return tuple(b)", "def get_shared_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = set(param_list[0].keys())\n\tfor i in range(1, len(param_list)):\n\t\tkeys = keys.intersection(param_list[i].keys())\n\tkeys = list(keys)\n\tkeys.sort()\n\treturn keys", "def from_list(hash, list):\r\n for k, v in enumerate(list):\r\n put(hash, k, v)" ]
[ "0.7271849", "0.6904", "0.66444695", "0.6273985", "0.6262291", "0.61452633", "0.61233735", "0.5925577", "0.59042096", "0.5861307", "0.5854959", "0.58370656", "0.58244324", "0.58018833", "0.579413", "0.575015", "0.57346183", "0.57298255", "0.57273", "0.5726597", "0.57195264", "0.5650421", "0.5641236", "0.56243926", "0.5605441", "0.55844194", "0.55654633", "0.55370265", "0.5528543", "0.5528357" ]
0.724901
1
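Illustrative aside: the key generation in the row above is simply "sort, then comma-join", so equivalent inputs produce the same key regardless of order. A standalone sketch:

    def key_from_keys(list_of_keys):
        # accepts any iterable of strings; input order does not matter
        return ",".join(sorted(list_of_keys))

    # key_from_keys(('gender', 'age')) -> 'age,gender'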
Generates all combinations of items from a list.
def __get_all_combinations(self, list_of_items): return [itertools.combinations(list_of_items, index+1) for index in range(len(list_of_items))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_combinations(items):\n\n def inner(items, r):\n \"\"\"\n recursively yields partitioned remainders of original partition lists\n \"\"\"\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition\n\n def outter(items, r):\n \"\"\"\n combines partition lists\n \"\"\"\n items = set(items)\n for i in range(len(items), -1, -r):\n if i == 0:\n for partition in inner(items, r):\n yield partition\n elif i != r:\n for combination in combinations(items, i):\n for partition in inner(items.difference(combination), r):\n yield partition + (combination, )\n\n # step through length of origin combination partitions to ensure full list\n for i in range(1, len(items)):\n gen = outter(items, i)\n for row in gen:\n yield row", "def combinations(*args: List[Any]) -> List[List]:\n return list([list(el) for el in list(product(*args))])", "def genSubset2(L):\n import itertools\n result = []\n for i in range(len(L) + 1):\n result += list(itertools.combinations(L, i))\n return result", "def combinations(*comb, **kw):\n return _fixture_functions.combinations(*comb, **kw)", "def word_combination(wlist:list) -> list :\r\n\r\n if wlist and len(wlist)>1:\r\n return chain(*map(lambda x: combinations(wlist, x), range(1, len(wlist)+1)))\r\n else :\r\n return wlist", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: int) -> List[List[Any]]:\n return [chunks[i:i + n] for chunks in items for i in range(len(chunks) - (n - 1))]", "def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations", "def generate_item_combinations(\n weapons, armors, rings\n) -> Iterator[Tuple[Item, Item, Item, Item]]:\n\n for weapon in weapons:\n for armor in armors:\n for ring_one, ring_two in combinations(rings, 2):\n yield weapon, armor, ring_one, ring_two", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def AllCombinations(data, comblength):\n return [c for c in itertools.combinations(data, comblength)]", "def lists_combinations(list_1, list_2):\n return [x[0] + ' ' + x[1] for x in itertools.product(list_1, list_2)]", "def equipment_combinations(weapons, armor, rings):\n weapon_choices = item_combinations(weapons, range(1, 2))\n armor_choices = item_combinations(armor, range(2))\n ring_choices = item_combinations(rings, range(3))\n complete_choices = itertools.product(weapon_choices, armor_choices, ring_choices)\n return complete_choices", "def permutations(xs):\n if not xs:\n yield 
[]\n else:\n for x, xs in selections(xs):\n for ys in permutations(xs):\n yield [x] + ys", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def product(*iterables, **kwargs):\n if len(iterables) == 0:\n yield ()\n else:\n iterables = iterables * kwargs.get('repeat', 1)\n it = iterables[0]\n for item in it() if callable(it) else iter(it):\n for items in product(*iterables[1:]):\n yield (item, ) + items", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def get_paren_combos():\n results = [None] * 4\n options = [('%s', '(%s)')]\n for i in range(1, 4):\n results[i] = list(itertools.product(*(i * options)))\n return results", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def _generate_combinations(self, param_idx, params):\n\n if param_idx == len(self.grid) - 1:\n # last parameter, just return list of values for this parameter\n return [[value] for value in self.grid[params[param_idx]]]\n else:\n subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations\n result = []\n\n # iterate over all values of current parameter\n for value in self.grid[params[param_idx]]:\n for subcombination in subcombinations:\n result.append([value] + subcombination)\n\n return result", "def generateCombos(vars,constants):\n # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS\n assert len(vars) == 2 and len(constants) == 2\n combs = []\n for c1 in constants:\n for c2 in constants:\n combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))\n return combs", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def CombinationMethods(nums, elements_number):\n res = list(c(nums, elements_number))\n return res, Combination(len(nums), elements_number)", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets", "def combinations(self, key_list, lst=None):\n lst = self.filtered(key_list, lst)\n tups = [tuple([d[INDEP].get(k, d[DEP].get(k)) for k in key_list]) for d in lst]\n s = set(tups)\n l = list(s)\n l.sort()\n return [{k: v for k, v in zip(key_list, vals)} for vals in l]", "def combine_params(param_list):\n\n\tif sum(isinstance(l, list) for l in param_list) > 1:\n\t\treturn list(map(list, list(itertools.product(*param_list))))\n\telse:\n\t\treturn [[p] for p in param_list]", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)", "def allCombinations(groupList):\n\tqueue = [groupList]\n\tresult = []\n\twhile 
len(queue) > 0:\n\t\tcurrent = queue.pop(0)\n\t\tif sum([len(group) for group in current]) == len(current):\n\t\t\tresult.append([group[0] for group in current])\n\t\t\tcontinue\n\n\t\ttmpList = []\n\t\tfor i, group in enumerate(current):\n\t\t\tif len(group) == 1:\n\t\t\t\ttmpList.append(group)\n\t\t\telse:\n\t\t\t\tfor j in xrange(len(group)):\n\t\t\t\t\tqueue.append(tmpList + [[group[j]]] + current[i + 1:])\n\t\t\t\tbreak\n\n\treturn result" ]
[ "0.73547757", "0.7042136", "0.6938442", "0.6915465", "0.6875703", "0.67848724", "0.6771032", "0.67682385", "0.67648953", "0.67328763", "0.6639714", "0.65762985", "0.6549294", "0.6549017", "0.65443975", "0.64914507", "0.64767146", "0.6452351", "0.63577306", "0.6336958", "0.6278383", "0.625681", "0.6201341", "0.61912936", "0.61782503", "0.61619085", "0.6155788", "0.6116351", "0.61043733", "0.6100919" ]
0.7876408
0
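Illustrative aside: the positive document above returns one combinations() iterator per subset size (1 .. len(items)). A flattened standalone variant of the same idea:

    from itertools import chain, combinations

    def all_combinations(items):
        # every combination of length 1 .. len(items), as one flat list of tuples
        return list(chain.from_iterable(
            combinations(items, r) for r in range(1, len(items) + 1)))

    # all_combinations(['x', 'y']) -> [('x',), ('y',), ('x', 'y')]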
Takes a stack_pos and returns the stack at that location; raises an exception if the stack pointer is not found.
def __get_stack_pointer(self, stack_pos): if self.parent.stack_pos == stack_pos: return self.parent else: return self.parent.__get_stack_pointer(stack_pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stack(self, offset):\n self.validate_stack_offset(offset)\n return self.stack[offset]", "def getStackPosition(self):\r\n return self.callstack.getStack()", "def findCaller(self, stack_info=False, stacklevel=2):\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv", "def read_stack_pointer(self):\n return self.STACK_POINTER", "def _FindTransactionFrameInStack():\n frame = sys._getframe()\n filename = frame.f_code.co_filename\n\n frame = frame.f_back.f_back\n while frame:\n if (frame.f_code.co_filename == filename and\n frame.f_code.co_name == 'RunInTransactionCustomRetries'):\n return frame\n frame = frame.f_back\n\n return None", "def _location_from_fx_stack_trace(\n node_stack_trace: str,\n) -> Optional[diagnostics.infra.Location]:\n if \"File\" not in node_stack_trace:\n return None\n\n lines = node_stack_trace.strip().split(\"\\n\")\n idx = 0\n while idx < len(lines) and \"File\" not in lines[idx]:\n idx += 1\n if idx + 1 >= len(lines):\n return None\n\n pattern = re.compile(r\"^File \\\"(.+)\\\", line (\\d+), in (.+)$\")\n matches = pattern.match(lines[idx].strip())\n if matches:\n uri = matches.group(1)\n line_number = int(matches.group(2))\n snippet = lines[idx + 1].strip()\n return diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet)\n return None", "def peek(self):\n try:\n return self.stack[-1]\n except IndexError:\n return None", "def top(stack):\n if empty_stack(stack):\n raise IndexError(\"Stack is empty!\")\n else:\n return stack.top.value", "def test_peek_on_small_stack(small_stack):\n assert small_stack.peek().val == 3", "def copy(stack):\n try:\n return stack[-1]\n except:\n print('error')", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def next_stack(zone: List[List[List[Tray]]],\n stack: List[List[Tray]]) -> Union[List[List[Tray]], None]:\n try:\n pos = zone.index(stack)\n except ValueError:\n return None\n return zone[pos + 1] if pos == len(stack) - 1 else None", "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def position(self):\n return self.stack.position()", "def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def top(self):\n if self.stack:\n return self.stack[-1]\n raise ValueError", "def currentframe(_no_of_go_up_level):\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[_no_of_go_up_level - 1].tb_frame.f_back", "def match_parentheses(dot, position):\n stack = 0\n for i in range(position + 1, len(dot)):\n if dot[i] == '(':\n stack += 1\n elif dot[i] == ')':\n if stack == 0:\n return i\n else:\n stack -= 1\n return -1", "def test_peek_empty():\n test_stack = stack.Stack()\n\n with 
pytest.raises(stack.StackEmptyError):\n test_stack.peek()", "def findCaller(self, stack_info=False):\n \n _frame_object = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X: Frames.\n if (_frame_object is not None):\n _frame_object = _frame_object.f_back\n \n rv = (\"(unknown file)\", 0, \"(unknown function)\", None)\n while hasattr(_frame_object, 'f_code'):\n _code_object = _frame_object.f_code\n filename = os.path.normcase(_code_object.co_filename)\n \n _next = _frame_object.f_back\n # noinspection PyProtectedMember,PyUnresolvedReferences\n if (filename == logging._srcfile):\n _frame_object = _next\n continue\n \n if (_next and hasattr(_next, 'f_code')):\n _parent_code = _next.f_code\n if (_parent_code.co_name == LOGGING_WRAPPER_NAME):\n _frame_object = _next.f_back\n continue\n \n _stack_info = None\n if (stack_info):\n _str_io = StringIO()\n _str_io.write('Stack (most recent call last):\\n')\n traceback.print_stack(_frame_object, file=_str_io)\n _stack_info = _str_io.getvalue()\n if (_stack_info[-1] == '\\n'):\n _stack_info = _stack_info[:-1]\n _str_io.close()\n \n rv = (_code_object.co_filename, _frame_object.f_lineno, _code_object.co_name, _stack_info)\n break\n return rv", "def peek(self) -> int:\n return self.stack[len(self.stack)-1]", "def peek(self):\n if not self.is_empty():\n return self._stack_items[-1]\n else:\n raise StackException('Peek operation not supported on an empty stack')", "def read_stack(self):\n\n result = self.memory.read(0x100 + self.sp)\n self.inc_sp()\n\n return result", "def get_stack_index(self):\n\t\treturn call_sdk_function('PrlVmDev_GetStackIndex', self.handle)", "def peek(self) -> int:\n if len(self.sk1.stack) == 0:\n if len(self.sk2.stack) > 0:\n return self.sk2.stack[-1]\n else:return self.sk1.stack[0]", "def peek(self):\n return self.stack[0]", "def peek(self):\n return self.stack[-1]", "def peek(self):\n return self.stack[-1]", "def peek(self):\n return self.stack[-1]", "def top(state):\n if len(state[STACK]) <= 0:\n return -1\n else:\n return state[STACK][-1]" ]
[ "0.6661285", "0.64549404", "0.6386795", "0.63797617", "0.62505674", "0.6199616", "0.6169721", "0.6112925", "0.6090924", "0.6016371", "0.5990624", "0.5959264", "0.5959168", "0.59493977", "0.5908766", "0.58735526", "0.5840768", "0.583369", "0.5831592", "0.5808342", "0.5787022", "0.576837", "0.57664686", "0.57470745", "0.5732998", "0.5707283", "0.56891954", "0.56891954", "0.56891954", "0.567286" ]
0.6940179
0
Verify that the given keys are str or unicode, or a list or tuple of those.
def _verify_multiple_key_types(self, data_keys=None, filters=None, x=None, y=None, variables=None, views=None): if data_keys is not None: self._verify_key_types(name='data', keys=data_keys) if filters is not None: self._verify_key_types(name='filter', keys=filters) if x is not None: self._verify_key_types(name='x', keys=x) if y is not None: self._verify_key_types(name='y', keys=y) if variables is not None: self._verify_key_types(name='variables', keys=variables) if views is not None: self._verify_key_types(name='view', keys=views)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verify_key_types(self, name, keys):\r\n if isinstance(keys, (list, tuple)):\r\n for key in keys:\r\n self._verify_key_types(name, key)\r\n elif isinstance(keys, (str, unicode)):\r\n pass\r\n else:\r\n raise TypeError(\r\n \"All %s keys must be one of the following types: \"\r\n \"<str> or <unicode>, \"\r\n \"<list> of <str> or <unicode>, \"\r\n \"<tuple> of <str> or <unicode>. \"\r\n \"Given: %s\" % (name, keys)\r\n )", "def NormalizeAndTypeCheckKeys(keys):\n keys, multiple = NormalizeAndTypeCheck(keys, (basestring, Entity, Key))\n\n keys = [_GetCompleteKeyOrError(key) for key in keys]\n\n return (keys, multiple)", "def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)", "def assert_keys_type_value(self,\n caller,\n extra_error_text,\n *context_items):\n assert context_items, (\"context_items parameter must be specified.\")\n\n for context_item in context_items:\n self.assert_key_type_value(context_item, caller, extra_error_text)", "def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))", "def is_valid_request(request_args, keys):\n if type(keys) != list:\n raise TypeError(\"Keys must be of type list\")\n\n for key in keys:\n if key not in request_args:\n return False\n return True", "def _validate_parameters(parameters):\n if not isinstance(parameters, dict):\n raise ValueError(\"Please enter a dictionary for parameters\")\n for key, val in parameters.items():\n if isinstance(val, list):\n for params in val:\n if not isinstance(params, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n else:\n if not isinstance(val, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n if not isinstance(key, str):\n raise ValueError(\"Parameter key {} is not a str\".format(key))\n\n return parameters", "def validate_dict(types,val,allowed,typ):\n if not len(types): return TYPE_MISMATCH\n if str(type(val)) not in typ['list']: raise(Exception('unknown type'))\n for k,v in val.items():\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def check_keys(set_name, keys, value, expect_key):\n\trecords = lib.read_all_records(set_name)\n\n\tfor key in keys:\n\t\tdigest = lib.get_key_digest(set_name, key)\n\t\tmeta_key, meta_ttl, record = records[str(digest).encode().hex()]\n\t\tlib.validate_record(key, record, [\"value\"], [value])\n\t\tlib.validate_meta(key, meta_key, meta_ttl, expect_key)", "def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", "def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise 
DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))", "def assert_keys_exist(self, caller, *keys):\n assert keys, (\"*keys parameter must be specified.\")\n for key in keys:\n self.assert_key_exists(key, caller)", "def validate_arguments(arguments: dict) -> None:\n if not isinstance(arguments, dict):\n raise TypeError('Argument \"arguments\" should be a dict')\n for argument in arguments:\n if not isinstance(arguments[argument][0], arguments[argument][1]):\n raise TypeError(f'Argument {argument} should be a {arguments[argument][1]}')", "def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'", "def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError('unknown keys in values')", "def test_two_keys():\n test = [{'key1': {'key2': 'val1'}}, ['key1', 'key2']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'", "def check_invalid_items(**kwargs: Tuple[T, Iterable[T]]):\n for key, (value, possible) in kwargs.items():\n possible = set(possible)\n if value not in possible:\n raise ValueError(f\"{key}={value} is not in: {possible}\")", "def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def assert_keys_match(keys, expected, allow_missing=True):\n if not allow_missing:\n missing = expected - keys\n assert not missing, 'missing keys: %s' % missing\n extra = keys - expected\n assert not extra, 'extraneous keys: %s' % extra", "def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())", "def _validate_parameter(value):\n if isinstance(value, (dict)):\n if any([not isinstance(key, string_types) for key in value.keys()]):\n raise TypeError(\"Invalid parameter. Dictionary keys must be strings.\")\n [_validate_parameter(item) for item in value.values()]\n elif isinstance(value, (list, tuple)):\n [_validate_parameter(item) for item in value]\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool))\n ):\n pass\n else:\n raise TypeError(\"Invalid parameter type. 
Got '%s'.\" % type(value))", "def valid_tuple(obj):\r\n try:\r\n assert isinstance(obj, tuple)\r\n assert isinstance(obj[0], str)\r\n assert isinstance(obj[1], str)\r\n except:\r\n raise Invalid(\"{} is not a valid key tuple\".format(obj))\r\n return obj", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def verify_json(output, expected_keys):\n deser = json.loads(output)\n assert deser\n for expected_key in expected_keys:\n assert expected_key in deser", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def key_checker(expected_keys):\r\n\r\n def check(actual_dict, raise_error=True):\r\n \"\"\"\r\n Function that checks whether all keys in the expected_keys object is in the given actual_dict object.\r\n \"\"\"\r\n missing = set(expected_keys) - set(actual_dict.keys())\r\n if not missing:\r\n return True\r\n if raise_error:\r\n raise InvalidTabsException(\r\n \"Expected keys '{0}' are not present in the given dict: {1}\".format(expected_keys, actual_dict)\r\n )\r\n else:\r\n return False\r\n\r\n return check", "def test_dict_keys_to_list():\n\n @type_checked\n def _run_test(thing:[str]):\n assert isinstance(thing, list)\n assert \"foo\" in thing\n assert \"bar\" in thing\n assert len(thing) == 2\n\n _run_test({\"foo\": 1, \"bar\": 2}.keys())\n _run_test({1: \"foo\", 2: \"bar\"}.values())", "def check_keys(self):" ]
[ "0.84560245", "0.6857103", "0.680023", "0.6798254", "0.6696846", "0.6648808", "0.6521568", "0.64482117", "0.6443321", "0.638443", "0.63820523", "0.63607854", "0.63559777", "0.6261623", "0.62345", "0.622563", "0.62081283", "0.61760545", "0.61636066", "0.61437494", "0.61348677", "0.6081966", "0.6074926", "0.60557616", "0.60502726", "0.60352725", "0.6033227", "0.6027914", "0.5999708", "0.5975701" ]
0.6996826
1
Verify that the given key exists in the stack at the path targeted.
def _verify_key_exists(self, key, stack_path=[]): error_msg = ( "Could not find the {key_type} key '{key}' in: {stack_path}. " "Found {keys_found} instead." ) try: dk = stack_path[0] fk = stack_path[1] xk = stack_path[2] yk = stack_path[3] vk = stack_path[4] except: pass try: if len(stack_path) == 0: if key not in self: key_type, keys_found = 'data', self.keys() stack_path = 'stack' raise ValueError elif len(stack_path) == 1: if key not in self[dk]: key_type, keys_found = 'filter', self[dk].keys() stack_path = 'stack[{dk}]'.format( dk=dk) raise ValueError elif len(stack_path) == 2: if key not in self[dk][fk]: key_type, keys_found = 'x', self[dk][fk].keys() stack_path = 'stack[{dk}][{fk}]'.format( dk=dk, fk=fk) raise ValueError elif len(stack_path) == 3: if key not in self[dk][fk][xk]: key_type, keys_found = 'y', self[dk][fk][xk].keys() stack_path = 'stack[{dk}][{fk}][{xk}]'.format( dk=dk, fk=fk, xk=xk) raise ValueError elif len(stack_path) == 4: if key not in self[dk][fk][xk][yk]: key_type, keys_found = 'view', self[dk][fk][xk][yk].keys() stack_path = 'stack[{dk}][{fk}][{xk}][{yk}]'.format( dk=dk, fk=fk, xk=xk, yk=yk) raise ValueError except ValueError: print error_msg.format( key_type=key_type, key=key, stack_path=stack_path, keys_found=keys_found )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_key_exists(self, key, caller):\n assert key, (\"key parameter must be specified.\")\n if key not in self:\n raise KeyNotInContextError(\n f\"context['{key}'] doesn't exist. It must exist for {caller}.\")", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "def _verify_key_exists(self, key, lookup_dict):\n exists = False\n if get_occurrence_of_key(lookup_dict, key) > 0:\n exists = True\n return exists", "def exists(self, key_name: str) -> bool:\n pass", "def has(self, key):", "def __contains__(self, key):\n return self._get(key, self.root) is not None", "def contains(bank, key):\n try:\n c_key = \"{}/{}\".format(bank, key or \"\")\n _, value = api.kv.get(c_key, keys=True)\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error getting the key, {c_key}: {exc}\")\n return value is not None", "def __contains__(self,key):\n if self.recursiveLookup(key,self.root):\n return True\n else:\n return False", "def exists(root: Node, key: int):\n if root is None:\n return False\n else:\n if root.key == key:\n return True\n elif key < root.key:\n return exists(root.left, key)\n else:\n return exists(root.right, key)", "def has(key):\n return not not (key in current().values)", "def path_in_dictionary(self, dictionary, path):\n if path:\n key = path.split('.')[0]\n if key in dictionary and dictionary[key]:\n key_exists = self.path_in_dictionary(dictionary[key], '.'.join(path.split('.')[1:]))\n else:\n key_exists = False\n else:\n key_exists = True\n return key_exists", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def has(self, key):\n return False", "async def contains(self, key: str) -> bool:", "def containsKey(self, key):\n return get(key) != None", "def ifExist(file_name, key):\n\tif exists(file_name) and exists(key):\n\t\treturn True\n\telse:\n\t\treturn False", "def tag_key_exists(self, key):\n return key in self.map", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def is_child_exists(self, key):\n return True if _Node.__find_key_in_level(self, key) else False", "def has_key(self, key):\n return self.__dict__.has_key(key)", "def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False", "def assert_key_has_value(self, key, caller):\n assert key, (\"key parameter must be specified.\")\n self.assert_key_exists(key, caller)\n\n if self[key] is None:\n raise KeyInContextHasNoValueError(\n f\"context['{key}'] must have a value for {caller}.\")", "def exists(self, key):\n try:\n return (self.salt + str(key)) in self.DB\n except KeyError:\n return False", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def has(self, key):\n return os.path.isfile(self._filename(key))", "def key_exists(dictionary, key):\n\n exists = dictionary.get(key, None)\n return exists is not None", "def contains(self, key: int) -> bool:\n _hash = self.get_hash(key)\n return self.bucket_array[_hash].exist(key)", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def _check_key(self, key):\n raise NotImplementedError" ]
[ "0.70001173", "0.6919717", "0.6739363", "0.6700697", "0.6681527", "0.65265507", "0.6524702", "0.6507662", "0.6446694", "0.64168507", "0.6404698", "0.6384552", "0.6377084", "0.63695735", "0.63428736", "0.63240826", "0.63168067", "0.6312734", "0.63050866", "0.63019025", "0.62791747", "0.62726945", "0.62681437", "0.62503004", "0.62330914", "0.6229347", "0.6228349", "0.62090117", "0.6206176", "0.6200302" ]
0.81743956
0
Returns key as [key] if it is str or unicode
def _force_key_as_list(self, key): return [key] if isinstance(key, (str, unicode)) else key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tokey(self, keys: Union[str, Iterable]):\n if hasattr(keys, \"encode\"): # str\n return keys.encode(\"utf-8\")\n elif hasattr(keys, \"decode\"): # bytes\n return keys\n return (self.Sep.join(keys).encode(\"utf-8\"))", "def __getitem__(self, key):\n if type(key) is str:\n return self.encode(key)\n elif type(key) is list or type(key) is tuple:\n return self.decode(key)", "def _key_to_str(self, key: Any) -> Any:\n if isinstance(key, str):\n return key\n if isinstance(key, int):\n return list(self._data_vars.keys())[key]\n if isinstance(key, slice):\n s = key.indices(len(self))\n return self._key_to_str(list(range(*s)))\n if isinstance(key, Iterable):\n keys = []\n for k in key:\n keys.append(self._key_to_str(k))\n return keys\n if hasattr(key, \"name\"):\n return key.name\n raise TypeError(f\"indexing with type {type(key)} is not supported\")", "def _decode_key(self, key):\n if hasattr(key, \"char\"):\n return str(key.char).lower()\n elif hasattr(key, \"name\"):\n return str(key.name).lower()", "def key(key):\n return key", "def as_key(key):\n return key.lstrip('/').rstrip('/')", "def key_to_string(cls, key):\n return '_'.join(map(str, key))", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def __getitem__(self, key: Union[str, Tuple[str, T]]) -> Union[str, T]:\n default: Union[str, T]\n if isinstance(key, tuple):\n key, default = key\n else:\n default = ''\n\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def deserialize_key(key: str):\n try:\n lit = ast.literal_eval(key)\n if isinstance(lit, Hashable):\n key = lit\n except ValueError:\n pass\n return key", "def serialize_key(key) -> str:\n if not isinstance(key, str):\n key = repr(key)\n return key", "def _GetKeyString(self):", "def _GetKeyString(self):", "def _get_key(key_or_id, key_cls):\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )", "def _validate_key(self, key):\n if isinstance(key, str):\n key = unicode(key, 'utf-8')\n elif not isinstance(key, unicode):\n raise TypeError(\n \"`key` must be `str` or `unicode`, not `{}`\".format(\n key.__class__.__name__)\n )\n return key", "def _convert_dict_key(self, string):\r\n try:\r\n # Don't do any of this if the string is empty or None.\r\n if string is None:\r\n return None\r\n # If the string represents a container, convert it.\r\n elif string[0] in [\"(\", \"[\", \"{\"]:\r\n string = (\r\n str_utils.str_to_container(string))\r\n # Try other type conversions. 
Any that fail will leave the\r\n # variable untouched.\r\n else:\r\n string = str_utils.str_to_num(string)\r\n string = (\r\n str_utils.str_to_datetime(string))\r\n string = str_utils.str_to_bool(string)\r\n # end if\r\n return string\r\n except Exception as err:\r\n _z_exc(\"logentry.py/convert_dict_key\", err)\r\n # end try\r", "def string_to_keypair(self, data): \n return keypair_lst", "def _key_name(self, key):\n if type(key) == type(\"\"):\n return str(curses.keyname(ord(key)).decode(\"utf-8\"))\n return False", "def prepare_key(self, key):\n return smart_str(key)", "def __getitem__(self, key: str) -> str:\n return self.get(key)", "def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):\n\n for i in (2, 1):\n\n if len(key_name) < i:\n return None, key_name\n\n type_v = key_name[-i:]\n\n if type_v in _KEY_SPLIT:\n if len(key_name) <= i:\n return _KEY_SPLIT[type_v], ''\n\n esc_cnt = 0\n for pos in range(-i - 1, -len(key_name) - 1, -1):\n if key_name[pos] == escape_char:\n esc_cnt += 1\n else:\n break\n\n if esc_cnt % 2 == 0:\n return _KEY_SPLIT[type_v], key_name[:-i]\n else:\n return None, key_name\n\n return None, key_name", "def __getitem__(self, key):\n return super(CaseInsensitiveStringDict, self).__getitem__(key.lower())", "def _GetKeyString(self):\n return self.__key_string", "def cast_name(key):\n special_symbols = set('{}{}'.format(punctuation, ' '))\n special_symbols.remove('_')\n new_key = ['_' if x in special_symbols else x for x in key]\n casted_key = ''.join(new_key)\n return casted_key", "def _get_key(var_type, attr):\n if attr is None:\n return var_type\n return f'{var_type}{SEP}{attr}'", "def _encode_key(self, key):\n return key.encode() if isinstance(key, str) else key", "def get(self, key=False, httpformat=False):\n if not key:\n result = self.data\n elif not isinstance(key, basestring):\n raise TypeError('keys have to be string')\n else:\n result = []\n for k, v in self.data:\n if k.lower() == key.lower():\n result.append((str(k), str(v)))\n if httpformat:\n return '\\n'.join(['%s: %s' % item for item in result])\n return result", "def resolve_key(obj, _):\n return obj.key.decode()" ]
[ "0.692036", "0.68628824", "0.667871", "0.65280366", "0.6479335", "0.6431466", "0.64151585", "0.64050287", "0.63881904", "0.62845904", "0.62736076", "0.62168086", "0.62093353", "0.61739796", "0.61739796", "0.6144354", "0.6143355", "0.6131979", "0.6118", "0.6107281", "0.60872144", "0.60620934", "0.6034612", "0.60198015", "0.6013931", "0.6003328", "0.5999141", "0.5998403", "0.59960616", "0.59954095" ]
0.73133
0
Verify that the given keys are str or unicode, or a list or tuple of those.
def _verify_key_types(self, name, keys): if isinstance(keys, (list, tuple)): for key in keys: self._verify_key_types(name, key) elif isinstance(keys, (str, unicode)): pass else: raise TypeError( "All %s keys must be one of the following types: " "<str> or <unicode>, " "<list> of <str> or <unicode>, " "<tuple> of <str> or <unicode>. " "Given: %s" % (name, keys) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verify_multiple_key_types(self, data_keys=None, filters=None, x=None,\r\n y=None, variables=None, views=None):\r\n if data_keys is not None:\r\n self._verify_key_types(name='data', keys=data_keys)\r\n\r\n if filters is not None:\r\n self._verify_key_types(name='filter', keys=filters)\r\n\r\n if x is not None:\r\n self._verify_key_types(name='x', keys=x)\r\n\r\n if y is not None:\r\n self._verify_key_types(name='y', keys=y)\r\n\r\n if variables is not None:\r\n self._verify_key_types(name='variables', keys=variables)\r\n\r\n if views is not None:\r\n self._verify_key_types(name='view', keys=views)", "def NormalizeAndTypeCheckKeys(keys):\n keys, multiple = NormalizeAndTypeCheck(keys, (basestring, Entity, Key))\n\n keys = [_GetCompleteKeyOrError(key) for key in keys]\n\n return (keys, multiple)", "def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)", "def assert_keys_type_value(self,\n caller,\n extra_error_text,\n *context_items):\n assert context_items, (\"context_items parameter must be specified.\")\n\n for context_item in context_items:\n self.assert_key_type_value(context_item, caller, extra_error_text)", "def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))", "def is_valid_request(request_args, keys):\n if type(keys) != list:\n raise TypeError(\"Keys must be of type list\")\n\n for key in keys:\n if key not in request_args:\n return False\n return True", "def _validate_parameters(parameters):\n if not isinstance(parameters, dict):\n raise ValueError(\"Please enter a dictionary for parameters\")\n for key, val in parameters.items():\n if isinstance(val, list):\n for params in val:\n if not isinstance(params, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n else:\n if not isinstance(val, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n if not isinstance(key, str):\n raise ValueError(\"Parameter key {} is not a str\".format(key))\n\n return parameters", "def validate_dict(types,val,allowed,typ):\n if not len(types): return TYPE_MISMATCH\n if str(type(val)) not in typ['list']: raise(Exception('unknown type'))\n for k,v in val.items():\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def check_keys(set_name, keys, value, expect_key):\n\trecords = lib.read_all_records(set_name)\n\n\tfor key in keys:\n\t\tdigest = lib.get_key_digest(set_name, key)\n\t\tmeta_key, meta_ttl, record = records[str(digest).encode().hex()]\n\t\tlib.validate_record(key, record, [\"value\"], [value])\n\t\tlib.validate_meta(key, meta_key, meta_ttl, expect_key)", "def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", 
"def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))", "def assert_keys_exist(self, caller, *keys):\n assert keys, (\"*keys parameter must be specified.\")\n for key in keys:\n self.assert_key_exists(key, caller)", "def validate_arguments(arguments: dict) -> None:\n if not isinstance(arguments, dict):\n raise TypeError('Argument \"arguments\" should be a dict')\n for argument in arguments:\n if not isinstance(arguments[argument][0], arguments[argument][1]):\n raise TypeError(f'Argument {argument} should be a {arguments[argument][1]}')", "def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'", "def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError('unknown keys in values')", "def test_two_keys():\n test = [{'key1': {'key2': 'val1'}}, ['key1', 'key2']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'", "def check_invalid_items(**kwargs: Tuple[T, Iterable[T]]):\n for key, (value, possible) in kwargs.items():\n possible = set(possible)\n if value not in possible:\n raise ValueError(f\"{key}={value} is not in: {possible}\")", "def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def assert_keys_match(keys, expected, allow_missing=True):\n if not allow_missing:\n missing = expected - keys\n assert not missing, 'missing keys: %s' % missing\n extra = keys - expected\n assert not extra, 'extraneous keys: %s' % extra", "def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())", "def _validate_parameter(value):\n if isinstance(value, (dict)):\n if any([not isinstance(key, string_types) for key in value.keys()]):\n raise TypeError(\"Invalid parameter. Dictionary keys must be strings.\")\n [_validate_parameter(item) for item in value.values()]\n elif isinstance(value, (list, tuple)):\n [_validate_parameter(item) for item in value]\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool))\n ):\n pass\n else:\n raise TypeError(\"Invalid parameter type. 
Got '%s'.\" % type(value))", "def valid_tuple(obj):\r\n try:\r\n assert isinstance(obj, tuple)\r\n assert isinstance(obj[0], str)\r\n assert isinstance(obj[1], str)\r\n except:\r\n raise Invalid(\"{} is not a valid key tuple\".format(obj))\r\n return obj", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def verify_json(output, expected_keys):\n deser = json.loads(output)\n assert deser\n for expected_key in expected_keys:\n assert expected_key in deser", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def key_checker(expected_keys):\r\n\r\n def check(actual_dict, raise_error=True):\r\n \"\"\"\r\n Function that checks whether all keys in the expected_keys object is in the given actual_dict object.\r\n \"\"\"\r\n missing = set(expected_keys) - set(actual_dict.keys())\r\n if not missing:\r\n return True\r\n if raise_error:\r\n raise InvalidTabsException(\r\n \"Expected keys '{0}' are not present in the given dict: {1}\".format(expected_keys, actual_dict)\r\n )\r\n else:\r\n return False\r\n\r\n return check", "def test_dict_keys_to_list():\n\n @type_checked\n def _run_test(thing:[str]):\n assert isinstance(thing, list)\n assert \"foo\" in thing\n assert \"bar\" in thing\n assert len(thing) == 2\n\n _run_test({\"foo\": 1, \"bar\": 2}.keys())\n _run_test({1: \"foo\", 2: \"bar\"}.values())", "def check_keys(self):" ]
[ "0.6996826", "0.6857103", "0.680023", "0.6798254", "0.6696846", "0.6648808", "0.6521568", "0.64482117", "0.6443321", "0.638443", "0.63820523", "0.63607854", "0.63559777", "0.6261623", "0.62345", "0.622563", "0.62081283", "0.61760545", "0.61636066", "0.61437494", "0.61348677", "0.6081966", "0.6074926", "0.60557616", "0.60502726", "0.60352725", "0.6033227", "0.6027914", "0.5999708", "0.5975701" ]
0.84560245
0
Join all running threads
def joinAllThreads(self): if self.fitAsync: with self.threadListLock: for thread in self.threadList: thread.join() else: return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def joiner():\n for th in threads:\n th.join()\n done(process)", "def join_threads(threads):\n for t in threads:\n while t.isAlive():\n t.join(5)", "def join(self):\n while not self._stop:\n time.sleep(0.1)\n for t in reversed(self.tasks):\n t.join()", "def end_threads(self):\n\n self.wait_threads()\n\n for _ in xrange(len(self.threads)):\n self.submit(None, None, None)\n\n for thread in self.threads:\n thread.join()", "def end_thread_pool():\n main_thread = threading.currentThread()\n for aThread in threading.enumerate():\n if aThread is main_thread:\n continue\n aThread.join()\n\n fout.debug(\"Waiting for threads to finish\")", "def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()", "def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()", "def _join_running_pipelines(self):\n still_running = list(self._running_pipelines)\n for pipeline in still_running:\n pipeline.join_all()", "def join(self, *args, **kwargs):\n for process in self.process:\n process.join(*args, **kwargs)", "def wait_threads(self):\n\n self.queue.join()", "def join(self):\n # block until all tasks are done\n self.queue.join()\n\n # stop workers\n for _ in self.async_femags:\n self.queue.put(None)\n for async_femag in self.async_femags:\n async_femag.join()\n \n return [t.status for t in self.job.tasks]", "def wake_all_threads(self):\n self.advance_time(increment_by=0.0)", "def cleanup():\n for th in THREAD_REGISTER.values():\n th.exit()\n th.join(timeout=3)", "def join(self):\n self.thread.join()", "def join(self):\n self.thread.join()", "def stop(self):\n self.queue.join()\n print (\"task queue is now empty\")\n for thd in self._allthreads:\n print (\"Joining thread %s\" % thd.name)\n thd.stop()\n if thd.isAlive():\n thd.join(10) # max wait time is 10 seconds\n if thd.isAlive():\n print (\"Joining thread %s failed\" % thd.name)", "def join(self):\n # A nicer place for this would be shutdown(), but this being a mixin,\n # that method can't safely do anything with that method, thus we add\n # an extra method explicitly for clearing the queue and shutting\n # down the workers.\n self._request_queue.join()\n self._shutdown_event.set()", "def shutdown(self):\n self.all_workers_joined.wait() \n self.shutdown_master_thread()\n self.all_workers_joined.clear()", "def join(self):\r\n if self._unfinished_tasks > 0:\r\n yield from self._finished.wait()", "def run_and_join():\n daemon = threading.Thread(target=print_numbers, args=[10, 1, \"\"])\n daemon.daemon = True\n daemon.start()\n daemon.join()\n print(\"After the join\")", "def quit(self):\n map(lambda w: self._tasks.put(None), self._workers)\n map(lambda w: w.join(), self._workers)", "def join(self):\n self.is_join = True\n self.task_queue.join()", "def join(self) -> None:\n n_tasks = self.n_tasks()\n while n_tasks > 0:\n self.log.tick(\n \"%s tasks remaining, sleeping for %s s\", n_tasks, POLL_INTERVAL\n )\n time.sleep(POLL_INTERVAL)\n n_tasks = self.n_tasks()\n self.log.debug(\"Joined successfully\")", "def join(self):\n self.queue.join()", "def clear_workers(self):\n # seems sometimes that workers will cause\n # print \"calling destructor\"\n # first set the exit flag for each of the workers.\n for worker in self.workers:\n worker.no_exit = False\n\n # next clear the queue, the workers might be waiting to add data to\n # the queue.\n # print \"clearing queue\"\n 
while not self.queue.empty():\n self.queue.get()\n\n # print \"queue empty, joining threads\"\n # now join all the workers\n for worker in self.workers:\n worker.join()\n # print \"done joining threads\"", "def monitor(self):\n while True:\n complete = True\n for thread in self._running:\n if not thread.complete:\n complete = False\n\n if thread.complete:\n thread.join()\n elif thread.failed:\n pass\n\n if complete:\n break\n time.sleep(Threadable.THREAD_SLEEP)", "def run_threads(self, threads):\n\n for t, daemon in threads:\n t.daemon = daemon\n self.processes.append(t)\n t.start()\n\n for t in self.processes:\n t.join()", "def stopworkerthreads():\n global PROCESSES\n for proc in PROCESSES:\n proc.stop()\n proc.join()", "def join(self, timeout=None):\n self.all_tasks_done.acquire()\n try:\n while self.unfinished_tasks:\n self.all_tasks_done.wait(timeout) # changed here\n finally:\n self.all_tasks_done.release()", "def start_workers(self):\n\n for thread in self.threads:\n thread.start()" ]
[ "0.8029715", "0.773945", "0.76160085", "0.7336391", "0.72426933", "0.7126829", "0.70607364", "0.6977239", "0.6876884", "0.6726911", "0.66968334", "0.66548884", "0.6609573", "0.6568536", "0.6568536", "0.65668297", "0.656492", "0.6482418", "0.64778936", "0.64752525", "0.64453655", "0.64316905", "0.64009655", "0.64000684", "0.63993776", "0.63792294", "0.63528484", "0.63399184", "0.63194966", "0.62716305" ]
0.83492696
0
Add role to the Member
async def addrole(self, ctx, member: discord.Member, role: discord.Role): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to add this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not assign *{role}* role using this command.", description="For more information run ```.help addrole```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) if role in member.roles: return await ctx.channel.send( embed=discord.Embed( title=f"*{member}* already has *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await member.add_roles(role) await ctx.send( embed=discord.Embed( title=f"*{role}* has been added to *{member}*", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "def addRole(self, role):\n self._client.addRole(role)", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! *;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "def add_role(self, principal, role):\n return permissions.utils.add_local_role(self, principal, role)", "def test_add_role_to_project_member(self):\n pass", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "async def massadd(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help massadd```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser", "def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to 
{}'.format(role.name, user.name))", "def role(self, role):\n\n self._role = int(role)", "async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})", "def define_role(self, role):\n\n self._db_manager.create_role(role)", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def role(self, role):\n\n self._role = role", "def role(self, role):\n\n self._role = role", "def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}", "def test_add_role(self):\n pass", "def add_role(self, role):\n try:\n self.db_proxy.nameCheck(role.theName, 'role')\n except ARM.ARMException as ex:\n self.close()\n raise ARMHTTPError(ex)\n\n role_params = RoleParameters(\n name=role.theName,\n rType=role.theType,\n sCode=role.theShortCode,\n desc=role.theDescription,\n cProperties=[]\n )\n\n role_id = self.db_proxy.addRole(role_params)\n\n return role_id", "def addUserRole(self, name, role):\n self._client.addUserRole(name, role)", "async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)", "def add_role(self, name):\n role = 
Role.by_name(name)\n if not role:\n role = Role(name)\n db.add(role)\n if not role in self.roles:\n self.roles.append(role)", "def create_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "def set_role(userid, role, group, request=None):", "async def persistrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help persistrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n persistent_role = Roles(\n bot=self.bot,\n guild_id=ctx.guild.id,\n user_id=member.id,\n roles=role.id,\n )\n # Post to db for persistent role\n await persistent_role.post()\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"Persisting Role: *{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def addtagrole(self, ctx, _role):\r\n\t\tif _role == 0:\r\n\t\t\tself.settings.ServerConfig(ctx.guild.id, 'TagRole', 0)\r\n\t\t\tawait ctx.send('Tag role set to: {}'.format(0))\r\n\t\telse:\t\r\n\t\t\trole = self.settings.Get(ctx, 'role', _role)\r\n\t\t\tif not role: return await ctx.send('Can\\'t find role: {}'.format(_role))\r\n\r\n\t\t\tself.settings.ServerConfig(ctx.guild.id, 'TagRole', role.id)\r\n\t\t\tawait ctx.send('Tag role set to: {}'.format(role))", "def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))", "async def addrole(self, ctx: context.CustomContext):\n\n await ctx.send(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with the name of the role you want to create.\"\n )\n\n role_name = await ctx.converted_input(converter=converter.CaseInsensitiveRole)\n\n if isinstance(role_name, str):\n await ctx.send(\n f\"{config.YES} I will **create a new role** on this server named `{role_name}` for this.\"\n )\n try:\n discord_role = await ctx.guild.create_role(name=role_name)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.CREATE_ROLE, role_name\n )\n\n else:\n discord_role = role_name\n\n await ctx.send(\n f\"{config.YES} I'll use the **pre-existing role** named `{discord_role.name}` for this.\"\n )\n\n role_join_message = await ctx.input(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with a short message the user should see when they get the role.\"\n )\n\n try:\n await self.bot.db.execute(\n \"INSERT INTO selfrole (guild_id, role_id, join_message) VALUES ($1, $2, $3) \"\n 
\"ON CONFLICT (guild_id, role_id) DO UPDATE SET join_message = $3\",\n ctx.guild.id,\n discord_role.id,\n role_join_message,\n )\n except asyncpg.UniqueViolationError:\n return await ctx.send(\n f\"{config.NO} `{discord_role.name}` is already a selfrole on this server.\"\n )\n\n await ctx.send(f\"{config.YES} `{discord_role.name}` was added as a selfrole.\")" ]
[ "0.7570307", "0.7567677", "0.7534781", "0.7433675", "0.7415941", "0.7411809", "0.7370567", "0.72969", "0.72914296", "0.7286169", "0.7281028", "0.72385347", "0.72077465", "0.7204429", "0.71875477", "0.71404094", "0.71215886", "0.71215886", "0.71122515", "0.7056936", "0.7049572", "0.7033353", "0.7017532", "0.6994796", "0.695422", "0.69447374", "0.69231755", "0.6891489", "0.68405646", "0.6832828" ]
0.79049414
0
Remove a role from the member
async def removerole(self, ctx, member: discord.Member, role: discord.Role): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to remove this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not remove *{role}* role using this command.", description="For more information run ```.help removerole```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) if role not in member.roles: return await ctx.channel.send( embed=discord.Embed( title=f"{member} doesn't have *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await member.remove_roles(role) await ctx.send( embed=discord.Embed( title=f"*{role}* has been removed from *{member}*", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})", "def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def test_remove_role_from_project_member(self):\n pass", "def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)", "async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def removeRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='remove', modelType='role')", "async def command_unassign_role(self, context, role: str):\n try:\n await context.author.remove_roles(discord.utils.get(context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be unassigned')\n print(f'Errored in command_unassign_role.', e)", "def remove_role(self, name):\n role = Role.by_name(name)\n if not role:\n return\n if role in self.roles:\n self.roles.remove(role)", "async def 
rolemenu_remove_role(self, interaction: discord.Interaction,\n name: str, role: str):\n try:\n role_id = int(role)\n except ValueError:\n return await interaction.response.send_message(\n \"The role provided \"\n \"is not valid. Make sure that you either select one from the \"\n \"options that the autocomplete provides, or that you \"\n \"provide the role's ID\",\n ephemeral=True)\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role_id:\n break\n else:\n return await interaction.followup.send(\n \"Role not found in that menu\")\n await self.db.update_one({\"_id\": doc[\"_id\"]},\n {\"$pull\": {\n \"roles\": role_doc\n }})\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role removed from the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))", "async def removepersistrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removepersistrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n persistent_role = Roles(\n bot=self.bot,\n guild_id=ctx.guild.id,\n user_id=member.id,\n roles=role.id,\n )\n # Post to db for persistent role\n await persistent_role.delete()\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"Persisting Role *{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def delete_token_role(self, role):\n return self.delete('auth/token/roles/{0}'.format(role))", "def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = 
db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()", "async def remove_from(self, target: discord.Member) -> None:\n role = await self.get_role(target.guild)\n if role:\n await target.remove_roles(role)\n\n if not role.members:\n await role.delete()", "def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))", "def delete_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "def remove_role(self, rolename):\n params = {\n \"f\" : \"json\",\n \"rolename\" : rolename\n }\n uURL = self._url + \"/roles/remove\"\n return self._con.post(path=uURL,\n postdata=params)", "def role_delete(\n login_manager: LoginManager, *, role_id: str, endpoint_id: uuid.UUID\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n res = transfer_client.delete_endpoint_role(endpoint_id, role_id)\n display(res, text_mode=TextMode.text_raw, response_key=\"message\")", "def revoke_role(self, role, principal_ids):", "async def removerole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n if role.id in excluded_roles:\n excluded_roles.remove(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n await ctx.send(\"Removed %s from role exclusion list.\" % role.name)\n else:\n await ctx.send(\"%s is not an excluded role.\" % role.name)", "def remove_role(profile, instance_profile, role):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n params[\"RoleName\"] = role\n return client.remove_role_from_instance_profile(**params)", "def _remove_role(contest, user, role_class):\n user_biv_id = _lookup_user(user).biv_id\n role = role_class.query.select_from(pam.BivAccess).filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == role_class.biv_id\n ).one()\n db.session.delete(\n pam.BivAccess.query.filter(\n pam.BivAccess.source_biv_id == contest,\n pam.BivAccess.target_biv_id == role.biv_id\n ).one()\n )", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def remove_trainee(role_id):\n\n role = Role.query.get(role_id)\n if role is None or role.role_id != RoleIds.Trainee:\n flash(\"Role invalide\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n if role.activity_type not in current_user.get_supervised_activities():\n flash(\"Non autorisé\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n 
db.session.delete(role)\n db.session.commit()\n\n return redirect(url_for(\".leader_list\"))", "def removeRole(self, role_id, REQUEST=None):\n for principal_id in self._principal_roles.keys():\n self.removeRoleFromPrincipal(role_id, principal_id)\n\n del self._roles[role_id]", "def delete(self, role_id):\n self.client.delete_role(role_id)", "def deleteUserRole(self, name, role):\n self._client.deleteUserRole(name, role)" ]
[ "0.8187497", "0.7978509", "0.79427636", "0.79377174", "0.78856933", "0.77909815", "0.76512873", "0.7625483", "0.7503555", "0.7469059", "0.74140835", "0.741383", "0.74035096", "0.73808104", "0.73396295", "0.7317423", "0.72812545", "0.7207674", "0.7205083", "0.72049063", "0.71506137", "0.7098876", "0.7064379", "0.70634633", "0.7033618", "0.7025776", "0.701235", "0.6999964", "0.6997312", "0.6972282" ]
0.8088363
1
Add a persisting role to the member
async def persistrole(self, ctx, member: discord.Member, role: discord.Role): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to add this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not assign *{role}* role using this command.", description="For more information run ```.help persistrole```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) if role in member.roles: return await ctx.channel.send( embed=discord.Embed( title=f"*{member}* already has *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await member.add_roles(role) persistent_role = Roles( bot=self.bot, guild_id=ctx.guild.id, user_id=member.id, roles=role.id, ) # Post to db for persistent role await persistent_role.post() await ctx.send( embed=discord.Embed( title=f"Persisting Role: *{role}* has been added to *{member}*", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def save(self):\n await config.member(self.member).set_raw(str(self.role.id), value=self.as_dict)", "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser", "def define_role(self, role):\n\n self._db_manager.create_role(role)", "async def removepersistrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removepersistrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n persistent_role = Roles(\n bot=self.bot,\n guild_id=ctx.guild.id,\n user_id=member.id,\n roles=role.id,\n )\n # Post to db for persistent role\n await persistent_role.delete()\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"Persisting Role *{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def add_role(self, principal, role):\n return permissions.utils.add_local_role(self, principal, role)", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "async def addrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def becomeMemberOf(self, groupRole):\n self.store.findOrCreate(RoleRelationship,\n group=groupRole,\n member=self)", "def save(self):\n body = {}\n body[\"permissions\"] = 
dict(self.permissions)\n body[\"name\"] = self.name\n body[\"description\"] = self.description\n _, role = self._requestor.patch('/roles/' + self._id, json=body)\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])", "def addRole(self, role):\n self._client.addRole(role)", "async def massadd(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help massadd```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def add_role(self, name):\n role = Role.by_name(name)\n if not role:\n role = Role(name)\n db.add(role)\n if not role in self.roles:\n self.roles.append(role)", "def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')", "def role(self, role):\n\n self._role = int(role)", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! 
*;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)", "def grant_role(self, role, principal_ids):", "def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}", "def test_add_role(self):\n pass", "def role(self, role):\n\n self._role = role", "def role(self, role):\n\n self._role = role", "def add_employeeRole(self, id, role):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employeeRoles values(%s,%s)',\n (id, role))\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save EmployeeRole!\\n(%s)' % (error))", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()", "def add_role_to_user(self, user, role):\n user, role = self._prepare_role_modify_args(user, role)\n if role not in user.roles:\n user.roles.append(role)\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return False", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to {}'.format(role.name, user.name))", "def add_role():\n check_admin()\n add_role = True\n\n form = RoleForm()\n if form.validate_on_submit():\n role = Role(title=form.title.data)\n\n try:\n db.session.add(role)\n db.session.commit()\n flash('New role successfully created')\n except:\n 
flash('Error: Role title already exist')\n\n return redirect(url_for('admin.get_roles'))\n\n return render_template('admin/roles/role.html', form=form, add_role=add_role, title='Add Role')", "def add_role(email, role):\n from enferno.user.models import Role\n u = User.query.filter(User.email == email).first()\n\n if u is None:\n print('Sorry, this user does not exist!')\n else:\n r = Role.query.filter(Role.name == role).first()\n if r is None:\n print('Sorry, this role does not exist!')\n u = click.prompt('Would you like to create one? Y/N', default='N')\n if u.lower() == 'y':\n r = Role(name=role)\n try:\n db.session.add(r)\n db.session.commit()\n print('Role created successfully, you may add it now to the user')\n except Exception as e:\n db.session.rollback()\n # add role to user\n u.roles.append(r)" ]
[ "0.75814265", "0.69590753", "0.6944869", "0.6903061", "0.68663025", "0.67569315", "0.67447764", "0.6688437", "0.6578498", "0.6572754", "0.6531272", "0.65293014", "0.6517352", "0.6489404", "0.64842844", "0.64722466", "0.64613277", "0.6391094", "0.6384594", "0.6379459", "0.63694286", "0.636707", "0.636707", "0.6356663", "0.63397306", "0.63251513", "0.6319986", "0.6312419", "0.63028735", "0.63005435" ]
0.7995215
0
Remove a persisting role from the member
async def removepersistrole(self, ctx, member: discord.Member, role: discord.Role): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to add this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not remove *{role}* role using this command.", description="For more information run ```.help removepersistrole```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) if role not in member.roles: return await ctx.channel.send( embed=discord.Embed( title=f"*{member}* doesn't have *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await member.remove_roles(role) persistent_role = Roles( bot=self.bot, guild_id=ctx.guild.id, user_id=member.id, roles=role.id, ) # Post to db for persistent role await persistent_role.delete() await ctx.send( embed=discord.Embed( title=f"Persisting Role *{role}* has been removed from *{member}*", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()", "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})", "def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def removeRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='remove', modelType='role')", "def test_remove_role_from_project_member(self):\n pass", "def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))", "def remove_role(self, name):\n role = Role.by_name(name)\n if not role:\n return\n if role in self.roles:\n self.roles.remove(role)", "async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = 
discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def delete_token_role(self, role):\n return self.delete('auth/token/roles/{0}'.format(role))", "async def command_unassign_role(self, context, role: str):\n try:\n await context.author.remove_roles(discord.utils.get(context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be unassigned')\n print(f'Errored in command_unassign_role.', e)", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def _remove_role(contest, user, role_class):\n user_biv_id = _lookup_user(user).biv_id\n role = role_class.query.select_from(pam.BivAccess).filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == role_class.biv_id\n ).one()\n db.session.delete(\n pam.BivAccess.query.filter(\n pam.BivAccess.source_biv_id == contest,\n pam.BivAccess.target_biv_id == role.biv_id\n ).one()\n )", "def delete_role(self, role_name: str) -> None:\n session = self.get_session\n role = session.query(Role).filter(Role.name == role_name).first()\n if role:\n log.info(\"Deleting role '%s'\", role_name)\n session.delete(role)\n session.commit()\n else:\n raise AirflowException(f\"Role named '{role_name}' does not exist\")", "async def unset(self, ctx, *, role_name: str):\n role_name = role_name.lower()\n\n if isinstance(ctx.message.channel, discord.DMChannel):\n guild = await self.get_server_from_pm(ctx)\n else:\n guild = ctx.guild\n\n if guild is None:\n return\n\n await self.remove_role(ctx, role_name, guild)", "def 
revoke_role(self, role, principal_ids):", "def test_delete_role(self):\n pass", "def unmake_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.remove_role_from_user(self, 'admin')\n db.session.commit()", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def remove_role(role_id: int) -> bool:\n role: Role = db.session.query(Role).get(role_id)\n if role is None:\n return False # this role did not exist in the first place\n db.session.delete(role)\n db.session.commit()\n return True", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n role: discord.Role = await self.check_payload(payload)\n\n if role:\n guild = self.bot.get_guild(payload.guild_id)\n member: discord.Member = guild.get_member(payload.user_id)\n await member.remove_roles(role)", "def remove_trainee(role_id):\n\n role = Role.query.get(role_id)\n if role is None or role.role_id != RoleIds.Trainee:\n flash(\"Role invalide\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n if role.activity_type not in current_user.get_supervised_activities():\n flash(\"Non autorisé\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n db.session.delete(role)\n db.session.commit()\n\n return redirect(url_for(\".leader_list\"))", "async def rolemenu_remove_role(self, interaction: discord.Interaction,\n name: str, role: str):\n try:\n role_id = int(role)\n except ValueError:\n return await interaction.response.send_message(\n \"The role provided \"\n \"is not valid. Make sure that you either select one from the \"\n \"options that the autocomplete provides, or that you \"\n \"provide the role's ID\",\n ephemeral=True)\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role_id:\n break\n else:\n return await interaction.followup.send(\n \"Role not found in that menu\")\n await self.db.update_one({\"_id\": doc[\"_id\"]},\n {\"$pull\": {\n \"roles\": role_doc\n }})\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role removed from the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()", "def role_delete(\n login_manager: LoginManager, *, role_id: str, endpoint_id: uuid.UUID\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n res = transfer_client.delete_endpoint_role(endpoint_id, role_id)\n display(res, text_mode=TextMode.text_raw, response_key=\"message\")", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "def detach(profile, instance_profile, role):\n # Make sure the instance profile exists.\n if not exists(profile, instance_profile):\n msg = \"No instance profile '\" + str(instance_profile) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Make sure the role exists.\n if not 
role_jobs.exists(profile, role):\n msg = \"No role '\" + str(role) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Detach the role\n params = {}\n params[\"profile\"] = profile\n params[\"instance_profile\"] = instance_profile\n params[\"role\"] = role\n return utils.do_request(instanceprofile, \"remove_role\", params)", "def _restoreRole(self, oldRole, args):\n if oldRole:\n args['role'] = oldRole\n else:\n del args['role']" ]
[ "0.7482419", "0.73443776", "0.7326495", "0.7247932", "0.71359766", "0.7044", "0.7003247", "0.6907898", "0.69022053", "0.68038666", "0.6803843", "0.680215", "0.6796542", "0.67945176", "0.6772586", "0.6704123", "0.66735536", "0.66630554", "0.6656054", "0.66415286", "0.6641259", "0.6586434", "0.6583941", "0.6579907", "0.6577087", "0.6546582", "0.6528084", "0.6515865", "0.65133125", "0.64842284" ]
0.810434
0
Add the role to specified members in the guild
async def massadd( self, ctx, role: discord.Role, member: commands.Greedy[discord.Member], ): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to add this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not assign *{role}* role using this command.", description="For more information run ```.help massadd```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) for i in member: if role in i.roles: await ctx.channel.send( embed=discord.Embed( title=f"*{i}* already has *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await i.add_roles(role) await ctx.send( embed=discord.Embed( title=f"*{role}* has been added to **{len(member)}** members!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def addroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def addrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def addRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.add_roles(*roles)\n await ctx.send(f\"Adding {roles_str(person, roles)}\")", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def add_members(self, members):\n self.__add_remove_members(members)", "async def _update_member_roles(guild_id: int, member_id: int,\n wanted_roles: list):\n\n # first, fetch all current roles\n roles = await app.db.fetch(\"\"\"\n SELECT role_id from member_roles\n WHERE guild_id = $1 AND user_id = $2\n \"\"\", guild_id, member_id)\n\n roles = [r['role_id'] for r in roles]\n\n roles = set(roles)\n wanted_roles = set(wanted_roles)\n\n # first, we need to find all added roles:\n # roles that are on wanted_roles but\n # not on roles\n added_roles = wanted_roles - roles\n\n # and then the removed roles\n # which are roles in roles, but not\n # in wanted_roles\n removed_roles = roles - wanted_roles\n\n conn = await app.db.acquire()\n\n async with conn.transaction():\n # add roles\n await app.db.executemany(\"\"\"\n INSERT INTO member_roles (user_id, guild_id, role_id)\n VALUES ($1, $2, $3)\n \"\"\", 
[(member_id, guild_id, role_id)\n for role_id in added_roles])\n\n # remove roles\n await app.db.executemany(\"\"\"\n DELETE FROM member_roles\n WHERE\n user_id = $1\n AND guild_id = $2\n AND role_id = $3\n \"\"\", [(member_id, guild_id, role_id)\n for role_id in removed_roles])\n\n await app.db.release(conn)", "async def apply_to(self, target: discord.Member, updatedb: bool = True) -> None:\n role = await self.get_role(target.guild, create=True, updatedb=updatedb)\n await target.add_roles(role)", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to {}'.format(role.name, user.name))", "async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)", "def add_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members]\n members.append(user)\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})", "async def mute(self, ctx: Context, members: commands.Greedy[discord.Member], reason=\"no reason\"):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for i, member in enumerate(members):\n if role in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already muted\", delete_after=8)\n del members[i]\n\n if role is None:\n permissions = discord.Permissions()\n permissions.change_nickname = True\n permissions.send_messages = False\n permissions.read_message_history = True\n role = await ctx.guild.create_role(name=\"Muted\", permissions=permissions)\n\n await self.set_perms(ctx.guild, role)\n\n for member in members:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.add_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} muted {member_display}\")", "def setRole(self, room, nick, role):\n if role not in ('moderator', 'participant', 'visitor', 'none'):\n raise TypeError\n query = ET.Element('{http://jabber.org/protocol/muc#admin}query')\n item = ET.Element('item', {'role':role, 'nick':nick}) \n query.append(item)\n iq = self.xmpp.makeIqSet(query)\n 
iq['to'] = room\n result = iq.send()\n if result is False or result['type'] != 'result':\n raise ValueError\n return True", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! *;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def update_guild_members(name, server):\n url = base_wow + guild+\"/\"+ server+\"/\"+ name+\"?\"+ method + locale + api\n r = requests.get(url)\n data = r.json()\n guilde = data['name']\n for member in data[\"members\"]:\n add_member(guilde, member['character']['name'], member['rank'], member['character']['level'])", "def add_user_roles(userid:str, *roles):", "def grant_role(self, role, principal_ids):", "async def setjoinrole(self, ctx, role):\r\n guild = ctx.message.guild\r\n role = discord.utils.get(guild.roles, name=role)\r\n functions.updatesql(server=ctx.guild.id, joinrole=role.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsess!', color=discord.Colour.from_rgb(255, 0, 255)))", "def role_add(role, nodes, node, node_vars, host_vars, extra):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(\n node, node_vars, host_vars, extra)\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n added_nodes = role_manager.add_role(\n role, hosts_node_map=nodes, host_vars=host_vars,\n node_vars=node_vars, extra_args=extra_args)\n\n print(f\"{len(added_nodes)} nodes were added to role {role}: {', '.join(sorted(added_nodes))}\")\n return 0", "def roles(*args):\n env.salt_roles.extend(args)", "async def add(self, ctx, project_name: str,\n members: commands.Greedy[discord.Member]) -> None:\n project = project_name\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n if str(ctx.author.id) != ctx.projects.find_project(project).get(\n \"owner\"):\n await ctx.send(\"You can't add members to this project.\")\n return\n members = members if len(members) > 0 else [ctx.author]\n count = len(members)\n channel = ctx.guild.get_channel(\n int(ctx.projects.find_project(project).get(\"channel\")))\n for member in members:\n await channel.set_permissions(member, read_messages=True,\n send_messages=False)\n ctx.projects.add_project_members(project, [x.id for x in members])\n if members == ctx.author:\n await ctx.send(f\"You're already a member.\")\n if count == 1:\n member = members[0]\n await ctx.send(f\"`{member}` is now a member.\")\n if count == 2:\n await ctx.send(f\"`{members[0]}` and `{members[1]} `\"\n \"are now members.\")\n else:\n last_member = members[count - 1]\n members = members.pop(count - 1)\n string = 
\"`\"\n members = string + \", \".join(str(x) for x in members) + string\n members = members + f\" and `{last_member}`\"\n await ctx.send(f\"{members} are now members of your project.\")", "def setHgMembers(self, membersToAdd):\n self.members = membersToAdd", "async def createRole(self, ctx):\n await self.deleteRole(ctx=ctx, reason=\"Début de partie.\")\n await ctx.guild.create_role(name=self.categoryName)\n await asyncio.sleep(1)\n self.roleForPlayer = discord.utils.get(ctx.guild.roles, name=self.categoryName)\n print(\"Role created.\")\n member = await ctx.guild.fetch_member(bot.user.id)\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")\n for member in ctx.author.voice.channel.members:\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")", "def add_users(caller, role, *users):\r\n _check_caller_authority(caller, role)\r\n role.add_users(*users)", "async def vouch(ctx, *, member_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n server = ctx.message.server\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n member = discord.utils.find(lambda c: c.name.lower() == member_name.lower(), server.members)\n roles = member.roles\n new_role = discord.utils.find(lambda r: r.name.lower() == required_role, server.roles)\n roles.append(new_role)\n await amor_manager.replace_roles(member, *roles)\n await amor_manager.say('{0} granted citizenship'.format(member.name))", "async def _set_roles(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await guild.create_role(\n name=\"Host\", colour=discord.Color(0xFFBF37),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).host_id.set(host.id)\n await ctx.author.add_roles(host)\n\n player = await guild.create_role(\n name=\"Player\", colour=discord.Color(0x37BFFF),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).player_id.set(player.id)\n\n repl = await guild.create_role(\n name=\"Replacement\", colour=discord.Color(0x86FF40)\n )\n await self.config.guild(guild).repl_id.set(repl.id)\n\n spec = await guild.create_role(\n name=\"Spectator\", colour=discord.Color(0xD837FF)\n )\n await self.config.guild(guild).spec_id.set(spec.id)\n\n dead = await guild.create_role(\n name=\"Dead\", colour=discord.Color(0xDC5757)\n )\n await self.config.guild(guild).dead_id.set(dead.id)\n\n txt = _(\n \"Host: {}\"\n \"\\nPlayer: {}\"\n \"\\nSpectator: {}\"\n \"\\nDead: {}\"\n \"\\nReplacement: {}\"\n ).format(\n host.mention,\n player.mention,\n spec.mention,\n dead.mention,\n repl.mention\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Roles!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Set up required roles!\")", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n 
\n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "def syncRole(user, roleToAdd, listToAdd):\n print(user, \":\", roleToAdd)\n if roleToAdd == \"Doppelgänger\":\n listToAdd.append(\n Doppelganger(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Sbire\":\n listToAdd.append(Minion(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Loup-Garou\":\n listToAdd.append(Werewolf(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Loup Alpha\":\n listToAdd.append(\n AlphaWerewolf(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Loup Shamane\":\n listToAdd.append(\n ShamanWerewolf(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Franc-Maçon\":\n listToAdd.append(Freemason(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Voyante\":\n listToAdd.append(Seer(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Chasseur de Fantômes\":\n listToAdd.append(GhostHunter(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Apprentie voyante\":\n listToAdd.append(\n BeginnerSeer(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Voleur\":\n listToAdd.append(Thief(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Noiseuse\":\n listToAdd.append(\n Troublemaker(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Soûlard\":\n listToAdd.append(Drunkard(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Insomniaque\":\n listToAdd.append(Insomniac(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Divinateur\":\n listToAdd.append(Diviner(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Tanneur\":\n listToAdd.append(Tanner(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Chasseur\":\n listToAdd.append(Hunter(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Garde du corps\":\n listToAdd.append(BodyGuard(user=user, firstRole=roleToAdd, botRef=bot))\n\n elif roleToAdd == \"Loup rêveur\":\n listToAdd.append(\n SleepingWerewolf(user=user, firstRole=roleToAdd, botRef=bot))\n else:\n print(\"GROS PROBLEME\", roleToAdd)\n exit()", "async def add_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n errored = \"\"\n message = \"\"\n added = []\n already_added = []\n for role in roles:\n if role >= ctx.author.top_role:\n errored += (\n \"{role}: You can't set a role equal to or higher than your own.\\n\".format(\n role=role.name\n )\n )\n continue\n if role >= ctx.guild.me.top_role:\n errored += (\n \"{role}: You can't set a role that's equal to or higher than the \"\n \"bot.\\n\".format(role=role.name)\n )\n continue\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n if role.id not in roles_list:\n roles_list.append(role.id)\n added.append(role.name)\n else:\n already_added.append(role.name)\n message += errored\n if added:\n message += \"\\nAdded role(s): {roles}\".format(roles=humanize_list(added))\n if already_added:\n message += \"\\nRole(s) already added: {roles}\".format(\n roles=humanize_list(already_added)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, 
txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})" ]
[ "0.70327395", "0.6760455", "0.6745772", "0.67203873", "0.66337353", "0.6586981", "0.6503161", "0.6405369", "0.63433355", "0.6337548", "0.6296795", "0.6280821", "0.62756366", "0.62320954", "0.62254107", "0.62130994", "0.62019783", "0.61869454", "0.613318", "0.60818875", "0.60789466", "0.6060879", "0.60533947", "0.6042473", "0.6000809", "0.59866375", "0.5981686", "0.59807783", "0.5961196", "0.5957259" ]
0.7289643
0
Remove the role from specified members in the guild
async def massremove( self, ctx, role: discord.Role, member: commands.Greedy[discord.Member], ): role = discord.utils.get(ctx.guild.roles, id=role.id) muted_role = discord.utils.get(ctx.guild.roles, name="Muted") punished_role = discord.utils.get(ctx.guild.roles, name="Punished") if role > ctx.author.top_role: return await ctx.send( embed=discord.Embed( title="You don't have permission to remove this role", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.darker_grey(), ) ) if role == muted_role or role == punished_role: return await ctx.send( embed=discord.Embed( title=f"Can not remove *{role}* role using this command.", description="For more information run ```.help massremove```", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.red(), ) ) for i in member: if role not in i.roles: await ctx.channel.send( embed=discord.Embed( title=f"*{i}* doesn't have *{role}* Role!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.greyple(), ) ) await i.remove_roles(role) await ctx.send( embed=discord.Embed( title=f"*{role}* has been removed from **{len(member)}** members!", timestamp=datetime.datetime.utcnow(), colour=discord.Colour.green(), ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def remove_from(self, target: discord.Member) -> None:\n role = await self.get_role(target.guild)\n if role:\n await target.remove_roles(role)\n\n if not role.members:\n await role.delete()", "async def unmute(self, ctx: Context, members: commands.Greedy[discord.Member], *, reason: str = None):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for member in members:\n if role not in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already unmuted\")\n\n else:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.remove_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} unmuted {member_display}\")", "async def removeroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removeroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def remove_roles(guild):\r\n Rules = Query()\r\n db.remove(Rules.guild == guild.id)\r\n del RULES[guild.id]", "async def removeRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.remove_roles(*roles)\n await ctx.send(f\"Removing {roles_str(person, roles)}\")", "def remove_members(self, members):\n # TODO docstring\n 
self.__add_remove_members(members, remove=True)", "async def on_guild_remove(guild):\r\n logging.info(\"Left guild %d\", guild.id)\r\n await remove_roles(guild)", "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def remove_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members if x.id != user.id]\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "def test_remove_role_from_project_member(self):\n pass", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "async def on_member_remove(member):\r\n pass", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def revoke_role(self, role, principal_ids):", "def remove_user_roles(userid:str, *roles):", "async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")", "async def mute(self, ctx: Context, members: commands.Greedy[discord.Member], reason=\"no reason\"):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for i, member in enumerate(members):\n if role in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already muted\", delete_after=8)\n del members[i]\n\n if role is None:\n permissions = discord.Permissions()\n permissions.change_nickname = True\n permissions.send_messages = False\n permissions.read_message_history = True\n role = await 
ctx.guild.create_role(name=\"Muted\", permissions=permissions)\n\n await self.set_perms(ctx.guild, role)\n\n for member in members:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.add_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} muted {member_display}\")", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n role: discord.Role = await self.check_payload(payload)\n\n if role:\n guild = self.bot.get_guild(payload.guild_id)\n member: discord.Member = guild.get_member(payload.user_id)\n await member.remove_roles(role)", "def unassign_members(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_members\"), kwargs)", "def remove_members(self, REQUEST):\n\n agent = self._get_ldap_agent()\n role_id = REQUEST.form['role_id']\n role_name = get_role_name(agent, role_id)\n country_code = role_id.rsplit('-', 1)[-1]\n\n if not self._allowed(agent, REQUEST, country_code):\n return None\n user_id_list = REQUEST.form.get('user_id_list', [])\n assert isinstance(user_id_list, list)\n\n if user_id_list:\n with agent.new_action():\n for user_id in user_id_list:\n roles_id_list = agent.remove_from_role(role_id,\n 'user',\n user_id)\n log.info(\"%s REMOVED USER %s FROM ROLES %r\",\n logged_in_user(REQUEST), user_id, roles_id_list)\n\n msg = \"Users %r removed from role %s\" % (user_id_list, role_name)\n IStatusMessage(REQUEST).add(msg, type='info')\n\n if '-awp-' in role_id:\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/awps?nfp=%s#role_%s' %\n (country_code, role_id))\n\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/nrcs?nfp=%s#role_%s' %\n (country_code, role_id))", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "async def remove_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n message = \"\"\n removed = []\n not_found = []\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n for role in roles:\n if role.id in roles_list:\n roles_list.remove(role.id)\n removed.append(role.name)\n else:\n not_found.append(role.name)\n if not_found:\n message += \"\\nRole(s) not found in autorole list: {roles}\".format(\n roles=humanize_list(not_found)\n )\n if removed:\n message += \"\\nRole(s) remove from autorole list: {roles}\".format(\n roles=humanize_list(removed)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "def role_remove(role, nodes, 
node):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(node, [], [], [])\n\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n if type(nodes) is list:\n d = defaultdict(list)\n for n in nodes:\n hosts = role_manager.get_role_node_hosts(role, n)\n if not hosts:\n raise NodeRoleError(n, role)\n for hname in hosts:\n d[hname].append(n)\n nodes = defaultdict_to_dict(d)\n else:\n nodes = nodes\n\n if not nodes:\n raise ValueError(f\"No nodes to remove from role {role}\")\n\n result = role_manager.remove_role(role, nodes)\n print(f\"{len(result)} nodes were removed from {role}: {', '.join(sorted(result))}\")\n return 0", "async def unset(self, ctx, *, role_name: str):\n role_name = role_name.lower()\n\n if isinstance(ctx.message.channel, discord.DMChannel):\n guild = await self.get_server_from_pm(ctx)\n else:\n guild = ctx.guild\n\n if guild is None:\n return\n\n await self.remove_role(ctx, role_name, guild)", "def remove_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if l.owner_id != g.user.id:\n raise Http403('Access denied')\n\n if not l.can_modify(g.user.id):\n raise Http403('Access denied')\n\n if request.json is None or 'members' not in request.json:\n raise NoData('\"members\" was not specified')\n\n member_list = request.json['members']\n\n if not isinstance(member_list, list):\n raise IncorrectData('\"members\" was not a valid list')\n\n for member in member_list:\n member = Person.query.get(member)\n\n if member is None:\n continue\n\n try:\n l.members.remove(member)\n except ValueError:\n pass\n\n l.save()\n\n return {}" ]
[ "0.7578296", "0.7440219", "0.72017306", "0.7164568", "0.70579517", "0.7023119", "0.6872467", "0.6871099", "0.67866164", "0.6748588", "0.6680696", "0.6669738", "0.663427", "0.6621332", "0.6523396", "0.6516523", "0.64946425", "0.64583254", "0.6451127", "0.6432923", "0.64100957", "0.63800645", "0.6343822", "0.6340666", "0.63380766", "0.63345325", "0.6318573", "0.63115335", "0.62725836", "0.62601054" ]
0.7633059
0
Add a role to all members in the guild
async def addroleall(self, ctx, role: discord.Role):
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        punished_role = discord.utils.get(ctx.guild.roles, name="Punished")

        if role > ctx.author.top_role:
            return await ctx.send(
                embed=discord.Embed(
                    title="You don't have permission to add this role",
                    timestamp=datetime.datetime.utcnow(),
                    colour=discord.Colour.darker_grey(),
                )
            )

        if role == muted_role or role == punished_role:
            return await ctx.send(
                embed=discord.Embed(
                    title=f"Can not assign *{role}* role using this command.",
                    description="For more information run ```.help addroleall```",
                    timestamp=datetime.datetime.utcnow(),
                    colour=discord.Colour.red(),
                )
            )

        for i in ctx.guild.members:
            if not i.bot:
                await i.add_roles(role)

        await ctx.send(
            embed=discord.Embed(
                title=f"*{role}* has been added to **{len(ctx.guild.members)}** members!",
                timestamp=datetime.datetime.utcnow(),
                colour=discord.Colour.green(),
            )
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def massadd(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help massadd```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "async def addRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.add_roles(*roles)\n await ctx.send(f\"Adding {roles_str(person, roles)}\")", "async def addrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def roles(*args):\n env.salt_roles.extend(args)", "async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ 
parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to {}'.format(role.name, user.name))", "async def _update_member_roles(guild_id: int, member_id: int,\n wanted_roles: list):\n\n # first, fetch all current roles\n roles = await app.db.fetch(\"\"\"\n SELECT role_id from member_roles\n WHERE guild_id = $1 AND user_id = $2\n \"\"\", guild_id, member_id)\n\n roles = [r['role_id'] for r in roles]\n\n roles = set(roles)\n wanted_roles = set(wanted_roles)\n\n # first, we need to find all added roles:\n # roles that are on wanted_roles but\n # not on roles\n added_roles = wanted_roles - roles\n\n # and then the removed roles\n # which are roles in roles, but not\n # in wanted_roles\n removed_roles = roles - wanted_roles\n\n conn = await app.db.acquire()\n\n async with conn.transaction():\n # add roles\n await app.db.executemany(\"\"\"\n INSERT INTO member_roles (user_id, guild_id, role_id)\n VALUES ($1, $2, $3)\n \"\"\", [(member_id, guild_id, role_id)\n for role_id in added_roles])\n\n # remove roles\n await app.db.executemany(\"\"\"\n DELETE FROM member_roles\n WHERE\n user_id = $1\n AND guild_id = $2\n AND role_id = $3\n \"\"\", [(member_id, guild_id, role_id)\n for role_id in removed_roles])\n\n await app.db.release(conn)", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! 
*;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "async def _set_roles(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await guild.create_role(\n name=\"Host\", colour=discord.Color(0xFFBF37),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).host_id.set(host.id)\n await ctx.author.add_roles(host)\n\n player = await guild.create_role(\n name=\"Player\", colour=discord.Color(0x37BFFF),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).player_id.set(player.id)\n\n repl = await guild.create_role(\n name=\"Replacement\", colour=discord.Color(0x86FF40)\n )\n await self.config.guild(guild).repl_id.set(repl.id)\n\n spec = await guild.create_role(\n name=\"Spectator\", colour=discord.Color(0xD837FF)\n )\n await self.config.guild(guild).spec_id.set(spec.id)\n\n dead = await guild.create_role(\n name=\"Dead\", colour=discord.Color(0xDC5757)\n )\n await self.config.guild(guild).dead_id.set(dead.id)\n\n txt = _(\n \"Host: {}\"\n \"\\nPlayer: {}\"\n \"\\nSpectator: {}\"\n \"\\nDead: {}\"\n \"\\nReplacement: {}\"\n ).format(\n host.mention,\n player.mention,\n spec.mention,\n dead.mention,\n repl.mention\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Roles!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Set up required roles!\")", "async def apply_to(self, target: discord.Member, updatedb: bool = True) -> None:\n role = await self.get_role(target.guild, create=True, updatedb=updatedb)\n await target.add_roles(role)", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "async def roles(self, ctx):\n\n pass", "def setRole(self, room, nick, role):\n if role not in ('moderator', 'participant', 'visitor', 'none'):\n raise TypeError\n query = ET.Element('{http://jabber.org/protocol/muc#admin}query')\n item = ET.Element('item', {'role':role, 'nick':nick}) \n query.append(item)\n iq = self.xmpp.makeIqSet(query)\n iq['to'] = room\n result = iq.send()\n if result is False or result['type'] != 'result':\n raise ValueError\n return True", "async def createRole(self, ctx):\n await self.deleteRole(ctx=ctx, reason=\"Début de partie.\")\n await ctx.guild.create_role(name=self.categoryName)\n await asyncio.sleep(1)\n self.roleForPlayer = discord.utils.get(ctx.guild.roles, name=self.categoryName)\n print(\"Role created.\")\n member = await ctx.guild.fetch_member(bot.user.id)\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")\n for member in ctx.author.voice.channel.members:\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")", "async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)", "def add_user_roles(userid:str, *roles):", "def grant_role(self, role, principal_ids):", "async def add_roles(self, ctx: 
commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n errored = \"\"\n message = \"\"\n added = []\n already_added = []\n for role in roles:\n if role >= ctx.author.top_role:\n errored += (\n \"{role}: You can't set a role equal to or higher than your own.\\n\".format(\n role=role.name\n )\n )\n continue\n if role >= ctx.guild.me.top_role:\n errored += (\n \"{role}: You can't set a role that's equal to or higher than the \"\n \"bot.\\n\".format(role=role.name)\n )\n continue\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n if role.id not in roles_list:\n roles_list.append(role.id)\n added.append(role.name)\n else:\n already_added.append(role.name)\n message += errored\n if added:\n message += \"\\nAdded role(s): {roles}\".format(roles=humanize_list(added))\n if already_added:\n message += \"\\nRole(s) already added: {roles}\".format(\n roles=humanize_list(already_added)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)", "async def addrole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n for excluded_role in excluded_roles:\n if excluded_role == role.id:\n await ctx.send(\"%s already added to role exclusion list\" % role.name)\n return\n\n excluded_roles.append(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n\n await ctx.send(\"%s added to role exclusion list\" % role.name)", "async def muterole(self, ctx, *, role: discord.Role):\n await queries.update_setting(ctx, \"guild_settings\", \"mute_role_id\", role.id)\n await util.send_success(ctx, f\"Muting someone now gives them the role {role.mention}\")", "async def alumni(ctx):\n member = ctx.message.author\n div_a_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_A)\n div_b_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_B)\n div_c_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_C)\n await member.remove_roles(div_a_role, div_b_role, div_c_role)\n role = discord.utils.get(member.guild.roles, name=ROLE_ALUMNI)\n if role in member.roles:\n await member.remove_roles(role)\n await ctx.send(\"Removed your alumni status.\")\n else:\n await member.add_roles(role)\n await ctx.send(f\"Added the alumni role, and removed all other division 
roles.\")", "def _add_users_to_role(self, users, rolename):\n role = Role.objects.get(name=rolename, course_id=self.course.id)\n for user in users:\n role.users.add(user)", "async def addRolesOnEmbed(msg):\n for field in msg.embeds[0].fields:\n await msg.add_reaction(field.name[0])", "async def userrole(self, ctx, *, role=None):\n server = ctx.message.guild\n\n if not role:\n result = await self.bot.db.config.find_one({'_id': str(server.id)})\n if result and result.get('user_role'):\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'The current user role is `{result.get(\"user_role\")}`. '\n f'To change it type `{result.get(\"prefix\")}userrole <role name>`')\n else:\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'No user role set. '\n f'To set one type `{result.get(\"prefix\")}userrole <role name>`')\n elif role in [r.name for r in server.roles]:\n await self.bot.db.config.update_one({'_id': str(server.id)}, {'$set': {'user_role': str(role)}}, upsert=True)\n await ctx.send(f'Server role `{role}` can now create and manage their own polls.')\n else:\n await ctx.send(f'Server role `{role}` not found.')", "def update_guild_members(name, server):\n url = base_wow + guild+\"/\"+ server+\"/\"+ name+\"?\"+ method + locale + api\n r = requests.get(url)\n data = r.json()\n guilde = data['name']\n for member in data[\"members\"]:\n add_member(guilde, member['character']['name'], member['rank'], member['character']['level'])" ]
[ "0.7208946", "0.68566215", "0.6601704", "0.6442673", "0.6392668", "0.6327689", "0.63269323", "0.6314798", "0.6309211", "0.62774974", "0.6272708", "0.62459", "0.62431896", "0.62427074", "0.6148689", "0.61357826", "0.613441", "0.61249214", "0.61176383", "0.6112425", "0.6098608", "0.6085526", "0.6072581", "0.6052983", "0.6037095", "0.6021677", "0.6019023", "0.60065204", "0.59855556", "0.597894" ]
0.7853511
0
Remove the role from all members in the guild
async def removeroleall(self, ctx, role: discord.Role):
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        punished_role = discord.utils.get(ctx.guild.roles, name="Punished")

        if role > ctx.author.top_role:
            return await ctx.send(
                embed=discord.Embed(
                    title="You don't have permission to remove this role",
                    timestamp=datetime.datetime.utcnow(),
                    colour=discord.Colour.darker_grey(),
                )
            )

        if role == muted_role or role == punished_role:
            return await ctx.send(
                embed=discord.Embed(
                    title=f"Can not remove *{role}* role using this command.",
                    description="For more information run ```.help removeroleall```",
                    timestamp=datetime.datetime.utcnow(),
                    colour=discord.Colour.red(),
                )
            )

        for i in ctx.guild.members:
            if not i.bot:
                await i.remove_roles(role)

        await ctx.send(
            embed=discord.Embed(
                title=f"*{role}* has been removed from **{len(ctx.guild.members)}** members!",
                timestamp=datetime.datetime.utcnow(),
                colour=discord.Colour.green(),
            )
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def remove_roles(guild):\r\n Rules = Query()\r\n db.remove(Rules.guild == guild.id)\r\n del RULES[guild.id]", "async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def on_guild_remove(guild):\r\n logging.info(\"Left guild %d\", guild.id)\r\n await remove_roles(guild)", "async def remove_from(self, target: discord.Member) -> None:\n role = await self.get_role(target.guild)\n if role:\n await target.remove_roles(role)\n\n if not role.members:\n await role.delete()", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "async def erase(self, guild: discord.Guild):\n role = await self.get_role(guild=guild)\n if role:\n await role.delete()", "def _delete_roles(self):\n for role in self.roles:\n role.delete()", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n 
colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "async def unmute(self, ctx: Context, members: commands.Greedy[discord.Member], *, reason: str = None):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for member in members:\n if role not in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already unmuted\")\n\n else:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.remove_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} unmuted {member_display}\")", "async def unset(self, ctx, *, role_name: str):\n role_name = role_name.lower()\n\n if isinstance(ctx.message.channel, discord.DMChannel):\n guild = await self.get_server_from_pm(ctx)\n else:\n guild = ctx.guild\n\n if guild is None:\n return\n\n await self.remove_role(ctx, role_name, guild)", "async def removeRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.remove_roles(*roles)\n await ctx.send(f\"Removing {roles_str(person, roles)}\")", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")", "async def fulldelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(\r\n f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete role: {role}\"\r\n )\r\n for channel in ctx.guild.channels:\r\n try:\r\n await channel.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]CHANNEL => {Fore.RESET}Failed to delete: {channel}\")", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n role: discord.Role = await self.check_payload(payload)\n\n if role:\n guild = self.bot.get_guild(payload.guild_id)\n member: 
discord.Member = guild.get_member(payload.user_id)\n await member.remove_roles(role)", "def teardown(bot):\n bot.remove_cog('RoleManager')", "async def tod_remove(self, ctx, *args):\n if \"all\" in args:\n for user in self.players:\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n await channel.delete()\n break\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"secret-voice\"):\n await channel.delete()\n break\n self.players = []\n message = \"All players removed from the game!\"\n await ctx.send(message)\n return\n\n for name in args:\n message = \"\"\n size = len(self.players)\n for user in self.players:\n if name == user.mention:\n self.players.remove(user)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n message = f\"{name} removed from the game!\"\n if size == len(self.players):\n message = \"Player not in the game! Check command syntax.\"\n await ctx.send(message)", "async def on_guild_remove(self, guild):\n\t\tself.leaderboards.pop(str(guild.id))\n\t\tawait self.update_state()", "def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "async def alumni(ctx):\n member = ctx.message.author\n div_a_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_A)\n div_b_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_B)\n div_c_role = discord.utils.get(member.guild.roles, name=ROLE_DIV_C)\n await member.remove_roles(div_a_role, div_b_role, div_c_role)\n role = discord.utils.get(member.guild.roles, name=ROLE_ALUMNI)\n if role in member.roles:\n await member.remove_roles(role)\n await ctx.send(\"Removed your alumni status.\")\n else:\n await member.add_roles(role)\n await ctx.send(f\"Added the alumni role, and removed all other division roles.\")", "async def removerole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n if role.id in excluded_roles:\n excluded_roles.remove(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n await ctx.send(\"Removed %s from role exclusion list.\" % role.name)\n else:\n await ctx.send(\"%s is not an excluded role.\" % role.name)", "async def deleteRole(self, ctx, reason=\"No reason available\"):\n for role in ctx.guild.roles:\n if role.name == self.categoryName:\n try:\n await role.delete(reason=reason)\n except discord.errors.Forbidden:\n self.msgToDelete.append(await ctx.message.channel.send(\n \"Erreur, permission non accordée, la suppression des rôles n'est pas complète.\"))\n print(\"Deleted all roles.\")", "def remove_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members if x.id != user.id]\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})", "def revoke_role(self, role, principal_ids):", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "async 
def rmadmin(self, ctx, user: discord.Member):\n self.settings.rmAdmin(user.id)\n await ctx.send(\"done\")", "def test_remove_role_from_project_member(self):\n pass" ]
[ "0.77196056", "0.7477624", "0.74612665", "0.74261963", "0.70653266", "0.6954451", "0.69512653", "0.69299954", "0.68825835", "0.67811483", "0.67630804", "0.67576116", "0.6745457", "0.66221887", "0.6603801", "0.6488937", "0.64209276", "0.63899964", "0.6385136", "0.63824064", "0.63626766", "0.6358443", "0.63528854", "0.6341963", "0.6294061", "0.62825096", "0.6269525", "0.6260506", "0.62462753", "0.62349963" ]
0.79392797
0
Load all IJSONDataProvider providers
def json_data(self):
    json_providers = getAdapters((self.context, self.request, self.view), IJSONDataProvider)
    results = []
    for name, provider in json_providers:
        results.append({'name': name or None, 'data': json.dumps(provider())})
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def get_providers(self):\n \n r = requests.get(\n self._url('/dataproviders'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n providers = r.json()\n self.providers = [p['name'] for p in providers if (p['user'] is not None and p['user']!='SCRIPTING ENGINE')]\n log.info('{:d} providers found'.format(len(self.providers)))\n\n return", "def all_providers(self) -> List[ProviderInfo]:\n sp_key = self.__providers_key()\n value = self.get(name=sp_key)\n if value is None:\n return []\n js = utf8_decode(data=value)\n array = json_decode(string=js)\n return ProviderInfo.convert(array=array)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def get_providers(self):\n datasets = [\n \"Heineken\",\n \"Eisenbahn\",\n \"Corona\",\n \"Brahma\",\n \"Skol\",\n \"Bohemia\"\n ]\n return datasets", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)", "def get(self):\n return get_all_provider()", "def _load_objects(self):\n self._get_package()\n\n object_names = [name for name in dir(self._sdk) if name != \"GATDLSession\" and name != \"SDKInfo\" and name.startswith(\"GA\") and not name.endswith(\"Fetcher\")]\n\n for object_name in object_names:\n obj = getattr(self._sdk, object_name)\n self._objects_mapping[obj.rest_name] = object_name", "def add_providers(self):\n str_providers = PROVIDERS[0] # Providers, called by name\n live_providers = PROVIDERS[1] # Providers, provided as a live module\n for providers in PROVIDERS: # Iterate over the types of providers\n for provider in providers: # Iterate over all the methods\n # Inject those into faker, and swap the numpy instance\n self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers() -> List[Type[ProviderApi]]:\n providers = get_supported_dataset_providers()\n return sorted(providers, key=lambda p: p.priority.value) # type: ignore", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return 
loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def _load_serializers(self):\n global _serializers\n serializers = {}\n for format in BUILTIN_SERIALIZERS:\n self.register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)\n if hasattr(settings, \"SERIALIZATION_MODULES\"):\n for format in settings.SERIALIZATION_MODULES:\n self.register_serializer(format,\n settings.SERIALIZATION_MODULES[format],\n serializers)\n _serializers = serializers", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def _initialize(self):\n configured_providers = self.domain.config[\"DATABASES\"]\n provider_objects = {}\n\n if configured_providers and isinstance(configured_providers, dict):\n if \"default\" not in configured_providers:\n raise ConfigurationError(\"You must define a 'default' provider\")\n\n for provider_name, conn_info in configured_providers.items():\n provider_full_path = conn_info[\"PROVIDER\"]\n provider_module, provider_class = provider_full_path.rsplit(\n \".\", maxsplit=1\n )\n\n provider_cls = getattr(\n importlib.import_module(provider_module), provider_class\n )\n provider = provider_cls(provider_name, self.domain, conn_info)\n\n provider_objects[provider_name] = provider\n\n self._providers = provider_objects", "def _build_observation_providers(self) -> Dict[str, ObservationProvider]:\n pass", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def get_all_adapters(self):\n pass", "def _load_commands(self):\n\n entry_points = pkg_resources.iter_entry_points(\n config.PROVIDER_EP_NAMESPACE)\n for entry_point in entry_points:\n self.logger.debug('found provider %r', entry_point.name)\n self._commands[entry_point.name] = entry_point.load()", "def create_data_providers():\n prov_dict = {}\n with custom_mp_config(\n get_test_data_path().parent.joinpath(\"msticpyconfig-test.yaml\")\n ):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n if _KQL_IMP_OK:\n prov_dict[\"az_sent_prov\"] = QueryProvider(\"MSSentinel\")\n prov_dict[\"mdatp_prov\"] = QueryProvider(\"MDE\")\n if _SPLUNK_IMP_OK:\n prov_dict[\"splunk_prov\"] = QueryProvider(\"Splunk\")\n prov_dict[\"ti_lookup\"] = TILookup()\n prov_dict[\"geolite\"] = GeoLiteLookup()\n\n if _IPSTACK_IMP_OK:\n prov_dict[\"ip_stack\"] = ip_stack_cls()\n return prov_dict", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def _load_apis(self):\n cannabis_reports = __import__('cannabis_reports.apis')\n for class_name in cannabis_reports.apis.__all__:\n if not class_name.startswith('_'):\n cls = getattr(cannabis_reports.apis, class_name)\n api = AuthProxy(self.session, cls)\n setattr(self, class_name, api)\n self.__apis__[class_name] = api", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()" ]
[ "0.6913169", "0.64779794", "0.6103122", "0.60407805", "0.6022248", "0.586881", "0.5856722", "0.57174754", "0.56971353", "0.5516536", "0.5515115", "0.5500439", "0.5500439", "0.5500439", "0.5500439", "0.5500439", "0.5500439", "0.5453203", "0.54512775", "0.5449996", "0.54412305", "0.5431389", "0.5425756", "0.54081774", "0.53626627", "0.53537965", "0.5343183", "0.5315757", "0.5249365", "0.5227323" ]
0.7103289
0
Load all IDOMDataProvider providers
def dom_data(self):
    dom_providers = getAdapters((self.context, self.request, self.view), IDOMDataProvider)
    results = []
    for name, provider in dom_providers:
        results.append({'name': name or None, 'data': provider()})
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def get_providers(self):\n \n r = requests.get(\n self._url('/dataproviders'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n providers = r.json()\n self.providers = [p['name'] for p in providers if (p['user'] is not None and p['user']!='SCRIPTING ENGINE')]\n log.info('{:d} providers found'.format(len(self.providers)))\n\n return", "def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)", "def get_providers(self):\n datasets = [\n \"Heineken\",\n \"Eisenbahn\",\n \"Corona\",\n \"Brahma\",\n \"Skol\",\n \"Bohemia\"\n ]\n return datasets", "def get(self):\n return get_all_provider()", "def add_providers(self):\n str_providers = PROVIDERS[0] # Providers, called by name\n live_providers = PROVIDERS[1] # Providers, provided as a live module\n for providers in PROVIDERS: # Iterate over the types of providers\n for provider in providers: # Iterate over all the methods\n # Inject those into faker, and swap the numpy instance\n self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])", "def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()", "def providers(self):\n return [p for p in self._db.providers.values() if self._dbattr(p.IDATTR)]", "def providers(self):\n return [p for p in self._db.providers.values() if self._dbattr(p.IDATTR)]", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()", "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "def load_elements(self):\n for path in self.element_paths:\n self.process_path(path)", "def addAllFactories(self) -> None:\n ...", "def load_demos():\n for index in range(len(feconf.DEMO_EXPLORATIONS)):\n load_demo(str(index))", "def get_all_providers() -> list[str]:\n return list(ALL_PROVIDERS)", "def get_providers() -> List[Type[ProviderApi]]:\n providers = get_supported_dataset_providers()\n return sorted(providers, key=lambda p: p.priority.value) # type: ignore", "def _load_commands(self):\n\n entry_points = pkg_resources.iter_entry_points(\n config.PROVIDER_EP_NAMESPACE)\n for entry_point in entry_points:\n self.logger.debug('found provider %r', entry_point.name)\n self._commands[entry_point.name] = entry_point.load()", "def _load_modules(self):\n moduledocs = self._docset.get_compounds(xml.Group,\n lambda x: x.get_name().startswith('module_'))\n for moduledoc in moduledocs:\n moduleobj = self._modules.get(moduledoc.get_name())\n if not moduleobj:\n self._reporter.input_error(\n \"no matching directory for module: {0}\".format(moduledoc))\n continue\n moduleobj.set_doc_xml(moduledoc, self)\n self._docmap[moduledoc] = moduleobj", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n 
self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def get_all_adapters(self):\n pass", "def load_dependencies(self):\n all_elements = expand_dependencies(\n self.elements, ':'.join(self.element_paths))\n self.elements = all_elements", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys", "def get_providers(self):\n return self.keys" ]
[ "0.6451605", "0.62758476", "0.62439394", "0.60339785", "0.5897526", "0.5797458", "0.57533056", "0.55735177", "0.5567528", "0.5532424", "0.5518296", "0.5518296", "0.5508206", "0.5463152", "0.5460798", "0.5414698", "0.53313816", "0.5312492", "0.53124434", "0.5306001", "0.53031135", "0.52842194", "0.5276029", "0.52709883", "0.5265207", "0.5265207", "0.5265207", "0.5265207", "0.5265207", "0.5265207" ]
0.6828622
0
Calculate kinetic energy density using the Laplacian of orbitals
def calc_ked_WFI(self):
    #Initialize kinetic energy density
    self.ked_WFI = np.zeros( (self.grid.Nelem, 1))

    #Figure out the number of occupied orbitals
    if self.m == 0:
        if self.pol == 1:
            Nocc = np.floor(self.N/2)
            nu = self.N / 2 - Nocc
        else:
            Nocc = np.floor(self.N)
            nu = self.N - Nocc
    else:
        #m>0 orbitals hold twice as many electrons due to +-m symmetry
        if self.pol == 1:
            Nocc = np.floor(self.N / 4)
            nu = self.N / 4 - Nocc
        else:
            Nocc = np.floor(self.N/2)
            nu = self.N / 2 - Nocc

    #Construct density
    for i in range(int(Nocc)):
        # print("phi from pssolver", self.phi)
        # print("phi subset", self.phi[:,i])
        # print("integrate returns", self.grid.integrate( self.phi[:,i]**2 )**0.5)

        #Normalized orbital
        phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5
        phi_norm = phi_norm[:, None]
        self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]

    #If we are doing fractional orbitals and are non-integer
    if self.FRACTIONAL is True and nu != 0:
        #Normalized orbital
        phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5
        phi_norm = phi_norm[:, None]
        self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]

    #Scale densities appropriately
    if self.m == 0:
        if self.pol == 1: #Unpolarized electrons
            self.ked_WFI = 2 * self.ked_WFI
    else: # m>0 orbitals hold twice as many electrons due to +-m symmetry
        if self.pol == 1:
            self.ked_WFI = 4 * self.ked_WFI
        else:
            self.ked_WFI = 2 * self.ked_WFI
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def kin_energy (self):\n\n for planet in self.planets:\n planet.kenergy = 0.5*planet.mass*((np.linalg.norm(planet.velocity))**2) # every 'kenergy' depends by the body's mass and velocity", "def kinetic_energy(vel):\r\n return 0.5 * (vel ** 2).sum(axis=1)", "def Local_Kinetic(Walker):\n\n # laplacian -0.5 \\nabla^2 \\Psi / \\Psi\n h = 0.001\n h2 = h*h\n K = 0.0\n Psi_R = wfs(Walker)\n for i in range(Walker.Ne):\n for j in range(Walker.sys_dim):\n Y=Walker.Re[i][j]\n Walker.Re[i][j]-=h\n wfs1 = wfs(Walker)\n Walker.Re[i][j]+=2.0*h\n wfs2 = wfs(Walker)\n K -= 0.5*(wfs1+wfs2-2.0*Psi_R)/h2\n Walker.Re[i][j]=Y\n return K/Psi_R", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def kervella(magB=None, magV=None, magK=None):\n if magB is None or np.isnan(magB) or magB > 49:\n magB = np.nan\n if magV is None or np.isnan(magV) or magV > 49:\n magV = np.nan\n if magK is None or np.isnan(magK) or magK > 49:\n magK = np.nan\n const1 = np.array([0.0755, 0.0535])\n const2 = np.array([0.5170, 0.5159])\n mag = np.array([magV, magB])\n vals = 10**(const1*(mag-magK)+const2-0.2*magK)\n diameter = {}\n if not np.isnan(vals[0]):\n diameter['V'] = vals[0]*u.mas\n if not np.isnan(vals[1]):\n diameter['B'] = vals[1]*u.mas\n return diameter", "def density(ensembles):\n if len(ensembles.shape) < 2:\n return ketbra(ensembles)\n else:\n den_mat = ketbra(ensembles[0])\n for i in range(1, len(ensembles)):\n den_mat += ketbra(ensembles[i])\n den_mat /= len(ensembles)\n return den_mat", "def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. 
print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def getDensity(h, R_w, R_sun): # k is a fitting constant\n\n R = np.sqrt(R_w**2+h**2)\n r = R/R_sun # units need to be in solar radii \n a = 77.1\n b = 31.4\n c = 0.954\n d = 8.30\n e = 0.550\n f = 4.63\n\n return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8 #[cm-3]", "def plot_dispersion(kpts, enk):\n\n # Lattice constant and reciprocal lattice vectors\n # b1 = 2 pi/a (kx - ky + kz)\n # b2 = 2 pi/a (kx + ky - kz)\n # b3 = 2 pi/a (-kx + ky + kz)\n a = 5.556 # [A]\n b1 = (2 * np.pi / a) * np.array([1, -1, 1])\n b2 = (2 * np.pi / a) * np.array([1, 1, -1])\n b3 = (2 * np.pi / a) * np.array([-1, 1, 1])\n\n # L point in BZ is given by 0.5*b1 + 0.5*b2 + 0.5*b3\n # X point in BZ is given by 0.5*b2 + 0.5*b3\n lpoint = 0.5 * (b1 + b2 + b3)\n xpoint = 0.5 * (b2 + b3)\n\n # We can find kpoints along a path just by considering a dot product with lpoint and xpoint vectors.\n # Any kpoints with angle smaller than some tolerance are considered on the path and we can plot their frequencies\n deg2rad = 2 * np.pi / 360\n ang_tol = 1 * deg2rad # 1 degree in radians\n\n print(list(kpts))\n\n enkonly = np.array(enk['energy [Ryd]'])[:, np.newaxis]\n enkinds = np.array(enk['q_inds'])\n kptsonly = np.array(kpts[['kx [1/A]', 'ky [1/A]', 'kz [1/A]']]) / (2 * np.pi / a)\n kptsinds = np.array(kpts['q_inds'])\n kptsmag = np.linalg.norm(kptsonly, axis=1)[:, np.newaxis]\n\n dot_l = np.zeros(len(kpts))\n dot_x = np.zeros(len(kpts))\n\n # Separate assignment for gamma point to avoid divide by zero error\n nongamma = kptsmag != 0\n dot_l[np.squeeze(nongamma)] = np.divide(np.dot(kptsonly, lpoint[:, np.newaxis])[nongamma],\n kptsmag[nongamma]) / np.linalg.norm(lpoint)\n dot_x[np.squeeze(nongamma)] = np.divide(np.dot(kptsonly, xpoint[:, np.newaxis])[nongamma],\n kptsmag[nongamma]) / np.linalg.norm(xpoint)\n dot_l[np.squeeze(kptsmag == 0)] = 0\n dot_x[np.squeeze(kptsmag == 0)] = 0\n\n lpath = np.logical_or(np.arccos(dot_l) < ang_tol, np.squeeze(kptsmag == 0))\n xpath = np.logical_or(np.arccos(dot_x) < ang_tol, np.squeeze(kptsmag == 0))\n\n linds = kptsinds[lpath]\n xinds = kptsinds[xpath]\n lkmag = kptsmag[lpath]\n xkmag = kptsmag[xpath]\n\n plt.figure()\n\n for i, ki in enumerate(linds):\n energies = enkonly[enkinds == ki, 0]\n thiskmag = lkmag[i]\n if len(energies) > 1:\n veck = np.ones((len(energies), 1)) * thiskmag\n plt.plot(veck, energies, '.', color='C0')\n else:\n plt.plot(thiskmag, energies, '.', color='C0')\n\n for i, ki in enumerate(xinds):\n energies = enkonly[enkinds == ki, 0]\n thiskmag = lkmag[i]\n if len(energies) > 1:\n veck = np.ones((len(energies), 1)) * thiskmag\n plt.plot(-1 * veck, energies, '.', color='C1')\n else:\n 
plt.plot(-1 * thiskmag, energies, '.', color='C1')\n\n plt.xlabel('k magnitude')\n plt.ylabel('Energy in Ry')", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def _analytical_encircled_energy(fno, wavelength, points):\n p = points * np.pi / fno / wavelength\n return 1 - special.j0(p)**2 - special.j1(p)**2", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def lawsonite():\n\n rho = 3090.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 214.; C[0,1] = 69.; C[0,2] = 82.; C[0,3] = 0.; C[0,4] = 0.; 
C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 226.; C[1,2] = 65.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 259.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 17.\n\n return C, rho", "def kts(self):\n return CAL_TO_J * 0.0077 * (self.rho/1000.0) * (self.rho/1000.0)", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def compute_density(\n traj,\n area,\n surface_normal_dim=2,\n pore_center=0.0,\n max_distance = 1.0,\n bin_width = 0.01\n ):\n distances = traj.xyz[:,:,surface_normal_dim] - pore_center\n bin_centers = []\n density = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width\n )\n bin_centers.append(bin_center)\n #changed the density from original below: added the conversion factor nm^2 to A^2 to mult by 10**2\n #density.append(mask.sum() / (area * bin_width * traj.n_frames))\n density.append(10**2 * mask.sum() / (area * bin_width * traj.n_frames))\n return bin_centers, density", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def kl_divergence(self) -> Tensor:\n return self.variational_strategy.kl_divergence().sum(dim=1).mean()", "def computeChargeDensity(self):\n \n self.rho = np.zeros((self.ni, self.nj, self.nk))\n \n for species in self.speciesList:\n if species.charge!=0:\n self.rho += species.charge*species.den", "def kinetic_energies(self):\n return sum([body.kinetic_energy\n for body in self.bodies])", "def all_dhkl(self, crystal):\n #d_min = self.wavelength/self.max2theta*pi/2\n d_min = self.wavelength/sin(self.max2theta/2)/2\n \n # This block is to find the shortest d_hkl, \n # for all basic directions (1,0,0), (0,1,0), (1,1,0), (1,-1,0) and so on, 26 in total \n hkl_max = np.array([1,1,1])\n for h1 in [-1, 0, 1]:\n for k1 in [-1, 0, 1]:\n for l1 in [-1, 0, 1]:\n hkl_index = np.array([[h1,k1,l1]])\n d = float(np.linalg.norm( np.dot(hkl_index, crystal.rec_matrix), axis=1))\n if d>0:\n multiple = 1/d/d_min\n hkl_index *= round(multiple)\n for i in range(len(hkl_max)):\n if hkl_max[i] < hkl_index[0,i]:\n hkl_max[i] = hkl_index[0,i]\n #h1 = 2*ceil(np.linalg.norm(crystal.cell_para[0])/d_min)\n #k1 = 2*ceil(np.linalg.norm(crystal.cell_para[1])/d_min)\n #l1 = 2*ceil(np.linalg.norm(crystal.cell_para[2])/d_min)\n h1, k1, l1 = hkl_max\n h = np.arange(-h1,h1)\n k = np.arange(-k1,k1)\n l = np.arange(-l1,l1)\n \n hkl = np.array((np.meshgrid(h,k,l))).transpose()\n hkl_list = np.reshape(hkl, [len(h)*len(k)*len(l),3])\n hkl_list = hkl_list[np.where(hkl_list.any(axis=1))[0]]\n d_hkl = 1/np.linalg.norm( np.dot(hkl_list, crystal.rec_matrix), axis=1)\n #for ix, a in enumerate(hkl_list):\n # if np.array_equal(a, np.array([1,-1,3])) is True:\n # print(a)\n # break\n #\n #print(ix, hkl_list[ix], d_hkl[ix], d_min)\n\n shortlist = d_hkl > (d_min)\n 
d_hkl = d_hkl[shortlist]\n hkl_list = hkl_list[shortlist]\n sintheta = self.wavelength/2/d_hkl\n\n self.theta = np.arcsin(sintheta)\n self.hkl_list = hkl_list\n self.d_hkl = d_hkl\n \n #return hkl_list, d_hkl, sintheta", "def air_density(self):\n return self.flow_field.air_density", "def D(z):\n k=0.01 #Our choice of large-scale mode\n mPk=cosmo.pk(k,z)\n mPk_norm=cosmo.pk(k,0) #Normalize at z=0\n D=np.sqrt(mPk/mPk_norm)\n return D", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9" ]
[ "0.6711679", "0.6050671", "0.5996172", "0.59738076", "0.59571743", "0.5953587", "0.59038174", "0.5898051", "0.5881261", "0.58746827", "0.58423156", "0.583687", "0.5827854", "0.58256716", "0.5778814", "0.57494277", "0.5736355", "0.57223487", "0.5695155", "0.56936395", "0.5681797", "0.5679147", "0.5675262", "0.56635875", "0.56557703", "0.5637398", "0.56368303", "0.5628931", "0.56225204", "0.56193155" ]
0.69676065
0
Create the user agent for IotHub
def get_iothub_user_agent() -> str: return "{iothub_iden}/{version}{common}".format( iothub_iden=IOTHUB_IDENTIFIER, version=VERSION, common=_get_common_user_agent(), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {\"http.user_agent\": user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])", "def user_agent(self):\n version = '{0}.{1}.{2}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n return \"PAYNL/SDK/{0} Python/{1} ({2})\".format(self.client_version, version, sys.hexversion)", "def setUA(self, useragent):\n\t\tpass", "def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_USER_AGENT: user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])", "def build_user_agent():\n if any(key.startswith(prefix) for prefix in TESTING_ENV_PREFIXES for key in os.environ.keys()):\n testing = \" (testing) \"\n else:\n testing = \" \"\n os_platform = \"{0.system}/{0.release} ({0.machine})\".format(utils.get_os_platform())\n return \"charmcraft/{}{}{} python/{}\".format(\n __version__, testing, os_platform, platform.python_version()\n )", "def user_agent_identifier():\n client_info = (get_version(), platform.system(), platform.machine())\n return \"txclient/%s (%s %s)\" % client_info", "def userAgent(self):\n raise NotImplementedError", "def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def 
build_user_agent(application_name, version, url):\n return '%s/%s %s/%s (+%s)' % (application_name, version,\n 'python-simplemediawiki', __version__, url)", "def get_user_agent(user_agent: str | None) -> str:\r\n from wikibaseintegrator import __version__\r\n wbi_user_agent = f\"WikibaseIntegrator/{__version__}\"\r\n\r\n if user_agent is None:\r\n return_user_agent = wbi_user_agent\r\n else:\r\n return_user_agent = user_agent + ' ' + wbi_user_agent\r\n\r\n return return_user_agent", "def get_manubot_user_agent() -> str:\n try:\n from manubot import __version__ as manubot_version\n except ImportError:\n manubot_version = \"\"\n return (\n f\"manubot/{manubot_version} \"\n f\"({platform.system()}; Python/{sys.version_info.major}.{sys.version_info.minor}) \"\n f\"<{contact_email}>\"\n )", "def user_agent():\n ua_list = [\n\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);\",\n\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)\",\n\"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n\"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 \",\n]\n return random.choice(ua_list)", "def user_agent(name, version):\n\n def _interpreter():\n name = platform.python_implementation()\n version = platform.python_version()\n bitness = platform.architecture()[0]\n if name == 'PyPy':\n version = '.'.join(map(str, sys.pypy_version_info[:3]))\n full_version = [version]\n if bitness:\n full_version.append(bitness)\n return name, \"-\".join(full_version)\n\n tags = [\n (name, version),\n (\"python\", platform.python_version()),\n _interpreter(),\n (\"machine\", platform.machine() or 'unknown'),\n (\"system\", platform.system() or 'unknown'),\n (\"platform\", platform.platform() or 'unknown'),\n ]\n\n return ' '.join(\"{}/{}\".format(name, version) for name, version in tags)", "def user_agent(self) -> str:\n return self.root_hartree.user_agent", "def get_user_agent(platform=None):\n if isinstance(platform, ustr):\n platform = platform.upper()\n return {\"chrome\": AGENT_CHROME, \"edge\": AGENT_EDGE, \"ios\": AGENT_IOS}.get(\n platform, random.choice(AGENT_ALL)\n )", "def getUA(self):\n\t\tself.script(\"return navigator.userAgent\")", "def __init__(self):\n \n self.ua = USERAGENT\n self.br = mechanize.Browser()\n self.br.addheaders = [('User-Agent', self.ua)]\n print \"Browser initialized with user agent\"\n\n self.login()", "def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n ua = f\"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_tf_available():\n ua += f\"; tensorflow/{_tf_version}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_training_run_on_sagemaker():\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in define_sagemaker_information().items())\n # CI will 
set this value to True\n if os.environ.get(\"TRANSFORMERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua", "def get_user_agent(faked=False):\n if faked:\n agent = 'curl/7.21.4 (universal-apple-darwin11.0) libcurl/7.21.4 OpenSSL/0.9.8r zlib/1.2.5'\n\n else:\n from bowerer import VERSION\n from platform import platform\n agent = 'bowerer/%s (%s)' % ('.'.join(map(str, VERSION)), platform(terse=True))\n\n return agent", "def user_agent(self):\n # type: () -> str\n return self.user_agent_policy.user_agent", "def user_agent():\n headers = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 
Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 OPR/46.0.2597.57',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 
6.0; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1',\n 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',\n ]\n return {'User-Agent': headers[random.randrange(0, len(headers))]}", "def format_user_agent(name=None):\n parts = ['TronAPI/%s' % tronapi.__version__,\n '%s/%s' % (platform.python_implementation(),\n platform.python_version())]\n if name:\n parts.insert(0, name)\n return ' '.join(parts)", "def UserAgent(self):\n return self._userAgent", "def view_user_agent():\n\n headers = get_headers()\n\n return jsonify({\"user-agent\": headers[\"user-agent\"]})", "def create_fakeheader(ua,browsers):\n\n headers = {'User-Agent': pick_random_fakeheader(ua, browsers)}\n return headers", "def get_new_user_agent(self):\n new_user_agent = user_agent.generate_navigator()[\"user_agent\"]\n if new_user_agent == self.user_agent:\n self.get_new_user_agent()\n\n return new_user_agent", "def _set_agent_header(self):\n self._api_client.set_default_header('User-Agent', self._api_client.user_agent)", "def _create_properties(self, user_agent=None): # pylint: disable=no-self-use\n properties = {}\n properties[\"product\"] = \"eventhub.python\"\n properties[\"version\"] = __version__\n properties[\"framework\"] = \"Python {}.{}.{}\".format(*sys.version_info[0:3])\n properties[\"platform\"] = sys.platform\n\n final_user_agent = 'azsdk-python-eventhub/{} ({}; {})'.format(\n __version__, properties[\"framework\"], sys.platform)\n if user_agent:\n final_user_agent = '{}, {}'.format(final_user_agent, user_agent)\n\n if len(final_user_agent) > MAX_USER_AGENT_LENGTH:\n raise ValueError(\"The user-agent string cannot be more than {} in length.\"\n \"Current user_agent string is: {} with length: {}\".format(\n MAX_USER_AGENT_LENGTH, final_user_agent, len(final_user_agent)))\n\n properties[\"user-agent\"] = final_user_agent\n return properties", "def _random_user_agent(self):\n try:\n ua = 
UserAgent()\n return ua.random\n except:\n default_ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) \\\n AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/58.0.3029.110 Safari/537.36'\n return default_ua" ]
[ "0.65058565", "0.6411603", "0.63528496", "0.63170594", "0.62735516", "0.62172145", "0.62084913", "0.6191572", "0.6191572", "0.6155119", "0.61193496", "0.60615385", "0.605927", "0.6007714", "0.5996502", "0.5863477", "0.5844187", "0.57906896", "0.57821304", "0.5745263", "0.57273483", "0.5707266", "0.5666119", "0.56425285", "0.56273264", "0.5603001", "0.5550024", "0.5549103", "0.55345654", "0.55299795" ]
0.6565932
0
Test conversion of html with png data url to pdf
def testConvertHtmlWithPngDataUrlToPdf(self): self._testBase("data/test_with_png_dataurl.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_raw_pdf(html_path, pdf_path, width='', height=''):\n debug = False\n if mg.EXPORT_IMAGES_DIAGNOSTIC: debug = True\n try:\n url = html_path.as_uri()\n cmd_make_pdf = 'cmd_make_pdf not successfully generated yet'\n \"\"\"\n Unless Linux, MUST be in report directory otherwise won't carry across\n internal links.\n\n Re: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/ntcmds_shelloverview.mspx?mfr=true\n \"\"\"\n ## clear decks first so we can tell if image made or not\n try:\n os.remove(pdf_path)\n except Exception:\n pass\n rel_url = os.path.split(url)[1]\n cd_path = os.path.split(html_path)[0]\n if mg.PLATFORM == mg.WINDOWS: ## using Pyinstaller\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{export_output.EXE_TMP}\\\\wkhtmltopdf.exe\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.MAC:\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{mg.MAC_FRAMEWORK_PATH}/wkhtmltopdf\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.LINUX:\n cmd_make_pdf = f'wkhtmltopdf {width} {height} \"{url}\" \"{pdf_path}\"'\n else:\n raise Exception('Encountered an unexpected platform!')\n ## wkhtmltopdf uses stdout to actually output the PDF - a good feature but stuffs up reading stdout for message\n if debug: print(f'cmd_make_pdf: {cmd_make_pdf}')\n export_output.shellit(cmd_make_pdf)\n if not os.path.exists(pdf_path):\n raise Exception(\n f\"wkhtmltopdf didn't generate error but {pdf_path} not made \"\n f'nonetheless. cmd_make_pdf: {cmd_make_pdf}')\n if debug: print(f'Initial processing of {html_path} complete')\n except Exception as e:\n raise Exception(\n f'get_raw_pdf command failed: {cmd_make_pdf}. Orig error: {b.ue(e)}')\n return pdf_path", "def testConvertHtmlWithScriptToPdf(self):\n self._testBase(\"data/test_with_script.html\")", "def testConvertHtmlWithOpacityStyleToPdf(self):\n self._testBase(\"data/test_with_opacity_style.html\")", "def _produce_pdf_as_an_attachment(self, html, pdf_details):\n result = BytesIO()\n pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n return result.getvalue(), pdf_details", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def scrape (url, pdf_filename, pdf_page_size=PDF_PAGE_SIZE, folder=OUTPUT_FOLDER, clean_it=True):\n\n raw_html = get_url(url)\n if raw_html is None:\n print \"Sorry, could not read \", url\n else:\n filename_prefix, file_ext = os.path.splitext(pdf_filename)\n if clean_it:\n title = Document(raw_html).short_title()\n content = Document(raw_html).summary(html_partial=True)\n frame = HTML_FRAME.substitute(content=to_unicode(content),\n url=url,\n title=title)\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), frame)\n else:\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), raw_html)\n\n if source:\n generate_pdf (folder, filename_prefix, pdf_page_size)", "def preview():\r\n html = create_html_report()\r\n return html", "def png_from_pdf(pdf_data, density):\n d = mkdtemp()\n try:\n tmpfile = os.path.join(d, 'file.pdf')\n with open(tmpfile, 'wb') as f:\n f.write(pdf_data)\n \n out = os.path.join(d, 'file.png')\n \n cmd = [\n 'convert',\n '-density', str(density), \n tmpfile, \n '-background', 'white',\n '-alpha','remove',\n '-alpha','off', \n ]\n shave = True\n if shave:\n# warnings.warn('Using shave to fix some bug in imagemagic')\n cmd += ['-shave', '1']\n cmd += ['-strip']\n cmd += [out]\n try:\n res = system_cmd_result(cwd='.', 
cmd=cmd,\n display_stdout=False,\n display_stderr=False,\n raise_on_error=True)\n \n if not os.path.exists(out):\n msg = \"ImageMagick did not fail, but it didn't write the image it promised.\"\n msg += \"\\n\"+indent(\" \".join(cmd), \" invocation: \") \n msg += \"\\n\"+ indent(res.stdout or \"(no output)\", '|', 'stdout: |')\n msg += \"\\n\"+ indent(res.stderr or \"(no output)\", '|', 'stderr: |')\n where = 'problematic.pdf'\n msg += \"\\n I will copy the problematic pdf file to %s\" % where\n shutil.copy(tmpfile, where)\n raise CmdException(msg)\n\n except CmdException as e:\n msg = 'I was not able to use Imagemagick to convert an image.'\n \n try: \n version = system_cmd_result(cwd='.', cmd=['convert', '--version'],\n display_stdout=False,\n display_stderr=False,\n raise_on_error=True)\n msg += '\\n ImageMagick \"convert\" version:'\n msg += '\\n' + indent(version.stdout, ' | ')\n except: \n pass\n raise_wrapped(ConversionError, e, msg, compact=True)\n \n r = open(out,'rb').read()\n return r\n finally:\n shutil.rmtree(d)", "def pdf_page_to_png(src_pdf, pagenum=0, resolution=154):\n\n #check_dependencies(__optional_dependencies__['pdf'])\n # Import libraries within this function so as to avoid import-time dependence\n\n dst_pdf = PyPDF2.PdfFileWriter()\n src_pdf = w(filename=src_pdf,resolution=300)\n dst_pdf.addPage(src_pdf.getPage(pagenum))\n\n pdf_bytes = io.BytesIO()\n dst_pdf.write(pdf_bytes)\n pdf_bytes.seek(0)\n\n img = Image(file=pdf_bytes, resolution=resolution)\n \n with img.convert('png') as converted:\n converted.save(filename='converted.png')\n return img", "def test_malformed_pdf(self):\n paper = factories.Paper.create(document=factory.django.FileField(\n data=b\"\"))\n paper_url = \"{}/{}\".format(EXTRACT_URL, paper.unique_id)\n c = django.test.Client()\n # Extract all at once\n d = json.loads(c.get(paper_url).content)\n self.assertEqual({\"error\"}, set(d.keys()))", "def pdf2png(pdf_input_path, png_output_path):\n args = [\"pdf2png\", # actual value doesn't matter\n \"-dNOPAUSE\",\n \"-sDEVICE=png\",\n \"-r144\",\n \"-sOutputFile=\" + png_output_path,\n pdf_input_path]\n ghostscript.Ghostscript(*args)", "def test_html_report_save_figure(self):\n report_fp = io.StringIO() # Write report to in-memory file object.\n report_fp.name = '/test/report/index.html'\n report = plot.HtmlReport(report_fp, 'Test report')\n self.assertEqual(report.filename, '/test/report/index.html')\n fig = matplotlib.figure.Figure(figsize=(7, 4.5), dpi=100)\n ax = fig.add_subplot(1, 1, 1)\n t = np.linspace(-12, 12, 200)\n ax.plot(t, np.sinc(t))\n\n fig_fp = io.BytesIO()\n fig_fp.name = '/test/report/images/fig.png'\n report.save_figure(fig_fp, fig)\n\n html = report_fp.getvalue()\n\n self.assertIn('<img src=\"images/fig.png\" width=700 height=450>', html)\n self.assertTupleEqual((700, 450), Image.open(fig_fp).size)", "def create_pdf(html, options):\n\n # TODO: we will change this path, or use an other library for converting PDF!\n # TODO: otherwise just say that wkhtmltopdf needs to be pre-installed (and how) and added to windows path\n path_wkthmltopdf = \"C:/Program Files/wkhtmltopdf/bin/wkhtmltopdf.exe\"\n config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\n\n pdfkit.from_file(html, html.replace(\".html\", \".pdf\"), configuration=config, options=options)\n return html + \".pdf\"", "def generate_pdf(file_path_or_url, data_type, filename):\n file_path = get_pdf_file_path(filename)\n if data_type == TYPE_FILE:\n try:\n HTML(filename=file_path_or_url).write_pdf(file_path)\n finally:\n 
default_storage.delete(file_path_or_url)\n else:\n HTML(file_path_or_url).write_pdf(file_path)\n return filename", "def test_pdf_generation():\n pdf = factories.ReceiptPDFFactory(receipt__receipt_number=3)\n factories.ReceiptValidationFactory(receipt=pdf.receipt)\n pdf.save_pdf()\n regex = r\"afip/receipts/[a-f0-9]{2}/[a-f0-9]{2}/[a-f0-9]{32}.pdf\"\n\n assert re.match(regex, pdf.pdf_file.name)\n assert pdf.pdf_file.name.endswith(\".pdf\")", "def isPdf(page):\n return page['data'][:4] == '%PDF'", "def render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return None", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. \n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def test_html_output(self):\n pass", "def test_nonfree_175(self):\n self.extract_images(full_path('../samples/nonfree/175.pdf'))", "def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config = pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response", "def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def create_pdf(f,s1,s2='',s3=''):\n # does not need reportlab!\n if s1 == 'White Ballot': s1 = '\"'+'_'*10+'\"'\n cod = zlib.compress('BT /F1 16 Tf ET\\r\\nBT 300 270 Td (%s) Tj ET\\r\\nBT /F1 48 Tf ET\\r\\nBT 5 180 Td (%16s) Tj ET\\r\\nBT /F1 12 Tf ET\\r\\nBT 10 50 Td (%s) Tj ET'%(s3,s1,s2))\n open(f,'w').write(create_pdf.__doc__ + '/Length %d>>\\nstream\\n'%len(cod) + cod + 'endstream endobj\\ntrailer<</Root 4 
0 R>>')", "def convert_to_pdf(self, news_list):\n self.logger.info(\"Converting news to PDF...\")\n self.prepare_storage()\n self.process_news_list_with_images(news_list)\n content = self.generate_html_template(news_list)\n pdf = io.BytesIO()\n pisa.pisaDocument(content, pdf)\n self.write_to_file(pdf.getvalue())", "def make_pdf_from_raw_html(self, html, filename: str, options=None):\n return pdfkit.from_string(html, filename, configuration=self._get_pdfkit_config(), options=options)", "def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)", "def latex2img(expression, filename):\n webp = False\n\n extension = \"png\"\n\n # Preparing text strings\n server = \"http://latex.codecogs.com/\" + extension + \".download?\"\n fullname = filename + \".\" + extension\n size = \"%5Cdpi%7B100%7D%20\"\n\n # Quote expression引用表达式\n expression = quote(expression)\n url = server + size + expression\n\n # Download file from url and save to output_file:\n with urlopen(url) as response, open(fullname, 'wb') as output_file:\n data = response.read() # Un objeto \"bytes\"\n output_file.write(data) # Se escribe en disco\n\n if webp:\n img2webp(fullname)\n extension = \"webp\"\n\n return filename + \".\" + extension", "def pdf2split_html(pdf, saveto, left=0, right=0, top=0, bottom=0, res=100):\n print(\"- Opening pdf file: \", pdf)\n with(wand.image.Image(filename=pdf, resolution=res)) as document:\n print(\"- getting pages\")\n pages=document.sequence\n n_pages=len(pages)\n width, height, _, _ = pages[0].page\n mid = width//2\n html = []\n\n print(\"- creating output dir\")\n if not os.path.exists(saveto):\n os.makedirs(saveto)\n\n print(\"- splitting pages\")\n for i, page in enumerate(pages):\n left_side = page[left:mid, top:height-bottom]\n right_side = page[mid:width-right, top:height-bottom]\n left_side.save(filename=os.path.join(saveto, \"{:03d}_a.jpg\".format(i)))\n right_side.save(filename=os.path.join(saveto, \"{:03d}_b.jpg\".format(i)))\n\n # Append these two images to the html page\n html.append(\"<img src='{0:03d}_a.jpg'/><br><img src='{0:03d}_b.jpg'/><br>\".format(i))\n\n print(\"- creating html page\")\n with open(os.path.join(saveto, \"index.html\"), mode = \"w\") as textFile:\n html = \"\\n\".join(html)\n textFile.write(html)\n print(\"- DONE!\")" ]
[ "0.67066187", "0.6701321", "0.65533936", "0.63305837", "0.6099489", "0.6017834", "0.59876275", "0.5946929", "0.5874019", "0.57817435", "0.572556", "0.5722021", "0.5679243", "0.56552047", "0.56141573", "0.5595637", "0.5588724", "0.5588328", "0.5584552", "0.55791056", "0.5565778", "0.5559737", "0.55513936", "0.5549616", "0.55389094", "0.55378085", "0.5527003", "0.55222666", "0.5513507", "0.5511628" ]
0.86471814
0
Test conversion of html with script to pdf
def testConvertHtmlWithScriptToPdf(self): self._testBase("data/test_with_script.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testConvertHtmlWithOpacityStyleToPdf(self):\n self._testBase(\"data/test_with_opacity_style.html\")", "def testConvertHtmlWithPngDataUrlToPdf(self):\n self._testBase(\"data/test_with_png_dataurl.html\")", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def create_pdf(html, options):\n\n # TODO: we will change this path, or use an other library for converting PDF!\n # TODO: otherwise just say that wkhtmltopdf needs to be pre-installed (and how) and added to windows path\n path_wkthmltopdf = \"C:/Program Files/wkhtmltopdf/bin/wkhtmltopdf.exe\"\n config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\n\n pdfkit.from_file(html, html.replace(\".html\", \".pdf\"), configuration=config, options=options)\n return html + \".pdf\"", "def test_html_output(self):\n pass", "def _produce_pdf_as_an_attachment(self, html, pdf_details):\n result = BytesIO()\n pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n return result.getvalue(), pdf_details", "def get_raw_pdf(html_path, pdf_path, width='', height=''):\n debug = False\n if mg.EXPORT_IMAGES_DIAGNOSTIC: debug = True\n try:\n url = html_path.as_uri()\n cmd_make_pdf = 'cmd_make_pdf not successfully generated yet'\n \"\"\"\n Unless Linux, MUST be in report directory otherwise won't carry across\n internal links.\n\n Re: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/ntcmds_shelloverview.mspx?mfr=true\n \"\"\"\n ## clear decks first so we can tell if image made or not\n try:\n os.remove(pdf_path)\n except Exception:\n pass\n rel_url = os.path.split(url)[1]\n cd_path = os.path.split(html_path)[0]\n if mg.PLATFORM == mg.WINDOWS: ## using Pyinstaller\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{export_output.EXE_TMP}\\\\wkhtmltopdf.exe\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.MAC:\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{mg.MAC_FRAMEWORK_PATH}/wkhtmltopdf\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.LINUX:\n cmd_make_pdf = f'wkhtmltopdf {width} {height} \"{url}\" \"{pdf_path}\"'\n else:\n raise Exception('Encountered an unexpected platform!')\n ## wkhtmltopdf uses stdout to actually output the PDF - a good feature but stuffs up reading stdout for message\n if debug: print(f'cmd_make_pdf: {cmd_make_pdf}')\n export_output.shellit(cmd_make_pdf)\n if not os.path.exists(pdf_path):\n raise Exception(\n f\"wkhtmltopdf didn't generate error but {pdf_path} not made \"\n f'nonetheless. cmd_make_pdf: {cmd_make_pdf}')\n if debug: print(f'Initial processing of {html_path} complete')\n except Exception as e:\n raise Exception(\n f'get_raw_pdf command failed: {cmd_make_pdf}. 
Orig error: {b.ue(e)}')\n return pdf_path", "def generate_pdf (tmp_folder, filename, pdf_page_size):\n\n shell_cmd = PDF_CONVERT_CMD.substitute(wkhtmltox_path=WKHTMLTOX_PATH, folder=tmp_folder, article_id=filename, page_size=pdf_page_size)\n proc = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_value, stderr_value = proc.communicate()\n\n print u'\\n'.join(filter(None, [shell_cmd, stdout_value, stderr_value]))", "def to_pdf(self, wkhtmltopdf: str, f, output_file: Optional[str] = None):\n if output_file is None:\n output_file = \"-\"\n html = self(f)\n with tempfile.NamedTemporaryFile(\"wb\", suffix=\".html\") as fd:\n html.write(fd)\n fd.flush()\n res = subprocess.run([wkhtmltopdf, fd.name, output_file], stdin=subprocess.DEVNULL, capture_output=True)\n if res.returncode != 0:\n raise RuntimeError(\"%s exited with error %d: stderr: %s\", self.wkhtmltopdf, res.returncode, res.stderr)\n if output_file == \"-\":\n return res.stdout", "def render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return None", "def html_to_pdf(source_path: str, output_filename: str, delete_html=True):\n # open output file for writing (truncated binary)\n assert exists(source_path), \"The input file MUST exists!\"\n from_file(source_path, output_filename)\n if delete_html:\n remove(source_path)\n # return False on success and True on errors", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. 
\n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def html2pdf(html_filename, output_filename=None, **options):\n\n if not output_filename:\n output_filename = newTempfile(suffix='.pdf')\n\n if not pdfreactor_available:\n raise RuntimeError(\"The external 'pdfreactor' converter isn't available\")\n\n cmd = '%s \"pdfreactor\" \"%s\" \"%s\"' % \\\n (execution_shell, html_filename, output_filename)\n \n status, output = runcmd(cmd)\n if status != 0:\n raise ConversionError('Error executing: %s' % cmd, output)\n return dict(output_filename=output_filename,\n status=status,\n output=output)", "def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)", "def preview():\r\n html = create_html_report()\r\n return html", "def convert_html():\n return", "def create_pdf(f,s1,s2='',s3=''):\n # does not need reportlab!\n if s1 == 'White Ballot': s1 = '\"'+'_'*10+'\"'\n cod = zlib.compress('BT /F1 16 Tf ET\\r\\nBT 300 270 Td (%s) Tj ET\\r\\nBT /F1 48 Tf ET\\r\\nBT 5 180 Td (%16s) Tj ET\\r\\nBT /F1 12 Tf ET\\r\\nBT 10 50 Td (%s) Tj ET'%(s3,s1,s2))\n open(f,'w').write(create_pdf.__doc__ + '/Length %d>>\\nstream\\n'%len(cod) + cod + 'endstream endobj\\ntrailer<</Root 4 0 R>>')", "def scrape (url, pdf_filename, pdf_page_size=PDF_PAGE_SIZE, folder=OUTPUT_FOLDER, clean_it=True):\n\n raw_html = get_url(url)\n if raw_html is None:\n print \"Sorry, could not read \", url\n else:\n filename_prefix, file_ext = os.path.splitext(pdf_filename)\n if clean_it:\n title = Document(raw_html).short_title()\n content = Document(raw_html).summary(html_partial=True)\n frame = HTML_FRAME.substitute(content=to_unicode(content),\n url=url,\n title=title)\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), frame)\n else:\n source = write_html_file(folder, os.extsep.join([filename_prefix, 'html']), raw_html)\n\n if source:\n generate_pdf (folder, filename_prefix, pdf_page_size)", "def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". 
%s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"", "def pdf_to_text(self, f):\n cmd = [\"pdftohtml\", \"-zoom\", \"1.35\", \"-xml\", \"-stdout\", f.name]\n code, stdout, stderr = self.shell(cmd)\n if code > 0:\n raise ValueError(stderr)\n return stdout.decode('utf-8')", "def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))", "def make_pdf_from_raw_html(self, html, filename: str, options=None):\n return pdfkit.from_string(html, filename, configuration=self._get_pdfkit_config(), options=options)", "def html():\n env.file_ext = \".html\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --standalone --bibliography={bib_file} --csl={csl_file} --toc --number-sections\".format(**env))", "def generate_document(stats: dict, semester: str):\n filename = 'report_' + str(date.today()) + '.html'\n with open('raw_html.html', 'r') as f:\n string = f.read()\n string = string.format(semester,\n stats['faculty_with_usage'],\n stats['full_time'],\n stats['total_full_time'],\n round((stats['full_time'] / stats['total_full_time']) * 100, 1),\n stats['part_time'],\n stats['total_part_time'],\n round((stats['part_time'] / stats['total_part_time']) * 100, 1),\n stats['staff'],\n stats['courses_with_usage'],\n stats['total_courses'],\n round((stats['courses_with_usage'] / stats['total_courses']) * 100, 1),\n stats['specifics']['assignments'],\n stats['specifics']['grade'],\n stats['specifics']['graded'],\n stats['specifics']['discussion'])\n with open(filename, 'w') as f:\n f.write(string)\n pdf = weasyprint.HTML(filename).write_pdf()\n open(\"report_\" + str(date.today()) + \".pdf\", 'wb').write(pdf)", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)", "def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)", "def download(texttitle):\n try:\n body = current_file.analysed_texts['Regular']\n rendered = render_template('pdf_template.html', title=texttitle, body=body)\n options = {'encoding': \"UTF-8\"}\n pdf = pdfkit.from_string(rendered, False, options=options)\n response = make_response(pdf)\n response.headers[\"Content-Type\"] = 'application/pdf'\n response.headers[\"Content-Disposition\"] = 'attachment; filename=output.pdf'\n\n return response\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(request.referrer)", "def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config 
= pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def make_pdf(self, htmlbody, html_only=False):\n # wrap htmlbody with provided HTML template\n template = self.context.auto_template\n template = template.replace(u'${body}', htmlbody)\n if html_only:\n return template\n try:\n tempdir = tempfile.mkdtemp()\n # attachemnts saved. Let's save generated HTML\n fullpath = os.path.join(tempdir, 'issue.html')\n fp = open(fullpath, 'w')\n fp.write(template.encode('utf-8'))\n fp.close()\n # Run wkhtmltopdf and generate the PDF\n targetpath = os.path.join(tempdir, 'issue.pdf')\n result = subprocess.call([\"wkhtmltopdf\", '-q', 'file://%s' % fullpath, '%s' % targetpath])\n if result == 0:\n return open(targetpath, 'rb').read()\n else:\n return ''\n finally:\n shutil.rmtree(tempdir, ignore_errors=True)" ]
[ "0.68983364", "0.6657992", "0.65550816", "0.65299934", "0.6401548", "0.6344371", "0.63163656", "0.62249994", "0.620765", "0.6193275", "0.6157627", "0.6150456", "0.61295813", "0.6123848", "0.6107017", "0.61050683", "0.6063744", "0.6042349", "0.6035368", "0.60228574", "0.5976391", "0.5939912", "0.59088075", "0.58934015", "0.5877291", "0.58752334", "0.5867598", "0.58634275", "0.5851963", "0.5851683" ]
0.85146284
0