query (string, length 9 to 9.05k) | document (string, length 10 to 222k) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, length 4 to 10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Take off extension, and check for another if extension is gz
>>> split_ext('tmp.txt')
('tmp', '.txt')
>>> split_ext('tmp.txt.gz')
('tmp', '.txt.gz')
|
def split_ext(filepath):
    (fn, ext) = os.path.splitext(filepath)
    if ext == '.gz':
        (fn, ext) = os.path.splitext(fn)
        ext += '.gz'
    return (fn, ext)
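A doctest-style usage note for the record above (it assumes os is imported where split_ext is defined, and simply extends the query's examples to a double extension):

>>> split_ext('archive.tar.gz')
('archive', '.tar.gz')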
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
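The objective metadata above marks each row as a (query, document, negatives) triplet. As a minimal sketch of how such a row could feed a contrastive training loop, assuming only the column names shown in the header (the record_to_triplets helper itself is hypothetical, not part of the dataset):

def record_to_triplets(record):
    """Expand one dataset row into (anchor, positive, hard negative) triplets."""
    query, document = record["query"], record["document"]
    return [(query, document, negative) for negative in record["negatives"]]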
|
[
"def _splitzipext(self, filename):\n\n if self._iszip(filename):\n return os.path.splitext(filename)\n else:\n return filename, None",
"def splitext_zip(fname):\n base_fname, ext = splitext(fname)\n if ext == '.gz' or ext == '.zip':\n base_fname, ext2 = splitext(base_fname)\n ext = ''.join([ext2, ext])\n return base_fname, ext",
"def splitexts(path, exts=None):\n exts = []\n ext = os.path.splitext(path)\n while True:\n if len(ext[1]) < 1:\n break\n else:\n exts.append(ext[1])\n ext = os.path.splitext(ext[0])\n exts.reverse()\n return (path, exts)",
"def splitext(path):\n base, ext = posixpath.splitext(path)\n if base.lower().endswith('.tar'):\n ext = base[-4:] + ext\n base = base[:-4]\n return base, ext",
"def splitext( filename ):\n index = filename.find('.')\n if index == 0:\n index = 1+filename[1:].find('.')\n if index == -1:\n return filename, ''\n return filename[:index], filename[index:]\n return os.path.splitext(filename)",
"def splitext_no_dot(filename):\n name, ext = os.path.splitext(filename)\n ext.strip('.')\n return name, ext.strip('.')",
"def split_ext(filename):\n parts = filename.split(\".\")\n if len(parts) == 1:\n return filename, \"\"\n\n tail = list(dropwhile(lambda x: len(x) < 5,\n reversed(parts[1:])))\n\n file_parts = parts[:1] + tail[::-1]\n ext_parts = parts[1+len(tail):]\n return \".\".join(file_parts), \".\" + \".\".join(ext_parts)",
"def split_to_body_and_ext(text):\n for suffix in EXTENSION_SUFFIXES:\n suffix = suffix[1:]\n if text.endswith(suffix):\n return (text[:-len(suffix)], suffix)\n return (text, '')",
"def glob_ext_files(dirname, ext=\"fa\") -> list:\n fnames = glob(os.path.join(dirname, f\"*.{ext}*\"))\n return [f for f in fnames if f.endswith((ext, f\"{ext}.gz\"))]",
"def splitpath_root_file_ext(path):\r\n head, tail = os.path.split(path)\r\n filename, ext = os.path.splitext(tail)\r\n return head, filename, ext",
"def split_path_dir_base_ext(path, dir_ext=True) -> T.Tuple[str, str, str]:\n split = os.path.split\n splitext = os.path.splitext\n p, b = split(path)\n if not dir_ext and os.path.isdir(path):\n n, e = b, ''\n else:\n n, e = splitext(b)\n return p, n, e",
"def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]",
"def file_splitter(filename):\n filename_pieces = filename.split(delimiter)\n\n # Remove the last file piece and split file extension\n new_values = filename_pieces[-1].split('.')\n filename_pieces.pop(-1)\n for value in new_values:\n filename_pieces.append(value)\n\n return filename_pieces",
"def split3 (filename):\n directory, basename = os.path.split (filename)\n basename, extension = os.path.splitext (basename)\n return directory, basename, extension",
"def _parse_ext(raw_fname, verbose=False):\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '' or 'c,rf' in fname:\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n # If ending on .gz, check whether it is an .nii.gz file\n elif ext == '.gz' and raw_fname.endswith('.nii.gz'):\n ext = '.nii.gz'\n fname = fname[:-4] # cut off the .nii\n return fname, ext",
"def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension",
"def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]",
"def get_ext(url):\r\n root, ext = splitext(url)\r\n return ext",
"def get_file_extension(filename):\n # Find the first match from the list of supported file extensions\n extension = next((ext for ext in EXT_LST if filename.lower().endswith(ext)), None)\n return extension",
"def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]",
"def cutgz(x):\n if x[-3:] == '.gz':\n return x[:-3]\n else:\n return x",
"def getExtension(filename):\n return filename[filename.rfind('.'):]",
"def test_get_filename_extension(self):\r\n u = Uploader()\r\n filename = \"image.png\"\r\n err_msg = \"The extension should be PNG\"\r\n assert u.get_filename_extension(filename) == 'png', err_msg\r\n filename = \"image.jpg\"\r\n err_msg = \"The extension should be JPEG\"\r\n assert u.get_filename_extension(filename) == 'jpeg', err_msg\r\n filename = \"imagenoextension\"\r\n err_msg = \"The extension should be None\"\r\n assert u.get_filename_extension(filename) == None, err_msg",
"def filter_ext(exts=[]):\n\n def decorator(function):\n\n def wrapper(*args, **kwargs):\n\n files = function(*args, **kwargs)\n return [file for file in files if file.split('.')[-1] in exts]\n\n return wrapper\n\n return decorator",
"def filterExtStrToArray(filterExt):\n \n if filterExt is None:\n return []\n \n fe = filterExt.split(\" \")\n \n for f in fe:\n assert f[0] == \".\"\n \n return fe",
"def search_ext(self,strz):\n\t\tfor ext in file_type:\t#file_type = list of allow extension words\n\t\t\tif strz.endswith(ext):\n\t\t\t\tself.extension=ext\n\t\t\t\treturn strz.replace(ext,\"\")\n\t\treturn strz",
"def get_ext(url):\n\n path = urlparse(url).path\n ext = splitext(path)[1]\n return ext",
"def file_check(file):\n # unpacking the tuple\n file_name, file_extension = os.path.splitext(file)\n return file_extension",
"def detect_extension(name: Union[str, Path]):\n\n filename = name if isinstance(name, str) else name.name\n parts = filename.split('.')\n ext = parts[-1]\n if ext in COMPRESS_EXT:\n ext = '.'.join(parts[-2:])\n return ext",
"def test_return_only_filenames_under_ext():\n hardcodedpath = \"/home/dados/VideoAudio/Yt videos/Soc Sams vi/Lang Sams vi/Chinese Sams vi/\" \\\n \"Harbin Mandarin yu/BMC 19v 12' 2018 4h Beginning Mandarin Chinese Lessons yu Harbin ytpl/a\"\n path_in_arg_if_any = None\n for arg in sys.argv:\n if arg.startswith('-ppath='):\n path_in_arg_if_any = arg[len('-ppath='):]\n if path_in_arg_if_any is None:\n path_in_arg_if_any = hardcodedpath\n ppath = path_in_arg_if_any\n filenames = os.listdir(ppath)\n filenames.sort()\n print(filenames)\n retlist = return_only_filenames_under_ext(filenames, ppath)\n print(retlist)"
] |
[
"0.7340182",
"0.7257484",
"0.71303344",
"0.71084404",
"0.7084778",
"0.6784504",
"0.66711336",
"0.62481695",
"0.6245035",
"0.6069734",
"0.59981674",
"0.5978438",
"0.59362453",
"0.58886087",
"0.5830605",
"0.58134735",
"0.5779528",
"0.57668006",
"0.575168",
"0.5707265",
"0.5695358",
"0.5686811",
"0.5649396",
"0.5627758",
"0.5615309",
"0.560407",
"0.5590685",
"0.55886406",
"0.5582954",
"0.5554957"
] |
0.78384006
|
0
|
Creates a shorter version of the keys in params
|
def shorten_keys(params):
    param_names = {}
    for n in params:
        parts = n.split('_')
        firsts = [p[0] for p in parts]
        param_names[n] = ''.join(firsts)
    return param_names
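A doctest-style usage note for shorten_keys, assuming it is called with a dict of underscore-separated parameter names (hand-worked example, not taken from the dataset):

>>> shorten_keys({'alpha_decay': 1, 'learning_rate': 0.1})
{'alpha_decay': 'ad', 'learning_rate': 'lr'}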
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def freeze_dict(params):\n return hashlib.sha1(\"&\".join(\n \"{key}={value}\".format(\n key = key,\n value = value,\n )\n for key, value in sorted(six.iteritems(params))\n ).encode('utf-8')).hexdigest()",
"def join_params(**params):\n\tparam_list = get_sorted_keys(params)\n\tvalues = []\n\tfor k in param_list:\n\t\tvalues.append(k+'-'+join_items(params[k]))\n\treturn \"_\".join(values)",
"def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str",
"def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params",
"def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))",
"def keep_params(self, base_key, *params):\n self.params[base_key + \".\" + \"|\".join(params)] = self.keep_params_s(self.params[base_key], params)",
"def _prepare_params(self, params):\n for key, value in params.items():\n if type(value) is list:\n params[key] = [(6, 0, value)]\n\n return params",
"def keep_params(self, base_key, *params):\n self.params[base_key + '.' + '|'.join(params)] = keep_params(\n self.params[base_key], *params)",
"def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]:\n return {k: v if type(v) in [bool, int, float, str, torch.Tensor] else str(v) for k, v in params.items()}",
"def MakeKey(self, string, string_1, string_2):\n ...",
"def substitute_params_keys(params, new_keys):\n for p in params:\n p['type'] = new_keys[p['type']]",
"def normalize_parameters(params):\n\n # 1. First, the name and value of each parameter are encoded\n # (`Section 3.6`_).\n #\n # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6\n key_values = [(escape(k), escape(v)) for k, v in params]\n\n # 2. The parameters are sorted by name, using ascending byte value\n # ordering. If two or more parameters share the same name, they\n # are sorted by their value.\n key_values.sort()\n\n # 3. The name of each parameter is concatenated to its corresponding\n # value using an \"=\" character (ASCII code 61) as a separator, even\n # if the value is empty.\n parameter_parts = [f'{k}={v}' for k, v in key_values]\n\n # 4. The sorted name/value pairs are concatenated together into a\n # single string by using an \"&\" character (ASCII code 38) as\n # separator.\n return '&'.join(parameter_parts)",
"def _format_acp_query_items(\n cls, apc_key: str, params: Dict[str, str] = {}\n ) -> Dict[str, str]:\n ts = int(time.time())\n pre_str = \"\"\n keys_lst = params.keys()\n sorted(keys_lst)\n for key in keys_lst:\n pre_str += f\"{params[key]}\"\n pre_str += f\"{ts}\"\n pre_str += apc_key\n\n token = hashlib.md5(pre_str.encode()).hexdigest()\n return {\"ts\": f\"{ts}\", \"token\": f\"{token}\"}",
"def _filter_params(self):\n default_params = self.get_default_params()\n complete_params = dict(self.get_default_params())\n complete_params.update(self.params)\n\n return utils.format_dictionary(complete_params)",
"def _make_unique_kwarg_list(\n seq: Sequence[Tuple[Any, Any]]\n) -> Sequence[Tuple[Any, Any]]:\n return [\n (pair[0], \"_\".join([pair[1], str(seq[:i].count(pair))]))\n if seq.count(pair) > 1\n else pair\n for i, pair in enumerate(seq)\n ]",
"def build_redash_query_params(self, params: Dict) -> Dict:\n return {\n 'parameters': {\n 'SELECT_FIELDS': self.get_select_fields(params),\n 'SCHEMA_NAME': params.get('schema'),\n 'TABLE_NAME': params.get('tableName'),\n 'WHERE_CLAUSE': self.get_where_clause(params),\n 'RCD_LIMIT': str(self.default_query_limit)\n },\n 'max_age': self.max_redash_cache_age\n }",
"def _clean_and_encode_params(params: Mapping):\n # Keep only the parameters that were given a value\n params = {k: v for k, v in params.items() if v is not None}\n\n # All query parameters are later urlencoded - for projection, comma-separated\n # list is supported only on literal comma; convert comma-separated list\n # to a list of values which will be encoded to multiple query parameters\n try:\n params[\"projection\"] = [x.strip() for x in params[\"projection\"].split(\",\")]\n except KeyError:\n pass\n return params",
"def get_params(self, **params): # pylint: disable=unused-argument\n res = copy.deepcopy(self.sk_params)\n res.update({'build_fn': self.build_fn})\n return res",
"def _prepare_param_dict(self, params_dict):\n return params_dict",
"def _formatRequestArgs(args):\n\tres = [\"{\"]\n\tfor key in sorted(args):\n\t\tvalList = args[key]\n\t\tif not valList or key in _IGNORED_KEYS:\n\t\t\tcontinue\n\t\tres.append(\"%s: [\"%repr(key))\n\t\tfor value in valList:\n\t\t\ttry:\n\t\t\t\tif len(value)>100:\n\t\t\t\t\tres.append(\"<data starting with %s>,\"%repr(value[:30]))\n\t\t\t\telse:\n\t\t\t\t\tres.append(repr(value)+\",\")\n\t\t\texcept TypeError: # no len on value\n\t\t\t\tres.append(repr(value)+\",\")\n\t\tres.append(\"],\")\n\tres.append(\"}\")\n\treturn \"\".join(res)",
"def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data",
"def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}",
"def params_to_args(**params):\n\targs = []\n\tkeys = get_sorted_keys(params)\n\tfor k in keys:\n\t\tif params[k] == False:\n\t\t\tcontinue\n\t\targs.append('--'+k)\n\t\tif params[k] == True:\n\t\t\tcontinue\n\t\t\n\t\tif isinstance(params[k], str):\n\t\t\targs.append(params[k])\n\t\t\tcontinue\n\t\ttry:\n\t\t\targs.extend([str(v) for v in params[k]])\n\t\texcept:\n\t\t\targs.append(str(params[k]))\n\treturn args",
"def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key",
"def _cmd_params_to_dict(params):\n return {t[0]: t[1] for t in params}",
"def format_parameter_list(parameters):\n items = sorted(dict(parameters).items())\n return \" \".join([\"=\".join([key, repr(str(value))]) for (key,value) in items])",
"def prepare_request_params(\n request_params: Dict, model_id: Text, model_data: Dict\n) -> Dict:\n request_params = correct_types(request_params, model_data[\"columns_data\"])\n if model_data[\"hashed_indexes\"]:\n request_params = reverse_hash_names(model_id, request_params)\n return request_params",
"def generate_new_keys(old_keys):\n new_keys = {}\n for new, old in enumerate(sorted(old_keys), 1):\n new_keys[old] = str(new) # key in JSON object is always string\n return new_keys",
"def build_params_dict(params, param_names):\n if len(params) != len(param_names):\n raise ValueError('Parameter and parameter name length mismatch.')\n return dict(zip(param_names, params))",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))"
] |
[
"0.6766152",
"0.63372403",
"0.62510985",
"0.6122546",
"0.59665054",
"0.592801",
"0.5922785",
"0.5906238",
"0.59036815",
"0.58864623",
"0.585397",
"0.5847884",
"0.5844398",
"0.58411515",
"0.58168304",
"0.5811425",
"0.57726884",
"0.57306355",
"0.57174504",
"0.5715628",
"0.5713963",
"0.5701367",
"0.5699768",
"0.5692144",
"0.5692038",
"0.5677551",
"0.56656",
"0.5662041",
"0.56563324",
"0.5648372"
] |
0.7581339
|
0
|
Creates a string from the key/value pairs with _ separating them, sorted by key
>>> join_params(alpha=.5, gamma=.9)
'alpha0.5_gamma0.9'
>>> join_params(features=['a','b','c'], depth=15)
'depth15_featuresabc'
>>> join_params(alpha=.1, trace_rate=None, l=['a','b'])
'alpha0.1_lab_trace_rateNone'
|
def join_params(**params):
    param_list = get_sorted_keys(params)
    values = []
    for k in param_list:
        values.append(k+'-'+join_items(params[k]))
    return "_".join(values)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def format_parameter_list(parameters):\n items = sorted(dict(parameters).items())\n return \" \".join([\"=\".join([key, repr(str(value))]) for (key,value) in items])",
"def _join(lst, key, sep=\";\"):\n return sep.join([d[key] for d in lst if d[key]])",
"def joined_parameter(*values: str) -> str:\n return \"+\".join(values)",
"def join(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append(''.join([k, '=', v]))\n print ', '.join(sub_items)",
"def shorten_keys(params):\n\tparam_names = {}\n\tfor n in params:\n\t\tparts = n.split('_')\n\t\tfirsts = [p[0] for p in parts]\n\t\tparam_names[n] = ''.join(firsts)\n\treturn param_names",
"def make_param_string(delimiter='-', **kwargs):\n string_list = []\n for key, value in sorted(kwargs.items()):\n string_list.append(key)\n if isinstance(value, float):\n value_string = \"{:.2f}\".format(value)\n else:\n value_string = \"{}\".format(value)\n string_list.append(value_string)\n param_string = \"-\".join(string_list)\n if param_string == \"\":\n param_string = \"default-output-dir\"\n return param_string",
"def _get_parameter_string(min_key=-1, min_mode=0,\n min_acousticness=0.0, min_danceablility=0.0,\n min_energy=0.0, min_instrumentalness=0.0,\n min_liveness=0.0, min_loudness=-60,\n min_speechiness=0.0, min_valence=0.0, min_tempo=0,\n max_key=11, max_mode=1,\n max_acousticness=1.0, max_danceablility=1.0,\n max_energy=1.0, max_instrumentalness=1.0,\n max_liveness=1.0, max_loudness=0,\n max_speechiness=1.0, max_valence=1.0, max_tempo=99999):\n return (f\"&min_key={min_key}&max_key={max_key}\" +\n f\"&min_mode={min_mode}&max_mode={max_mode}\" +\n f\"&min_acousticness={min_acousticness}&max_acousticness={max_acousticness}\" +\n f\"&min_danceablility={min_danceablility}&max_danceablility={max_danceablility}\" +\n f\"&min_energy={min_energy}&max_energy={max_energy}\" +\n f\"&min_instrumentalness={min_instrumentalness}&max_instrumentalness={max_instrumentalness}\" +\n f\"&min_liveness={min_liveness}&max_liveness={max_liveness}\" +\n f\"&min_loudness={min_loudness}&max_loudness={max_loudness}\" +\n f\"&min_speechiness={min_speechiness}&max_speechiness={max_speechiness}\" +\n f\"&min_valence={min_valence}&max_valence={max_valence}\" +\n f\"&min_tempo={min_tempo}&max_tempo={max_tempo}\")",
"def group_by_keys(param_list, keys):\n\tkeys = list(keys)\n\tnames = {}\n\tfor p in param_list:\n\t\t\n\t\tif len(keys) > 0:\n\t\t\tkey = join_params(**{k: p.get(k, None) for k in keys})\n\t\t\t#vals = {k: p.get(k, None) for k in keys}\n\t\t\t#name = join_params(**vals)\n\t\t\t#names[name]=vals\n\t\telse:\n\t\t\tkey = ''\n\t\tif key in names:\n\t\t\tnames[key].append(p)\n\t\telse:\n\t\t\tnames[key]=[p]\n\treturn names",
"def join_string_lists(**kwargs) -> dict:\n return {k: \",\".join(v) for k, v in kwargs.items() if v}",
"def stringify(**kwargs):\n return \"_\".join(\n [\n \"{}-{}\".format(k, kwargs[k])\n for k\n in sorted(kwargs.keys())\n ]\n )",
"def stringify_slice_key(slice_key: SliceKeyType) -> Tuple[str, str]:\n key_count = len(slice_key)\n if not key_count:\n return ('Overall', 'Overall')\n\n keys = []\n values = []\n separator = ', '\n\n for (feature, value) in slice_key:\n keys.append(feature)\n values.append(value)\n\n # To use u'{}' instead of '{}' here to avoid encoding a unicode character with\n # ascii codec.\n return (\n separator.join([u'{}'.format(key) for key in keys]),\n separator.join([u'{}'.format(value) for value in values])\n )",
"def key_join(self, key, encode=True):\n if isinstance(key, str):\n parts = key.split('/')\n else:\n parts = key\n new_parts = []\n\n for part in parts:\n if isinstance(part, bytes):\n part = part.decode(\"utf-8\")\n if encode:\n part = quote(str(part))\n new_parts.append(part)\n\n return '/'.join(new_parts)",
"def urljoin(*args):\n return '/'.join(str(a or '').strip('/') for a in args)",
"def do_dict_join(d, k_sep=\".\", v_sep=\"=\"):\n \"\"\"Helper function for function do_dict_join\"\"\"\n res = []\n for k, v in d.iteritems():\n if isinstance(v, dict):\n new_res = map(lambda el: \"{}{}{}\".format(k, k_sep, el),\n do_dict_join(v, k_sep, v_sep))\n\n res.extend(new_res)\n else:\n res.append(\"{}{}{}\".format(k, v_sep, v))\n return res",
"def format_param_pairs(self, params_pairs):\n out = \"\"\n for param in params_pairs:\n out += \"{} {} \".format(*param)\n return out",
"def param_dict_to_str(data):\n if data is None or not data:\n return \"\"\n pairs = []\n for key, val in data.items():\n if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):\n pairs.append(str(key) + '=' + ','.join(map(str, val)))\n elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):\n pairs.append(str(key) + '=' + str(val))\n elif val is not None:\n raise TypeError('Unknown type of parameter:%s, got:%s'\n % (key, type(val).__name__))\n return ' '.join(pairs)",
"def _FormatUrlParams(self, params):\n param_fragments = []\n for param in sorted(params.iteritems(), key=lambda x: x[0]):\n param_fragments.append('%s=%s' % (param[0], self._UrlEscape(param[1])))\n\n return '&'.join(param_fragments)",
"def FormatUrl(params):\n input_list = []\n for item in sorted(params.iteritems(), key=lambda x: x[0]):\n input_list.append('%s=%s' % (item[0], UrlEscape(item[1])))\n return '&'.join(input_list)",
"def normalize_parameters(params):\n\n # 1. First, the name and value of each parameter are encoded\n # (`Section 3.6`_).\n #\n # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6\n key_values = [(escape(k), escape(v)) for k, v in params]\n\n # 2. The parameters are sorted by name, using ascending byte value\n # ordering. If two or more parameters share the same name, they\n # are sorted by their value.\n key_values.sort()\n\n # 3. The name of each parameter is concatenated to its corresponding\n # value using an \"=\" character (ASCII code 61) as a separator, even\n # if the value is empty.\n parameter_parts = [f'{k}={v}' for k, v in key_values]\n\n # 4. The sorted name/value pairs are concatenated together into a\n # single string by using an \"&\" character (ASCII code 38) as\n # separator.\n return '&'.join(parameter_parts)",
"def build_command_line_parameters(params, command_name=\"-param\"):\n if params is None:\n return \"\"\n res = []\n for k, v in sorted(params.items()):\n if '\"' in v:\n v = v.replace('\"', '\\\\\"')\n one = '{2} {0}=\"{1}\"'.format(k, v, command_name)\n res.append(one)\n return \" \".join(res)",
"def build_query_string(params: Optional[Dict[str, Any]] = None) -> str:\n if params is None:\n return ''\n components = []\n for key, value in params.items():\n if isinstance(value, (list, tuple, set)):\n for v in value:\n c = '='.join([key, quote_plus(str(v))])\n components.append(c)\n else:\n c = '='.join([key, quote_plus(str(value))])\n components.append(c)\n if len(components) > 0:\n return '?{}'.format('&'.join(components))\n return ''",
"def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)",
"def format_merge_props(props, sep=\" \"):\n assert sep in [\"\\t\", \"\\n\", \" \"] # must be a whitespace\n props = props.items()\n props.sort()\n L = []\n for h, r in props:\n L.append(\"%s:%s\" % (h, r))\n return sep.join(L)",
"def urljoin(base, *path, **query):\n if base and base.endswith('/'):\n base = base[:-1]\n retval = [base]\n\n # build the path\n path = '/'.join([''] + [quote(s, '') for s in path])\n if path:\n retval.append(path)\n\n # build the query string\n params = []\n for name, value in query.items():\n if type(value) in (list, tuple):\n params.extend([(name, i) for i in value if i is not None])\n elif value is not None:\n if value is True:\n value = 'true'\n elif value is False:\n value = 'false'\n params.append((name, value))\n if params:\n retval.extend(['?', urlencode(params)])\n\n return ''.join(retval)",
"def build_qs(self, params, key=None):\n parts = []\n\n if params and hasattr(params, 'items'):\n for name, value in params.items():\n\n if hasattr(value, 'values'):\n # Encode a dict\n parts.extend(self.build_qs(params=value.values(),\n key=self.build_qs_key(key, cgi.escape(name))))\n\n elif hasattr(value, '__iter__'):\n # Encode an iterable (list, tuple, etc)\n parts.extend(self.build_qs(params=dict(zip(xrange(0, len(value)), value)),\n key=self.build_qs_key(key, cgi.escape(name))))\n\n else:\n parts.extend('%s=%s' % (self.build_qs_key(key, cgi.escape(name)), cgi.escape(str(value))))\n\n return '&'.join(parts)",
"def _build_key(self, key: str) -> str:\n return \"-\".join((self._name, key))",
"def join_path(values: t.List[str]) -> str:\n from axonius_api_client.tools import listify\n\n return \" => \".join(listify(values))",
"def join_with_and(values, last_word: str = 'and') -> str:\n valuesList = list(values)\n length = len(valuesList)\n\n # value1, value2, value3 and value4\n if length > 2:\n return '{} {} {}'.format(', '.join(valuesList[:-1]), last_word, valuesList[-1])\n # value1 and value2\n elif length == 2:\n return '{} {} {}'.format(valuesList[0], last_word, valuesList[1])\n # value 1\n elif length == 1:\n return valuesList[0]\n # Empty\n return ''",
"def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def convert_subscr_params(params_dict):\n params_list = []\n for (param, val) in iteritems(params_dict):\n if val is False:\n val = 'false'\n elif val is True:\n val = 'true'\n\n params_list.append('%s = %s' % (param, val))\n\n return ', '.join(params_list)"
] |
[
"0.6141748",
"0.59891856",
"0.5859167",
"0.58393925",
"0.57129323",
"0.57054585",
"0.55881333",
"0.55730945",
"0.54583883",
"0.5409094",
"0.5379683",
"0.53321546",
"0.527924",
"0.52433175",
"0.52432543",
"0.52367586",
"0.52364355",
"0.5234342",
"0.5218139",
"0.5169389",
"0.51582783",
"0.5112662",
"0.5105346",
"0.50884396",
"0.5081019",
"0.50690264",
"0.5061384",
"0.5054604",
"0.50266814",
"0.5021866"
] |
0.8081009
|
0
|
Turns a dictionary of parameters into a commandline list of arguments
|
def params_to_args(**params):
    args = []
    keys = get_sorted_keys(params)
    for k in keys:
        if params[k] == False:
            continue
        args.append('--'+k)
        if params[k] == True:
            continue
        if isinstance(params[k], str):
            args.append(params[k])
            continue
        try:
            args.extend([str(v) for v in params[k]])
        except:
            args.append(str(params[k]))
    return args
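Assuming get_sorted_keys simply returns sorted(params.keys()) (as sketched earlier), a hand-worked doctest-style example of the flag handling: booleans become bare flags, scalars and strings become single values, and iterables are expanded:

>>> params_to_args(alpha=0.1, verbose=True, features=['a', 'b'])
['--alpha', '0.1', '--features', 'a', 'b', '--verbose']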
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def configToCliArguments(config):\n if not isinstance(config, dict):\n raise TypeError(\"Expected dict for config\")\n\n args = []\n for key, value in config.items():\n if value == None:\n args.append(f\"--{key}\")\n continue\n\n if isinstance(value, list):\n value = \",\".join(value)\n args.append(f\"--{key}={value}\")\n\n return args",
"def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )",
"def command_friendly_kv_pair(dict):\n # subprocess.run expects parameters to be in the foo=bar format. We build this format here and return a list\n output = []\n for key, value in dict.items():\n output.append('%s=%s' % (key, value))\n return output",
"def _build_command_list(self, arguments=None, debug=False):\n \n if debug:\n command_name = arguments[\"script_name\"]\n else:\n command_name = os.path.splitext(arguments[\"script_name\"])[0]\n \n command_list = [command_name]\n #del arguments[\"script_name\"]\n #del arguments[\"optional_arguments\"]\n\n for k in arguments:\n if k == \"script_name\": continue\n if type(arguments[k]) == type(list()):\n for n in arguments[k]:\n command_list.append(\"--{0}\".format(k))\n command_list.append(\"{0}\".format(n))\n else: \n command_list.append(\"--{0}\".format(k))\n command_list.append(\"{0}\".format(arguments[k]))\n \n return command_list",
"def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args",
"def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list",
"def map_arguments():\n arguments = {\n '-c': 'ogg',\n '-d': 'no',\n '-q': '4'\n }\n args = sys.argv[:]\n args.pop(0)\n while len(args) > 1:\n if args[0] == '-c' and re.search('^mp3$|^ogg$', args[1]) or \\\n args[0] == '-d' and re.search('^y(es)?$', args[1]) or \\\n args[0] == '-q' and re.search('^[0-9]$', args[1]):\n arguments[args[0]] = args[1]\n args.pop(0)\n args.pop(0)\n else:\n print_help()\n if len(args) == 1:\n print_help()\n return arguments",
"def build_command_line_parameters(params, command_name=\"-param\"):\n if params is None:\n return \"\"\n res = []\n for k, v in sorted(params.items()):\n if '\"' in v:\n v = v.replace('\"', '\\\\\"')\n one = '{2} {0}=\"{1}\"'.format(k, v, command_name)\n res.append(one)\n return \" \".join(res)",
"def dict2argstr(args_dict):\n arg_str = \"\"\n for arg, value in args_dict.items():\n if value is not None:\n arg_str += \" --{} {}\".format(str(arg), str(value))\n return arg_str",
"def create_command_line(clt_desc, input_dict):\n args = []\n if 'arguments' in clt_desc:\n for argument in clt_desc['arguments']:\n if not isinstance(argument, str):\n exit_system_error('Sorry: I only understand strings for arguments.'\n 'Please use the inputs to pass arguments from input parameters.')\n args.append((-1, clt_desc['arguments']))\n\n for parameter in clt_desc['inputs']:\n args.append(create_argument(parameter, input_dict))\n\n args.sort(key=lambda arg: arg[0])\n\n # drop keys and flatten\n command_line = []\n for _, items in args:\n if items is not None:\n command_line.extend(items)\n return command_line",
"def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list",
"def cli(arg_dict):\n\n keys = list(arg_dict.keys())\n for key in keys:\n v = arg_dict[key]\n usr_args_ls = sys.argv\n for ind in range(len(usr_args_ls)):\n val = usr_args_ls[ind]\n if val == \"-\" + key[0] or val == \"--\" + key:\n if type(v).__name__ == \"bool\":\n v = not v\n else:\n v = usr_args_ls[ind + 1]\n\n arg_dict[key] = v",
"def _merge_args_opts(args_opts_dict, **kwargs):\n merged = []\n\n if not args_opts_dict:\n return merged\n\n for arg, opt in args_opts_dict.items():\n if not _is_sequence(opt):\n opt = shlex.split(opt or \"\")\n merged += opt\n\n if not arg:\n continue\n\n if \"add_input_option\" in kwargs:\n merged.append(\"-i\")\n\n merged.append(arg)\n\n return merged",
"def get_all_args(cmd_list):\n\tl = list()\n\tfor c in cmd_list:\n\t\tfor a in c[\"argument\"]:\n\t\t\tl += [(a[0], a[1], c[\"notes\"])]\n\treturn l",
"def args(hub, val: List[str] or str) -> Tuple[List[str], Dict[str, str]]:\n args = []\n kwargs = {}\n for v in hub.render.cli.render(val):\n if isinstance(v, dict):\n kwargs.update(v)\n else:\n args.append(v)\n\n return args, kwargs",
"def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str",
"def config_to_args(config):\n result = []\n\n for key, value in iteritems(config):\n if value is False:\n continue\n\n key = '--{0}'.format(key.replace('_', '-'))\n\n if isinstance(value, (list, set, tuple)):\n for item in value:\n result.extend((key, smart_str(item)))\n elif value is not True:\n result.extend((key, smart_str(value)))\n else:\n result.append(key)\n\n return tuple(result)",
"def create_usdzconvert_arguments(args: list) -> list:\n usdz_converter_path = current_app.config.get('USDZ_CONVERTER_PATH') / \\\n current_app.config.get('USDZ_CONVERTER_SCRIPT_PATH')\n\n arguments = [_get_converter_interpreter_arg(),\n usdz_converter_path.resolve().as_posix()]\n\n for arg in args:\n arguments.append(arg)\n\n return arguments",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def get_arguments_from_user() -> list:\n\n print(\"Für weitere Konfiguration bitte das Programm direkt über eine Konsole starten.\\nMit -h können alle Argumente aufgelistet werden\\n\")\n code = input(\"Impf-Code: \")\n plz = input(\"PLZ: \")\n bundesland = input(\"Bundesland des Zentrums (zB Baden-Württemberg): \")\n\n arguments = [\"-c\", code, \"-p\", plz, \"-b\", bundesland]\n return arguments",
"def parse_arguments(args):",
"def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary",
"def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args",
"def list_args(args):\n run_list_args(args)",
"def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments",
"def _flat_node_to_cmd_line_args(node):\n if isinstance(node, list):\n return node\n elif isinstance(node, dict):\n return list(itertools.chain(*[['--%s' % key,node[key]] if\n isinstance(node[key],basestring) else ['--%s' % key] + node[key] for key in node.keys()]))\n elif isinstance(node, basestring):\n return node.split()\n else:\n raise ValueError(\"%s node is has unsupported data type\")",
"def parse_args(argv: t.Iterable[str] = None):\n if argv is None:\n argv = sys.argv[1:]\n\n args: t.List[str] = []\n kwargs: t.MutableMapping[str, t.Any] = {}\n\n key = None\n for arg in argv:\n if arg.startswith('--'):\n if arg == '--help':\n print(USAGE)\n raise SystemExit\n if key is not None:\n kwargs[key] = True\n key = arg[2:]\n continue\n\n match = re.match('^(\\\\w+)=(.*)$', arg)\n if match:\n if key is not None:\n kwargs[key] = True\n key = None\n kwargs[match.group(1)] = match.group(2)\n continue\n\n if key is not None:\n kwargs[key] = arg\n key = None\n continue\n\n args.append(arg)\n\n if key is not None:\n kwargs[key] = True\n\n return (tuple(args), kwargs)",
"def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params",
"def cmdline_params(self, file1_name, file2_name):\n parameters = []\n\n pm_dict = self.get_dict()\n for option, enabled in pm_dict.items():\n if enabled:\n parameters += [\"--\" + option]\n\n parameters += [file1_name, file2_name]\n\n return [str(p) for p in parameters]",
"def _get_args(self) -> str:\n args = \"\"\n\n for config_key, config_option in self.config_options.items():\n if config_key in self.special_args:\n get_args_func = self.special_args[config_key]\n args += get_args_func(config_option)\n elif config_key in self.ignore_args:\n continue\n else:\n args += self._get_normal_args(config_key, config_option)\n\n return args.strip()"
] |
[
"0.7267064",
"0.6773042",
"0.67161405",
"0.66971356",
"0.6630844",
"0.6525081",
"0.6472653",
"0.6465406",
"0.63680834",
"0.6322142",
"0.6303616",
"0.62891084",
"0.6241041",
"0.6233576",
"0.62206215",
"0.62063277",
"0.62021786",
"0.61741203",
"0.61519337",
"0.6143028",
"0.61104125",
"0.61056715",
"0.6028893",
"0.6021764",
"0.60026515",
"0.6002631",
"0.5995818",
"0.59626526",
"0.59510547",
"0.5929333"
] |
0.75936013
|
0
|
Return a list of the directories in dirpath, starting with the directory in base_dir. If base_dir is provided but dirpath does not contain it, return None
>>> get_all_dirs('/tmp/asdf/fred')
['tmp', 'asdf', 'fred']
>>> get_all_dirs('/tmp/asdf/fred/')  # doesn't care about final slash
['tmp', 'asdf', 'fred']
>>> get_all_dirs('tmp/asdf/fred')  # doesn't care about starting slash
['tmp', 'asdf', 'fred']
>>> get_all_dirs('/tmp/asdf/fred/raw.txt')  # does include file
['tmp', 'asdf', 'fred', 'raw.txt']
>>> get_all_dirs('./tmp/asdf//fred')  # ignores extraneous details
['tmp', 'asdf', 'fred']
>>> get_all_dirs('/tmp/asdf/fred', base_dir='tmp')
['asdf', 'fred']
>>> get_all_dirs('/tmp/asdf/fred', base_dir='/tmp/')
['asdf', 'fred']
>>> get_all_dirs('tmp/asdf/fred', base_dir='/tmp/')  # base_dir must match
>>> get_all_dirs('tmp/asdf/fred/', base_dir='../tmp')
>>> get_all_dirs('../tmp/asdf/fred/', base_dir='../tmp')
['asdf', 'fred']
|
def get_all_dirs(dirpath, base_dir=None):
    if not base_dir:
        post = os.path.normpath(dirpath)
    elif base_dir in dirpath:
        (pre, post) = dirpath.split(os.path.normpath(base_dir))
        post = os.path.normpath(post)
    else:
        return
    dirs = []
    (head, tail) = os.path.split(post)
    while tail:
        dirs.append(tail)
        (head, tail) = os.path.split(head)
    dirs.reverse()
    return dirs
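A short trace of the base_dir branch in the record above, assuming POSIX path semantics (a sketch, not part of the dataset):

import os

dirpath, base_dir = '/tmp/asdf/fred', '/tmp/'
pre, post = dirpath.split(os.path.normpath(base_dir))  # normpath('/tmp/') -> '/tmp'; split -> ['', '/asdf/fred']
print(os.path.normpath(post))  # '/asdf/fred'; repeated os.path.split plus reverse() then yields ['asdf', 'fred']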
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_directories(path):\n\n # Uses abspath as the directory\n absolute = os.path.dirname(abspath(path))\n all_files = os.listdir(absolute)\n\n # Get the absolute path of each file\n absolute_files = [\"/\".join([absolute, d]) for d in all_files]\n\n # Here we filter all non-directires out and return\n return [i for i in absolute_files if os.path.isdir(i)]",
"def dirs_in_dir(path):\n listing = sorted(os.listdir(path))\n\n dirs = []\n for name in listing:\n longname = path + '/' + name\n if name[0] == '.':\n continue\n if not os.path.isdir(longname):\n continue\n dirs.append(name)\n\n return dirs",
"def get_all_subdirs(dir_path):\n\tls = os.listdir(dir_path)\n\tdirs = []\n\tfor f in ls:\n\t\tif os.path.isdir(os.path.join(dir_path, f)):\n\t\t\tdirs.append(f)\n\treturn dirs",
"def get_subdirs(dir_path: str) -> list:\n res = list(x.name for x in os.scandir(dir_path) if x.is_dir())\n return res",
"def get_dirs(source_dir):\n all_dirs = set()\n it = os.walk(source_dir)\n it.next()\n dirs = list(it)\n for d in dirs:\n if len(d[1])==0:\n all_dirs.add(d[0])\n return all_dirs",
"def getdirs():\n dirs = [i for i in os.listdir(dname) if not \\\n os.path.isfile(os.path.join(dname, i))]\n return dirs",
"def get_dirs(self, path):\n ds = []\n try:\n for d in os.listdir(path):\n if os.path.isdir(os.path.join(path, d)):\n ds.append(d)\n except OSError:\n pass\n ds.sort()\n return ds",
"def get_dirs():\n # join glob matchers\n dirnames = [\n str(dir_path.relative_to(get_data_dir()))\n for dir_path in get_data_dir().rglob(\"*\")\n if dir_path.is_dir()\n ]\n\n return dirnames",
"def getDirectoryList(path):\n dirList = [\"/\".join([path, object]) for object in os.listdir(path)]\n dirList = [object for object in dirList if os.path.isdir(object)]\n return dirList",
"def get_patient_dirs(base_folder):\n patient_dirs = sorted([x for x in base_folder.iterdir() if x.is_dir()])\n return patient_dirs",
"def list_directories(path):\n dir_list = os.listdir(path)\n directories = [f for f in dir_list if os.path.isdir(os.path.join(path, f))]\n return directories",
"def get_dir_list_recurse(basepath, itempath=\"\", parent=None):\n total = []\n if not basepath.endswith(\"/\"):\n basepath = basepath + \"/\"\n if itempath and not itempath.endswith(\"/\"):\n itempath = itempath + \"/\"\n items = os.listdir(basepath + itempath)\n for itemname in items:\n curpath = basepath + itempath + itemname\n if os.path.isdir(curpath):\n dirobj = ListDir(\n basepath=basepath,\n itempath=itempath + itemname,\n itemname=itemname,\n parent=parent\n )\n dirobj.contents = get_dir_list_recurse(\n basepath,\n itempath=itempath+itemname,\n parent=dirobj\n )\n total.append(dirobj)\n else:\n fileobj = ListItem(\n parent,\n basepath=basepath,\n itempath=itempath + itemname,\n itemname=itemname\n )\n total.append(fileobj)\n return total",
"def _RecursiveDirectoryListing(dirpath):\n result = []\n for root, _, files in os.walk(dirpath):\n for f in files:\n result.append(os.path.relpath(os.path.join(root, f), dirpath))\n return result",
"def get_dir_list(basepath):\n parent = ListDir(basepath=basepath)\n parent.contents = get_dir_list_recurse(basepath, parent=parent)\n return parent",
"def subdirs(dir):\n return [dir + '/' + name for name in os.listdir(dir)\n if os.path.isdir(os.path.join(dir, name))]",
"def list_dirs(self, path=\"/\"):\n path = j.sal.fs.pathClean(path)\n dir_obj = self._dir_model.get_by_name(path)\n if not dir_obj:\n raise j.exceptions.Base(\"path {} does not exist\".format(path))\n res = [self._dir_model.get(item).name for item in dir_obj[0].dirs]\n return res",
"def getsubdirs(toppath, search_string = \".\"):\n if not search_string:\n return [toppath]\n reg_prog = re.compile(search_string)\n dirlist = []\n if search_string == \".\":\n dirlist.append(toppath)\n for root, dirs, files in os.walk(toppath):\n for fname in files:\n if reg_prog.search(os.path.join(root,fname)):\n dirlist.append(root)\n continue\n uniqueList = []\n for value in dirlist:\n if value not in uniqueList:\n uniqueList.append(value)\n return uniqueList",
"def get_subdirectories(a_dir):\n return [a_dir + name + \"/\" for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]",
"def all_subdirs_of(dir='.'):\n result = []\n for item in os.listdir(dir):\n path = os.path.join(dir, item)\n if os.path.isdir(path):\n result.append(path)\n return result",
"def get_all_paths(dmt, directory_path=''):\n # Base case.\n if not dmt.children:\n return set()\n \n filesystem_items = set()\n for item in dmt.children.keys():\n filesystem_items.add(directory_path+item)\n # Also get the paths of subdirectory contents.\n if item[-1] == '/':\n subdir_name = item\n subdir_path = directory_path + subdir_name\n \n filesystem_items.add(subdir_path)\n filesystem_items.update(get_all_paths(dmt.children[subdir_name], subdir_path))\n \n return filesystem_items",
"def dirs(base, wildcard='[!.]*', recursive=1, prune=('.git', '.svn', 'CVS')):\n prune = tuple(prune or ())\n for dirpath, dirnames, _ in walk(native(base)):\n for item in prune:\n if item in dirnames:\n dirnames.remove(item)\n\n dirnames.sort()\n for name in _fnmatch.filter(dirnames, wildcard):\n dest = _os.path.join(dirpath, name)\n if dest.startswith(root):\n dest = dest.replace(root, '', 1)\n aslist = []\n head, tail = _os.path.split(dest)\n while tail:\n aslist.append(tail)\n head, tail = _os.path.split(head)\n aslist.reverse()\n dest = '/'.join(aslist)\n yield dest\n\n if not recursive:\n break",
"def _list_dir(dir_name:str)->list:\n files_and_dirs = os.listdir(dir_name)\n list_of_files = []\n for file in files_and_dirs:\n completePath = os.path.join(dir_name, file)\n if os.path.isdir(completePath):\n list_of_files = list_of_files + _list_dir(completePath)\n else:\n list_of_files.append(completePath)\n\n return list_of_files",
"def ls_dirs(self, path, recursive=False):\n if path != \"\" and not path.endswith(\"/\"):\n path += \"/\"\n\n blob_iter = self.client.list_blobs(name_starts_with=path)\n dirs = []\n for blob in blob_iter:\n relative_dir = os.path.dirname(os.path.relpath(blob.name, path))\n if (\n relative_dir\n and (recursive or \"/\" not in relative_dir)\n and relative_dir not in dirs\n ):\n dirs.append(relative_dir)\n\n return dirs",
"def directories_in_dir_recursive(search_dir, ignored_regex_objects):\n\n dir_paths = [search_dir]\n\n for dirpath, dirnames, filenames in os.walk(search_dir):\n\n for dirname in dirnames:\n\n if expression_helper.is_string_matched_in_regular_expression_objects(dirpath, ignored_regex_objects):\n # ignore subdirectories of ignored directory\n continue\n\n if os.path.islink(dirname):\n # ignore symlink\n # http://stackoverflow.com/questions/15718006/check-if-directory-is-symlink\n continue\n\n if expression_helper.is_string_matched_in_regular_expression_objects(dirname, ignored_regex_objects):\n # ignore this directory\n continue\n\n full_name = os.path.join(dirpath, dirname)\n dir_paths.append(full_name)\n\n return dir_paths",
"def dirs(self, dirs=['.']):\n return [Path(d) for d in dirs]",
"def getSubDirectories(path, ignore_dirs=()):\n\n result = []\n\n ignore_dirs = [os.path.normcase(ignore_dir) for ignore_dir in ignore_dirs]\n\n for root, dirnames, _filenames in os.walk(path):\n # Normalize dirnames for better matching.\n dirnames_normalized = [os.path.normcase(dirname) for dirname in dirnames]\n for ignore_dir in ignore_dirs:\n if ignore_dir in dirnames_normalized:\n dirnames.remove(ignore_dir)\n\n dirnames.sort()\n\n for dirname in dirnames:\n result.append(os.path.join(root, dirname))\n\n result.sort()\n return result",
"def listDirsInDir(self, path, recursive=False, dirNameOnly=True, filter=None):\n self._checkActive()\n def _process(args,path,ttype,moddate=0,size=0,md5hash=\"\"):\n dirNameOnly,filter,pathsreturn=args\n if ttype==\"D\":\n if (filter is None) or fnmatch.fnmatch(path, filter):\n if dirNameOnly:\n pathsreturn.append(q.system.fs.getDirName(path+\"/\",True))\n else:\n pathsreturn.append(path)\n pathsreturn=[]\n self.walk(_process, [dirNameOnly,filter,pathsreturn], path, recursive=recursive) \n return pathsreturn",
"def subdirs(path, name):\n f = lambda x: name is None or x.lower() == name.lower()\n return [file_path\n for file_name in os.listdir(path)\n if f(file_name) and not file_name.startswith('.')\n for file_path in (os.path.join(path, file_name),)\n if os.path.isdir(file_path)]",
"def get_dirs(root_dir, recursive=True):\n\n ret_dirs = []\n\n for root, dirs, _ in os.walk(root_dir, topdown=True):\n\n for name in dirs:\n ret_dirs.append(os.path.join(root, name))\n\n if not recursive:\n break\n\n return ret_dirs",
"def list_directories(dir_pathname, recursive=True, topdown=True,\n followlinks=False):\n for root, dir_names, _ in walk(dir_pathname, recursive, topdown, followlinks):\n for dir_name in dir_names:\n yield absolute_path(os.path.join(root, dir_name))"
] |
[
"0.68340456",
"0.6785555",
"0.6668709",
"0.63527954",
"0.63505065",
"0.6325283",
"0.6313887",
"0.62805694",
"0.62461233",
"0.6196611",
"0.6186414",
"0.61573446",
"0.6148574",
"0.60876536",
"0.60819036",
"0.6080527",
"0.6074074",
"0.60723394",
"0.60716766",
"0.6064101",
"0.6062908",
"0.60481596",
"0.6040847",
"0.6028157",
"0.6004873",
"0.59512",
"0.5912041",
"0.5905662",
"0.5882241",
"0.58810824"
] |
0.8212185
|
0
|
Split off the tail directory and add the parameters in that string to the param dictionary passed. Return the head directory
>>> params = {}
>>> get_dir_params('/tmp/alpha1.0_alpha_decay1', params)
'/tmp'
>>> len(params)
2
>>> params['alpha_decay']
1
>>> params['alpha']
1.0
|
def get_dir_params(dirpath, params):
    (head, tail) = os.path.split(dirpath)
    params.update(split_params(tail))
    return head
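split_params is not part of the record; a purely hypothetical sketch that is consistent with the doctest in the query (it pulls name/number pairs such as alpha1.0 and alpha_decay1 out of the tail directory name):

import re

def split_params(tail):
    # e.g. 'alpha1.0_alpha_decay1' -> {'alpha': 1.0, 'alpha_decay': 1}
    pairs = re.findall(r'([a-zA-Z_]+?)(-?\d+(?:\.\d+)?)', tail)
    return {k.lstrip('_'): float(v) if '.' in v else int(v) for k, v in pairs}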
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def directory_parameters(directories):\n def _join_dirs(index, subdir):\n # collect sub-directories\n dirs = []\n for i in range(index+1):\n dirs += directories[steps[i]]\n if not dirs:\n return subdir\n else:\n dir = dirs[0]\n for d in dirs[1:]:\n dir = os.path.join(dir, d)\n return os.path.join(dir, subdir)\n\n global args\n parameters = []\n\n # add directory parameters\n # - preprocessing\n if args.preprocessed_directory:\n parameters += ['--preprocessed-directory', os.path.join(args.preprocessed_directory, _join_dirs(0, 'preprocessed'))] + skips[1]\n else:\n parameters += ['--preprocessed-directory', _join_dirs(0, 'preprocessed')]\n\n # - feature extraction\n parameters += ['--extracted-directory', _join_dirs(1, 'extracted'), '--extractor-file', _join_dirs(1, 'Extractor.hdf5')]\n\n # - feature projection\n parameters += ['--projected-directory', _join_dirs(2, 'projected'), '--projector-file', _join_dirs(2, 'Projector.hdf5')]\n\n # - model enrollment\n parameters += ['--model-directories', _join_dirs(3, 'N-Models'), _join_dirs(3, 'T-Models'), '--enroller-file', _join_dirs(3, 'Enroller.hdf5')]\n\n # - scoring\n parameters += ['--score-directories', _join_dirs(4, 'nonorm'), _join_dirs(4, 'ztnorm')]\n\n # - Experiment.info\n parameters += ['--experiment-info-file', _join_dirs(4, 'Experiment.info')]\n\n # the sub-dorectory, given on command line\n parameters += ['--sub-directory', args.sub_directory]\n\n global score_directories\n score_directories.append(_join_dirs(4, '.'))\n\n # grid database\n if args.grid is not None or args.parallel is not None:\n # we get one database per preprocessing job (all others might have job inter-dependencies)\n parameters += ['--gridtk-database-file', os.path.join(args.gridtk_database_directory, _join_dirs(args.gridtk_database_split_level, 'submitted.sql3'))]\n\n return parameters",
"def getTestCompDir(dirKey):\n\n if (not keyIsValid(dirKey)):\n fail(\"ERROR: %s is not defined\" % (dirKey))\n\n else:\n dirName = getParam(dirKey)\n\n # make sure we end in a \"/\"\n if (not (string.rfind(dirName, \"/\") == len(dirName)-1)):\n dirName = dirName + \"/\"\n \n if (not os.path.isdir(dirName)):\n fail(\"ERROR: %s is not a valid directory\" % (dirName))\n\n return dirName",
"def calc_directory(init_dir, dbg=False):\n dt_str, _ = calc_date_time()\n dt_final = os.sep.join([init_dir, dt_str])\n\n dbc.print_helper((\"Dir: \" + dt_final), dbg=dbg)\n return dt_final, dt_str",
"def _populate_params(self):\n self.params = []\n for root, dirs, files in os.walk(os.curdir):\n for file in files:\n fullfile = str(os.path.join(root, file))\n if self.config.regex_find_params.match(fullfile):\n self.params.append(fullfile)",
"def construct_params(self, params):\n params_list = [params['market'], params['currency'], params['locale'], params['pickupplace'], params[\n 'dropoffplace'], params['pickupdatetime'], params['dropoffdatetime'], params['driverage']]\n\n params_path = '/'.join(str(p) for p in params_list)\n\n return params_path",
"def load_params(outdir, paramsfn='parameters', ignore=None, params=None):\n if params in [None, 'none']:\n params = {}\n\n # If outdir is the entire path, it has .txt at end, and so use this to split it up into dir and filename\n if outdir[-4:] == '.txt':\n outsplit = outdir.split('/')\n outdir = ''\n for tmp in outsplit[:-1]:\n outdir += tmp + '/'\n paramsfn = outsplit[-1]\n\n if '*' in outdir:\n print 'outdir specified with wildcard, searching and taking first result...'\n outdir = glob.glob(outdir)[0]\n\n outdir = dio.prepdir(outdir)\n if paramsfn[-4:] != '.txt':\n paramsfn += '.txt'\n\n if os.path.exists(outdir + paramsfn):\n with open(outdir + paramsfn) as f:\n # for line in f:\n # print line\n for line in f:\n if '# ' not in line:\n (k, val) = line.split('=')\n key = k.strip()\n if key == 'date':\n val = val[:-1].strip()\n print '\\nloading params for: date= ', val\n elif sf.is_number(val):\n # val is a number, so convert to a float\n val = float(val[:-1].strip())\n else:\n '''This should handle tuples without a problem'''\n try:\n # If val has both [] and , --> then it is a numpy array\n # (This is a convention choice.)\n if '[' in val and ',' in val:\n make_ndarray = True\n\n # val might be a list, so interpret it as such using ast\n # val = ast.literal_eval(val.strip())\n exec ('val = %s' % (val.strip()))\n\n # Make array if found '[' and ','\n if make_ndarray:\n val = np.array(val)\n\n except:\n # print 'type(val) = ', type(val)\n # val must be a string\n try:\n # val might be a list of strings?\n val = val[:-1].strip()\n except:\n '''val is a list with a single number'''\n val = val\n if ignore is None:\n params[key] = val\n elif key not in ignore:\n params[key] = val\n\n if key == 'ignore_tris':\n print key, '-->', val\n else:\n print('No params file exists at '+ outdir + paramsfn)\n\n return params",
"def splitparams(path):\n if '/' in path:\n i = path.find(';', path.rfind('/'))\n else:\n i = path.find(';')\n if i < 0:\n return path, ''\n return path[:i], path[i + 1:]",
"def from_dict(base_dir, dictionary):\n p = Params(base_dir)\n for key in dictionary:\n setattr(p, key, dictionary[key])\n p.probs = ProblemName.read_names(dictionary['problems'], p.department)\n\n if p.duration is not None:\n t = datetime.datetime.strptime(p.duration, '%H:%M')\n p.duration = datetime.timedelta(hours=t.hour, minutes=t.minute)\n\n logging.debug(p)\n\n if 'dir' in dictionary:\n p.dir = pathlib.Path(dictionary['dir'])\n\n\n # меняем директорию, относительно которой будет разбирать пути\n p.base_dir = p.resolve_path(p.dir)\n # и определяем где лежат входные файлы\n p.file_data = p.resolve_path(p.file_data)\n p.login_list = p.resolve_path(p.login_list)\n\n # результаты конкретного факультета и контрольной - отдельно от данных\n p.output_dir = p.resolve_path(p.output_dir)\n logging.info(f'output_dir={p.output_dir} dep={p.department} stage={p.stage}')\n p.output_dir = p.output_dir / p.department / p.stage\n # директория выходных данных, создаем ее\n logging.info(f'Output directory is {p.output_dir.resolve()}')\n if not p.output_dir.exists():\n p.output_dir.mkdir(parents=True)\n\n return p",
"def _get_params_filepath(self):\n\t\treturn os.path.join(self.workdir, \"params.txt\")",
"def construct_params(self, params):\n params_list = [params['market'], params['currency'], params['locale'], params['entityid'], params[\n 'checkindate'], params['checkoutdate'], params['guests'], params['rooms']]\n\n params_path = '/'.join(str(p) for p in params_list)\n\n return params_path",
"def build_path_parameters(self):\n url_params = URL_PARAMS_PATTERN.findall(self.path)\n params = []\n\n for param in url_params:\n params.append({\n 'name': param,\n 'type': 'string',\n 'in': 'path',\n 'required': True\n })\n\n return params",
"def get_params():\n \n params = {}\n if len(argv)!= 4:\n params['input_dir'] = raw_input('Please enter the input directory path: ')\n params['output_dir'] = raw_input('Please enter the output directory path: ')\n params['star_file'] = raw_input('Please enter the path to the starlist file: ') \n else:\n params['input_dir'] = argv[1]\n params['output_dir'] = argv[2]\n params['star_file'] = argv[3]\n\n return params",
"def __dir_arg_parse(directory: Directory, directory_path: str) -> Entry:\n dir_split = directory_path.split(\"/\")\n for target in dir_split:\n if target == \"..\":\n if directory.get_parent():\n directory = directory.get_parent()\n elif directory.get_name() != target and target != \".\":\n directory = directory.get_entry(target)\n return directory",
"def make_folders(params, parameter_filename='parameters.yaml'):\n\n # Check that target path exists, create if not\n check_path(params['path']['features'])\n check_path(params['path']['feature_normalizers'])\n check_path(params['path']['models'])\n check_path(params['path']['results'])\n\n # Save parameters into folders to help manual browsing of files.\n\n # Features\n feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)\n if not os.path.isfile(feature_parameter_filename):\n save_parameters(feature_parameter_filename, params['features'])\n\n # Feature normalizers\n feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)\n if not os.path.isfile(feature_normalizer_parameter_filename):\n save_parameters(feature_normalizer_parameter_filename, params['features'])\n\n # Models\n model_features_parameter_filename = os.path.join(params['path']['base'],\n params['path']['models_'],\n params['features']['hash'],\n parameter_filename)\n if not os.path.isfile(model_features_parameter_filename):\n save_parameters(model_features_parameter_filename, params['features'])\n\n model_models_parameter_filename = os.path.join(params['path']['base'],\n params['path']['models_'],\n params['features']['hash'],\n params['classifier']['hash'],\n parameter_filename)\n if not os.path.isfile(model_models_parameter_filename):\n save_parameters(model_models_parameter_filename, params['classifier'])\n\n # Results\n # Save parameters into folders to help manual browsing of files.\n result_features_parameter_filename = os.path.join(params['path']['base'],\n params['path']['results_'],\n params['features']['hash'],\n parameter_filename)\n if not os.path.isfile(result_features_parameter_filename):\n save_parameters(result_features_parameter_filename, params['features'])\n\n result_models_parameter_filename = os.path.join(params['path']['base'],\n params['path']['results_'],\n params['features']['hash'],\n params['classifier']['hash'],\n parameter_filename)\n if not os.path.isfile(result_models_parameter_filename):\n save_parameters(result_models_parameter_filename, params['classifier'])\n\n result_detector_parameter_filename = os.path.join(params['path']['base'],\n params['path']['results_'],\n params['features']['hash'],\n params['classifier']['hash'],\n params['detector']['hash'],\n parameter_filename)\n if not os.path.isfile(result_detector_parameter_filename):\n save_parameters(result_detector_parameter_filename, params['detector'])",
"def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path",
"def _parse_directories(d):\n for k, v in d.items():\n if isinstance(v, abc.Mapping):\n _parse_directories(v)\n else:\n d[k] = os.path.expandvars(v)\n return d",
"def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)",
"def _dirname(self, key):\n return os.path.join(self.root, key[:2])",
"def extractparam(pardict, parsuffix):\n return next(v for k, v in pardict.items() if k.endswith(parsuffix))",
"def construct_params(self, params):\n params_list = [params['country'], params['currency'], params['locale'], params[\n 'originplace'], params['destinationplace'], params['outbounddate']]\n\n if params.get('inbounddate', None):\n params_list.append(params.get('inbounddate', None))\n\n params_path = '/'.join(params_list)\n\n return params_path",
"def get_last_dir(path):\n head, tail = split(path)\n while not tail:\n head, tail = split(head)\n return tail",
"def find_dir(dirs):\n skf_dir = None\n for d in dirs:\n if os.path.isdir(d):\n return d\n raise RuntimeError(\"Cannot find the dirtory for dftb parameters\")",
"def split_leading_directory(file_path):\n\tdelim = '/'\n\tpath_split = file_path.split(delim)\n\tif len(path_split) > 0:\n\t\tlead_dir = path_split[0]\n\telse:\n\t\tlead_dir = ''\n\tif len(path_split) > 1:\n\t\trest = delim.join(path_split[1:])\n\telse:\n\t\trest = ''\n\treturn lead_dir, rest",
"def ND_params_dir(directory=None, default_ND_params=None):\n if default_ND_params is None:\n try:\n default_ND_params = get_default_ND_params(directory)\n except KeyboardInterrupt:\n # Allow C-C to interrupt\n raise\n except Exception as e:\n raise ValueError('Problem with flats in ' + directory + ': ' + str(e))\n \n if persistent_default_ND_params is not None:\n default_ND_params = persistent_default_ND_params\n\n # Collect file names\n files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n\n objs = []\n for f in sorted(files):\n if 'flat' in f.lower():\n pass\n elif 'bias' in f.lower():\n pass \n elif 'dark' in f.lower():\n pass\n else:\n objs.append(os.path.join(directory, f))\n\n start = time.time()\n\n count = 0\n torus_count = 0\n Na_count = 0\n for count, f in enumerate(objs):\n D.say(f)\n try:\n O = IoIO.CorObsData(f, default_ND_params=default_ND_params)\n if O.header[\"EXPTIME\"] == 300:\n if O.header[\"FILTER\"] == \"[SII] 6731A 10A FWHM\":\n torus_count += 1\n if O.header[\"FILTER\"] == \"Na 5890A 10A FWHM\":\n Na_count += 1\n \n D.say(O.obj_center)\n if O.obj_to_ND > 30:\n log.warning('Large dist: ' + str(int(O.obj_to_ND)))\n except KeyboardInterrupt:\n # Allow C-C to interrupt\n raise\n except Exception as e:\n log.error('Skipping: ' + str(e)) \n\n elapsed = time.time() - start\n\n return((count, torus_count, Na_count, elapsed, count/elapsed))",
"def parse_params(theta):\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params",
"def get_sort_params(input_params, default_key='created_at',\n default_dir='desc'):\n params = input_params.copy()\n sort_keys = []\n sort_dirs = []\n\n while 'sort_key' in params:\n sort_keys.append(params.pop('sort_key').strip())\n while 'sort_dir' in params:\n sort_dirs.append(params.pop('sort_dir').strip())\n if len(sort_keys) == 0 and default_key:\n sort_keys.append(default_key)\n if len(sort_dirs) == 0 and default_dir:\n sort_dirs.append(default_dir)\n return sort_keys, sort_dirs",
"def split_params(param_string):\n\t#TODO: check for negatives i.e. alpha--1\n\tparts = param_string.split('_')\n\tparams = {}\n\n\tfor i in range(len(parts)):\n\t\tparam = split_items(parts[i])\n\t\tif len(param) < 2:\n\t\t\ttry:\n\t\t\t\tparts[i+1] = parts[i] + \"_\" + parts[i+1]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\telif len(param) == 2:\n\t\t\tparams[param[0]] = param[1]\n\t\telif len(param) == 3 and len(param[1]) == 0:\n\t\t\tparams[param[0]] = -param[2]\n\t\telse:\n\t\t\tparams[param[0]] = param[1:]\n\treturn params",
"def get_params(param_file_path):\n\n config = ConfigParser.SafeConfigParser()\n config.read(param_file_path)\n\n # Get \"raw\" dictionaries from `config` object\n raw_params = dict(config.items('general'))\n raw_cosmo_params = dict(config.items('cosmology'))\n raw_io_params = dict(config.items('io'))\n raw_binning_params = dict(config.items('binning'))\n raw_maps_to_stack_params = dict(config.items('maps_to_stack'))\n raw_map_path_params = dict(config.items('map_path'))\n raw_map_file_params = dict(config.items('map_file'))\n raw_noise_path_params = dict(config.items('map_path'))\n raw_noise_file_params = dict(config.items('noise_file'))\n raw_beams_params = dict(config.items('beams'))\n raw_color_correction_params = dict(config.items('color_correction'))\n raw_catalogs_params = dict(config.items('catalogs'))\n\n raw_io_params['param_file_path'] = os.path.abspath(param_file_path) # Store parameter file path\n\n # Convert \"raw\" config dictionary to \"organized\" dictionary `params`\n params = get_general_params(raw_params) \n params['io'] = get_io_parameters(raw_io_params)\n params['cosmo'] = get_cosmology_parameters(raw_cosmo_params)\n params['map_files'] = get_maps_parameters(raw_maps_to_stack_params,raw_map_path_params,raw_map_file_params)\n params['noise_files'] = get_maps_parameters(raw_maps_to_stack_params,raw_noise_path_params,raw_noise_file_params)\n params['wavelength'] = get_wavelength_parameters(raw_maps_to_stack_params)\n params['psfs'] = get_beams_parameters(raw_maps_to_stack_params,raw_beams_params)\n params['color_correction'] = get_color_correction_parameters(raw_maps_to_stack_params,raw_color_correction_params)\n params['catalogs'] = get_catalogs_parameters(raw_catalogs_params)\n params['bins'] = get_binning_parameters(raw_binning_params)\n params['library_keys'] = params['map_files'].keys()\n\n logging.info(\"---------- PARAMETER VALUES ----------\")\n logging.info(\"======================================\")\n logging.info(\"\\n\" + pprint.pformat(params, indent=4) + \"\\n\")\n\n #pdb.set_trace()\n return params",
"def params():\n return utils.Params('../experiments/base-model/params.json')",
"def write_initparams(params, outdir, padding_var=7, paramsfn='parameters', skiplat=False, skipglat=False):\n paramfile = outdir + paramsfn + '.txt'\n with open(paramfile, 'w') as myfile:\n myfile.write('# Parameters\\n')\n\n dio.ensure_dir(outdir)\n for key in params:\n if key == 'reg1' or key == 'reg2' or key == 'reg3':\n np.savetxt(outdir + key + '.txt', params[key], fmt='%d', delimiter=',', header=key + ' particle IDs')\n if key == 'xyv0':\n np.savetxt(outdir + 'xyv0.txt', params['xyv0'], delimiter=',',\n header='xy0 (initial positions) v0 (initial velocities)')\n elif key == 'xy':\n if not skiplat:\n np.savetxt(outdir + 'xy.txt', params['xy'], delimiter=',',\n header='xy0 (undeformed lattice positions from mesh)')\n elif key == 'KL':\n if not skiplat:\n np.savetxt(outdir + 'KL.txt', params['KL'], fmt='%i', delimiter=',',\n header='KL (Bond Connectivity List)')\n elif key == 'NL':\n if not skiplat:\n np.savetxt(outdir + 'NL.txt', params['NL'], fmt='%i', delimiter=',', header='NL (Neighbor List)')\n elif key == 'BND':\n np.savetxt(outdir + 'BND.txt', params['BND'], fmt='%i', header='BND (Boundary List)')\n elif key == 'OmK':\n if not skipglat:\n np.savetxt(outdir + 'OmK.txt', params['OmK'], fmt='%f', delimiter=',',\n header='OmK (spring frequency array, for Nash limit: (-1)^(c+b)kl^2/Iw')\n elif key == 'OmG':\n if not skipglat:\n np.savetxt(outdir + 'Omg.txt', params['OmG'], fmt='%f', delimiter=',',\n header='Omg (gravitational frequency array, for Nash limit: (-1)^(c+1)mgl/Iw')\n elif key == 'LVUC':\n if not skiplat:\n np.savetxt(outdir + 'LVUC.txt', params['LVUC'], fmt='%i', delimiter=',',\n header='Lattice Vector and Unit cell vector coordinates')\n else:\n with open(paramfile, 'a') as myfile:\n # print 'Writing param ', str(key)\n # print ' with value ', str(params[key])\n # print ' This param is of type ', type(params[key])\n\n if isinstance(params[key], str):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + params[key] + '\\n')\n elif isinstance(params[key], np.ndarray):\n # print params[key].dtype\n if key == 'BIND':\n print 'BIND = ', str(params[key]).replace('\\n', '')\n\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + \", \".join(np.array_str(params[key]).split()).replace('[,', '[') + '\\n')\n # if params[key].dtype == 'float64':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ np.array_str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # elif params[key].dtype == 'int32':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # else:\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n elif isinstance(params[key], list):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + str(params[key]) + '\\n')\n else:\n # print key, ' = ', params[key]\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + '{0:.12e}'.format(params[key]) + '\\n')\n\n # elif key == 'LV':\n # np.savetxt(outdir+'LV.txt',params['LV'], fmt='%18e',delimiter=',', header='Lattice Vector coordinates')\n # elif key == 'UC':\n # np.savetxt(outdir+'UC.txt',params['UC'], fmt='%18e',delimiter=',', header='Unit cell vector coordinates')\n #\n # elif key == 'h':\n # with open(outdir+'h.txt', \"w\") as hfile:\n # hfile.write(\"# h (time step) \\n{0:5e}\".format(h) )\n # elif key == 'beta':\n # with open(outdir+'beta.txt', \"w\") as 
betafile:\n # betafile.write(\"# beta (damping coeff) \\n{0:5e}\".format(beta) )"
] |
[
"0.5661217",
"0.5205796",
"0.5171829",
"0.5156654",
"0.51377976",
"0.5104749",
"0.50199026",
"0.49683952",
"0.4960869",
"0.49373043",
"0.49353787",
"0.49041578",
"0.4877014",
"0.48736858",
"0.4725308",
"0.4718489",
"0.47135746",
"0.47031134",
"0.4695834",
"0.46458367",
"0.46410954",
"0.46338117",
"0.46214563",
"0.461692",
"0.46150386",
"0.4608448",
"0.45858425",
"0.4575223",
"0.4555458",
"0.45547786"
] |
0.800422
|
0
|
Join a number to the end of a string in the standard way. If width is provided, the number is zero-padded (backfilled) to that width. >>> join_number('fred', 10) 'fred10' >>> join_number('fred', 10, 3) 'fred010'
|
def join_number(string, num, width=None):
    num = str(num)
    if width:
        num = num.rjust(width, '0')
    return string + num
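A minimal usage sketch (assuming join_number as defined above is in scope), generating zero-padded, numbered names:

for i in range(3):
    print(join_number('run', i, width=3))
# run000
# run001
# run002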
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pad(number, width=0):\n return str(number).zfill(width)",
"def pad_number(number, length):\n\n string_number = str(number)\n number_of_zeros = length - len(string_number)\n if number_of_zeros >= 0:\n return \"0\" * number_of_zeros + string_number\n else:\n return string_number",
"def format(number):\n number = compact(number)\n return ' '.join((number[:2], number[2:5], number[5:8], number[8:]))",
"def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x",
"def format_number(num):\n result = \" \" + str(num) + \" \"\n if num < 10:\n result = result + \" \"\n return result",
"def padding_zeroes(number, length_string):\n return str(number).zfill(length_string)",
"def pad(value, digits, to_right=False):\n len_val = len(value)\n assert len_val <= digits\n rem_digits = digits - len_val\n if to_right:\n return value + \"0\"*rem_digits\n else:\n return \"0\"*rem_digits + value",
"def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:\n return str(val).rjust(width, fillchar)",
"def add_leading_zero(num):\n formated = \"\"\n if num <= 9:\n formated = \"0\" + str(num)\n else:\n formated = str(num)\n \n return formated",
"def join(self):\n return \"\".join(self._digits)",
"def str_fill(i, n):\r\n return str(i).zfill(n)",
"def r_pad(arg, length):\n if length <= len(arg):\n return arg\n else:\n return arg + \" \" * (length - len(arg))",
"def format_num(num):\n if num is None:\n return num\n if (num < 10):\n return '0' + str(num)\n return str(num)",
"def format(number):\n number = compact(number)\n return '-'.join([\n number[:2],\n number[2:6],\n number[6:13],\n number[13:]])",
"def rjust(value, length):\n\n if value is None or value == '':\n value = '0'\n else:\n value = str(value)\n value = value.rjust(length, '0')\n return value",
"def format(number, separator=' '):\n number = compact(number)\n return separator.join((number[0:3], number[3:6], number[6:]))",
"def str_or_dots(number, length):\n if number is None:\n return '.'*length\n else:\n format_type = {\n 'int': 'd',\n 'float': '.0f',\n }[type(number).__name__]\n return ''.join(('%0',str(length),format_type)) % number",
"def print_last_digit(number):\n\n ld = abs(number) % 10\n print(ld, end=\"\")\n return ld",
"def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s",
"def num_string(length):\n import math\n base = 10\n lines = []\n for ndigit in range(int(math.log10(length)) + 1):\n interval = int(math.pow(base, ndigit))\n lines.append(''.join(\n (str(n % base) + (' ' * (interval-1)))\n for n in range((length - 1) // interval + 1)))\n return '\\n'.join(lines)",
"def str_padding(length, val):\n return '{0:<{fill}}'.format(val, fill=length)",
"def _padright(width, s):\n fmt = \"{0:<%ds}\" % width\n return fmt.format(s)",
"def pad_to_three_digits(my_int):\n my_int_length = len(str(my_int))\n result = \"\"\n if my_int_length == 3:\n result = str(my_int)\n elif my_int_length == 2:\n result = \"{:0>2d}\".format(my_int)\n elif my_int_length == 1:\n result = \"{:0>3d}\".format(my_int)\n return result",
"def _bin_zfill(num, width=None):\n s = bin(num)[2:]\n return s if width is None else s.zfill(width)",
"def number2patten(number, length):\n if length == 1:\n return NUMBER_TO_BASE[number]\n prefix_index = number // 4\n base = NUMBER_TO_BASE[number % 4]\n return number2patten(prefix_index, length - 1) + base",
"def format_number(separator, n):\n n_s = str(n)\n if len(n_s) <= 3:\n return n_s\n else:\n upper = n_s[:-3]\n lower = n_s[-3:]\n return format_number(separator, upper) + separator + lower",
"def compact(number):\n number = clean(number).strip().replace(' ', '-').split('-')\n if len(number) == 4:\n # zero pad the different sections if they are found\n lengths = (2, 4, 7, 3)\n return ''.join(n.zfill(l) for n, l in zip(number, lengths))\n else:\n # otherwise zero pad the account type\n number = ''.join(number)\n return number[:13] + number[13:].zfill(3)",
"def _rzfill(string, to_len):\n if len(string) > to_len:\n raise ValueError(\"string is already longer than to_len\")\n return string + '0' * (to_len - len(string))",
"def leadingzero(number, minlength):\n\n # Return the number as a string with the filled number\n return str(number).zfill(int(minlength))",
"def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num"
] |
[
"0.7575873",
"0.6719598",
"0.6585075",
"0.6569616",
"0.6502219",
"0.6435242",
"0.6325695",
"0.63152623",
"0.63049746",
"0.6270025",
"0.6255137",
"0.62551343",
"0.6232007",
"0.6167308",
"0.61607224",
"0.61002517",
"0.60625637",
"0.59663886",
"0.5961309",
"0.59325504",
"0.59198487",
"0.5918809",
"0.5896388",
"0.58798975",
"0.5853914",
"0.58416003",
"0.58302915",
"0.5819328",
"0.58108765",
"0.58021235"
] |
0.8679115
|
0
|
Splits off a number from the end of the string and returns a (string, number) tuple >>> split_number('rawdata.txt500') ('rawdata.txt', 500) >>> split_number('squareboxsquare2.5') ('squareboxsquare', 2.5) >>> split_number('fred') ('fred', None) >>> split_number('fredjones') ('fredjones', None) >>> split_number(0) ('', 0) >>> split_number('0') ('', 0) >>> print(split_number([0])) None
|
def split_number(string):
    import re  # used to peel a trailing number off the string; a module-level import would also do
    try:
        match = re.search(r'-?\d+\.?\d*$', string)
    except TypeError:
        # Not a string: hand a plain number back unchanged, reject other types
        try:
            string * string
            return ('', string)
        except TypeError:
            return None
    if match is None:
        return (string, None)
    num_str = match.group()
    num = float(num_str) if '.' in num_str else int(num_str)
    return (string[:match.start()], num)
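A quick round-trip sanity check (a sketch assuming the join_number and split_number definitions above are both in scope); the zero padding is not preserved, but the (string, number) pair is:

assert split_number(join_number('rawdata.txt', 500)) == ('rawdata.txt', 500)
assert split_number(join_number('fred', 10, 3)) == ('fred', 10)
assert split_number('fred') == ('fred', None)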
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def split_num(s):\n i = 0\n while i < len(s):\n if s[i] < '0' or s[i] > '9':\n break\n i += 1\n if s[i:]:\n return (int(s[:i]), s[i:], )\n return (int(s[:i]), )",
"def split(n):\n rest_of_num, last_num = n // 10, n % 10\n return rest_of_num, last_num",
"def split_num(a_str):\n idx = None\n for i in iter(a_str):\n if i.isdigit():\n idx = a_str.index(i)\n break\n if idx == None:\n return (a_str[:idx], int('1'))\n else:\n return (a_str[:idx], int(a_str[idx:]))",
"def split_str_digit(s):\n res = []\n for m in re.finditer('(\\d*)(\\D*)', s):\n for g in m.groups():\n if g != '':\n try:\n res.append(int(g))\n except ValueError:\n res.append(g)\n return tuple(res)",
"def num_split(num):\r\n num = list(str(num))\r\n return [int(i) for i in num]",
"def get_number(x):\n\n return re.findall(r'\\d+', x)[0]",
"def extract_only_number(x):\n\tr = re.findall(r'\\d+', x)\n\tif len(r) > 0:\n\t\treturn r[0]\n\treturn 0",
"def splitTag(my_tag):\n my_split = re.findall(r'(\\d+)(\\D+)', my_tag)\n return ((int(x[0]), x[1]) for x in my_split)",
"def get_valid_digits(self, part: str) -> Optional[Tuple[str, str]]:\n digits = part.split(\"_\")\n digit1 = digits[0]\n digit2 = digits[1]\n if not digit1.isdigit() or not digit2.isdigit():\n return None\n return digit1, digit2",
"def ending_is_number(document_id):\n logger.debug('Function Successful: % s',\n 'ending_is_number: ending_is_number successfully called from process_doc', extra=d)\n logger.info('Ensuring document ID ends in a number...')\n\n logger.debug('Calling Function: % s',\n 'ending_is_number: ending_is_number calling split', extra=d)\n list = re.split(\"-\", document_id)\n logger.debug('Function Successful: % s',\n 'ending_is_number: ending_is_number successfully called split', extra=d)\n\n number = list[-1]\n\n logger.debug('Returning: %s',\n 'ending_is_number: returning the number', extra=d)\n logger.info('The ending of the document ID is, in fact, a number')\n\n return number.isdigit()",
"def get_last_number(s:str):\n array = re.findall(r'[0-9]+', s)\n if array.__len__() is 0:\n return -1\n return int(array[-1])",
"def _urlparse_splitfragment(url):\r\n\r\n fpart = url.split(\"#\", 1)\r\n if len(fpart) == 2:\r\n fragment = fpart[1]\r\n else:\r\n fragment = \"\"\r\n\r\n return fpart[0], fragment",
"def extract_number(word):\n number_flag = True\n number = ''\n word = word.rstrip('.').lstrip('.')\n for char in word:\n try:\n if char == '.' and number_flag:\n number += char\n else:\n int(char)\n if number_flag:\n number += char\n except:\n if len(number) > 0:\n number_flag = False\n continue\n return number",
"def return_num(strnum):\r\n if strnum != strnum or strnum == ' ':\r\n return -1\r\n else:\r\n strnum = re.sub('[^1-9]', '', str(strnum))\r\n return int(strnum)",
"def extract_digits(cls, phone_number):\n extracted_num = \"\"\n for ch in phone_number:\n if ch in cls.INTEGER_STRING:\n extracted_num += ch\n return extracted_num",
"def get_valid_num(cls, phone_number):\n if type(phone_number) != str:\n return None\n elif cls.MAX_NUM_LEN < len(phone_number):\n return None\n else:\n extracted_num = cls.extract_digits(phone_number)\n if len(extracted_num) != cls.NUM_OF_DIGITS:\n return None\n return extracted_num",
"def get_number(word):\n return int(re.match(NUMBER, word).group(1))",
"def _parseNumbers(s):\n ss = utils.unclump(s)\n\n m3 = re.match('^\\d+$', ss)\n if m3 is not None:\n return decimal.Decimal(round(float(ss), 2))\n\n m1 = re.match(r'(\\d+)\\s+(\\d)/(\\d)', ss)\n if m1 is not None:\n num = int(m1.group(1)) + (float(m1.group(2)) / float(m1.group(3)))\n return decimal.Decimal(str(round(num, 2)))\n\n m2 = re.match(r'^(\\d)/(\\d)$', ss)\n if m2 is not None:\n num = float(m2.group(1)) / float(m2.group(2))\n return decimal.Decimal(str(round(num, 2)))\n\n return None",
"def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z",
"def parse_number():\n nonlocal idx\n num = \"\"\n def parse_digits():\n nonlocal idx\n num = \"\"\n while idx < len(source) and is_num_char(source[idx]):\n num += source[idx]\n idx += 1\n return num\n # Parse initial numbers\n oidx = idx\n num += parse_digits()\n if idx < len(source) and source[idx] == '.': # if we find a dot\n # Parse out the second part of the number string\n idx += 1\n num += (\".\" + parse_digits())\n if idx < len(source) and not terminal(source[idx]): # the number didn't terminate... this is an identifier\n idx = oidx\n return parse_symbol()\n idx -= 1 # Backtrack, bc last character is *invalid* and loop assumes we stop on a valid token character\n return num",
"def extract_int(text):\n m = re.search(r\"\\d+\", text)\n if m is not None:\n return m.group(0)",
"def split_extended_slug(slug):\n if not slug:\n return None, None, 0, 0\n\n parts = slug.rsplit('/')\n\n if len(parts) == 1:\n return parts[0], None, 0, 0\n elif len(parts) == 2:\n return parts[0], parts[1], 0, 0\n\n build_id, sep, job_id = parts[2].partition('.')\n build_id = int(build_id)\n if job_id:\n job_id = int(job_id)\n\n return parts[0], parts[1], build_id, job_id",
"def _number_finder(s, regex, numconv, py3_safe):\n\n # Split. If there are no splits, return now\n s = regex.split(s)\n if len(s) == 1:\n return tuple(s)\n\n # Now convert the numbers to numbers, and leave strings as strings\n s = remove_empty(s)\n for i in py23_range(len(s)):\n try:\n s[i] = numconv(s[i])\n except ValueError:\n pass\n\n # If the list begins with a number, lead with an empty string.\n # This is used to get around the \"unorderable types\" issue.\n # The _py3_safe function inserts \"\" between numbers in the list,\n # and is used to get around \"unorderable types\" in complex cases.\n # It is a separate function that needs to be requested specifically\n # because it is expensive to call.\n if not isinstance(s[0], py23_basestring):\n return _py3_safe([''] + s) if py3_safe else [''] + s\n else:\n return _py3_safe(s) if py3_safe else s",
"def split_string(line, nth):\n return [int(line[i:i+nth]) for i in range(0, len(line), nth)]",
"def split_name(process_name):\n identifier, box_name = process_name.split(\"-\")\n identifier = int(identifier)\n if Ibox.itersep in box_name:\n box_exec_name = box_name.split(\".\")[0]\n box_iter_name, iteration = box_exec_name.split(Ibox.itersep)\n iteration = int(iteration)\n else:\n box_exec_name = None\n box_iter_name = None\n iteration = None\n return identifier, box_name, box_exec_name, box_iter_name, iteration",
"def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query",
"def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0",
"def split(value: str, sep: str = \":\") -> Tuple:\n left, _, right = value.partition(sep)\n return (left, right) if right else (None, left)",
"def __read_number(self, buffer):\n\t\tret = []\n\t\ttoken = buffer.read(1)\n\t\twhile token.isdigit():\n\t\t\tret.append(token)\n\t\t\ttoken = buffer.read(1)\n\t\tbuffer.seek(-1, SEEK_CUR)\n\t\treturn int(\"\".join(ret) or 0)",
"def split_string_at_numbers(string):\n split_list = re.compile(r'(\\d+)').split(string)\n filtered_list = []\n skip_next_loops = 0\n for i in range(len(split_list)):\n if skip_next_loops > 0:\n skip_next_loops -= 1\n continue\n part = split_list[i]\n if part.isdigit() or (part == '.' and i < len(split_list) - 1 and split_list[i + 1].isdigit()):\n # Some kind of number\n if part == '.':\n # number of format '.###' (start of string)\n part += split_list[i + 1]\n skip_next_loops = 1\n elif i < len(split_list) - 2 and split_list[i + 1] == '.' and split_list[i + 2].isdigit():\n # number of format '###.###'\n part += split_list[i + 1] + split_list[i + 2]\n skip_next_loops = 2\n elif (i > 0 and len(filtered_list) and len(filtered_list[-1]) and\n filtered_list[-1][-1] == '.'):\n # number of format '.###' (within string)\n filtered_list[-1] = filtered_list[-1][:-1]\n part = '.' + part\n # otherwise just number of format '###'\n factor = 1\n if i < len(split_list) - 1:\n # check next part for unit information\n msg = split_list[i + 1].strip()\n msg = msg.lstrip('_([{')\n msg = re.split('[^a-zA-Zµ]', msg)[0]\n if msg:\n for unit in tools.science.UNIT_SYMBOLS:\n if msg.endswith(unit):\n msg = msg[:-len(unit)]\n break\n if len(msg) == 1:\n factor = 10**tools.science.SI_PREFIXES.get(msg[0], 0)\n filtered_list.append(float(part)*factor)\n else:\n # Actual string\n filtered_list.append(part)\n return filtered_list"
] |
[
"0.6397843",
"0.6025169",
"0.60193896",
"0.58248246",
"0.5821307",
"0.5711017",
"0.54345703",
"0.53441983",
"0.53429466",
"0.5247775",
"0.5150985",
"0.5026478",
"0.50150573",
"0.50136864",
"0.4990291",
"0.49878863",
"0.49797055",
"0.49792823",
"0.49334368",
"0.49008104",
"0.48950312",
"0.4881977",
"0.48791885",
"0.48721522",
"0.48714793",
"0.48704097",
"0.48678517",
"0.48178783",
"0.48172188",
"0.48068833"
] |
0.70436335
|
0
|
Splits a parameter string into its key/value pairs >>> d = split_params('alpha0.5_gamma0.9') >>> d['alpha'] 0.5 >>> d['gamma'] 0.9 >>> d = split_params('depth15_featuresabc') >>> d['depth'] 15 >>> d['features'] ['a', 'b', 'c'] >>> d = split_params('alpha0.1_lab_trace_rateNone') >>> d['alpha'] 0.1 >>> d['l'] ['a', 'b'] >>> d['trace_rate'] >>> print(d['trace_rate']) None >>> split_params('abc')
|
def split_params(param_string):
    #TODO: check for negatives i.e. alpha--1
    parts = param_string.split('_')
    params = {}
    for i in range(len(parts)):
        param = split_items(parts[i])
        if len(param) < 2:
            # No value found: glue this part onto the next one (e.g. 'trace'
            # + 'rateNone' -> 'trace_rateNone') and parse it on the next pass.
            try:
                parts[i+1] = parts[i] + "_" + parts[i+1]
            except IndexError:
                pass
            continue
        elif len(param) == 2:
            params[param[0]] = param[1]
        elif len(param) == 3 and len(param[1]) == 0:
            params[param[0]] = -param[2]
        else:
            params[param[0]] = param[1:]
    return params
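split_items is not shown in this excerpt. The following is only a sketch of the behaviour the doctests imply for the numeric/None cases (splitting a token such as 'alpha0.5' into a name and a converted value); the 'featuresabc' and 'lab' examples suggest the real helper also recognises known parameter names and splits leftover letters into single-character values, which this sketch does not attempt:

import re

def split_items(token):
    # Hypothetical helper inferred from the doctests above: peel a trailing
    # number (or the literal 'None') off a token like 'alpha0.5' or 'rateNone'.
    m = re.match(r"([A-Za-z_]+?)(-?\d+\.?\d*|None)$", token)
    if m is None:
        return [token]
    name, raw = m.groups()
    if raw == 'None':
        value = None
    elif '.' in raw:
        value = float(raw)
    else:
        value = int(raw)
    return [name, value]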
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _split_url_string(param_str):\n parameters = parse_qs(param_str, keep_blank_values=False)\n for key, val in parameters.iteritems():\n parameters[key] = urllib.unquote(val[0])\n return parameters",
"def test_splitParamArgs(self):\n res = irc.ServerSupportedFeatures._splitParamArgs([\"A:1\", \"B:2\", \"C:\", \"D\"])\n self.assertEqual(res, [(\"A\", \"1\"), (\"B\", \"2\"), (\"C\", \"\"), (\"D\", \"\")])",
"def test_splitParam(self):\n params = [\n (\"FOO\", (\"FOO\", [\"\"])),\n (\"FOO=\", (\"FOO\", [\"\"])),\n (\"FOO=1\", (\"FOO\", [\"1\"])),\n (\"FOO=1,2,3\", (\"FOO\", [\"1\", \"2\", \"3\"])),\n (\"FOO=A\\\\x20B\", (\"FOO\", [\"A B\"])),\n (\"FOO=\\\\x5Cx\", (\"FOO\", [\"\\\\x\"])),\n (\"FOO=\\\\\", (\"FOO\", [\"\\\\\"])),\n (\"FOO=\\\\n\", (\"FOO\", [\"\\\\n\"])),\n ]\n\n _splitParam = irc.ServerSupportedFeatures._splitParam\n\n for param, expected in params:\n res = _splitParam(param)\n self.assertEqual(res, expected)\n\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\x\")\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\xNN\")\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\xN\")\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\x20\\\\x\")",
"def split(s):\n args_str, kwargs_str = s.split('\\n')\n args = tuple(args_str.split(', '))\n kwargs = {}\n for s in kwargs_str.split(', '):\n k, v = s.split('=')\n kwargs[k] = v\n print args\n print kwargs",
"def splitkv(s):\n a=re.split('(\\w*)\\s*=\\s*\"([^=\"]*)\"\\s*', s)\n a=[ t for t in a if t!='']\n return a",
"def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]",
"def _parse_params(self, params):\r\n if params[0] == \":\":\r\n params = [params[1:]]\r\n else:\r\n params = params.split(\" :\", 1)\r\n if len(params) == 1:\r\n last_arg = None\r\n else:\r\n last_arg = params[1]\r\n params = params[0].split(None)\r\n if last_arg != None:\r\n params.append(last_arg)\r\n return params",
"def _split_url_string(query_string):\r\n parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)\r\n for k, v in parameters.iteritems():\r\n parameters[k] = urllib.unquote(v[0])\r\n return parameters",
"def splitparams(path):\n if '/' in path:\n i = path.find(';', path.rfind('/'))\n else:\n i = path.find(';')\n if i < 0:\n return path, ''\n return path[:i], path[i + 1:]",
"def test_splitParamArgsProcessor(self):\n res = irc.ServerSupportedFeatures._splitParamArgs(\n [\"A:1\", \"B:2\", \"C\"], irc._intOrDefault\n )\n self.assertEqual(res, [(\"A\", 1), (\"B\", 2), (\"C\", None)])",
"def _parse_parameters(parameters):\n\n if not re.match(r'^(\\w+)=\"([^=]+)\"(\\s{1}(\\w+)=\"([^=]+)\")*$', parameters):\n raise ValueError\n\n # first we add tokens that separate key/value pairs.\n # in case of key='ss sss ss', we skip tokenizing when we se the first single quote\n # and resume when we see the second\n replace_space = True\n tokenized = \"\"\n for c in parameters:\n if c == '\\\"':\n replace_space = not replace_space\n elif c == ' ' and replace_space:\n tokenized += \"$$\"\n else:\n tokenized += c\n\n # now get the tokens\n tokens = tokenized.split('$$')\n result = {}\n for token in tokens:\n # separate key/values\n key_value = token.split(\"=\")\n result[key_value[0]] = key_value[1]\n return result",
"def split_params(self, params):\n\t\tindex = 0\n\t\tacc = ''\n\t\tret = [] #return value (is ret a bad name?)\n\t\twhile index < len(params):\n\t\t\tif params[index] == ',': #End of a parameter\n\t\t\t\tret.append(acc)\n\t\t\t\tacc = ''\n\t\t\telif params[index] == '(': #start of a type that is a function\n\t\t\t\tend = params.match_paren(index)\n\t\t\t\twhile index <= end: #so the commas in the function type\n\t\t\t\t\t# are disregarded\n\t\t\t\t\tacc += params[index]\n\t\t\t\t\tindex += 1\n\t\t\t\tcontinue #so index doesn't get incremented again\n\t\t\telse:\n\t\t\t\tacc += params[index]\n\t\t\tindex += 1\n\n\t\tif acc: #if they ended the list with a comma then acc would be ''\n\t\t\tret.append(acc) #parameters not ended with a comma,\n\t\t\t# acc last the last param\n\n\t\treturn ret",
"def split(value, key):\n return str(value).split(key)",
"def get_params(string_in, separator=' ', defaultmissing='-', params_to_get=3):\r\n rtr = str(string_in).split(separator)\r\n if len(rtr) > params_to_get:\r\n rtr = []\r\n rtr.append(str(string_in))\r\n for x in range(0, (params_to_get - len(rtr))):\r\n rtr.append(defaultmissing)\r\n return rtr[0],rtr[1],rtr[2]",
"def split_param(text: str, prefixes: Sequence[str], sep: str) -> tuple[str, str, str]:\n stripped = text.strip()\n if not prefixes:\n prefix = ''\n rest = stripped\n else:\n try:\n prefix = next(filter(stripped.startswith, prefixes))\n except StopIteration:\n prefix = ''\n rest = stripped\n else:\n rest = stripped.split(prefix, maxsplit=1)[1].strip()\n assert len(prefix) >= 1\n assert rest\n arg, part_sep, descr = rest.partition(sep.join((' ', ' ')))\n if not part_sep:\n if rest.endswith(sep):\n arg = rest[:-1]\n elif sep + ' ' in rest:\n arg, _, descr = rest.partition(sep + ' ')\n # if we hit neither then there is no '-' in text, possible case of '[prefix] foo'?\n return prefix, arg.strip(), descr.lstrip()",
"def _split_name(name):\n name_split = name.split('_view_')\n view_num = None\n if(len(name_split) > 1):\n view_num = int(name_split[1])\n optimizer_key = ''\n fp16_key = ''\n if name_split[0].startswith('Moment_1'):\n optimizer_key = 'Moment_1_'\n elif name_split[0].startswith('Moment_2'):\n optimizer_key = 'Moment_2_'\n elif name_split[0].startswith('Update_Count'):\n optimizer_key = 'Update_Count_'\n elif name_split[0].endswith('_fp16'):\n fp16_key = '_fp16'\n param_name = name_split[0]\n if optimizer_key != '':\n param_name = param_name.split(optimizer_key)[1]\n param_name = param_name.split('_fp16')[0]\n return param_name, optimizer_key, view_num, fp16_key",
"def getParams(text):\n s = text.split('=')\n for i in range(len(s)):\n s[i] = s[i].strip()\n\n param = s[0]\n val = s[1]\n # Assume that there are two values only: (variable, value) pair\n assert len(s) == 2\n \n return (param, val)",
"def extract_parameters(self, tokens: list):\n results = {}\n\n for parameter in tokens:\n words = parameter.split('=')\n if len(words) != 2:\n raise Exception('Failed to break \"' + parameter +\n '\" into a \"name=__value\" pair.')\n try:\n value = float(words[1])\n results[words[0]] = value\n except:\n raise Exception('Expecting ' + words[1]\n + ' to be a floating point.')\n\n return results",
"def _split_uri(uri):\n parts = uri.split('/')\n assert '' == parts.pop(0)\n params = []\n res = pkcollections.Dict(params=params)\n in_optional = None\n in_path_info = None\n first = None\n for p in parts:\n assert not in_path_info, \\\n 'path_info parameter={} must be last: next={}'.format(rp.name, p)\n m = _PARAM_RE.search(p)\n if not m:\n assert first is None, \\\n 'too many non-parameter components of uri={}'.format(uri)\n first = p\n continue\n rp = pkcollections.Dict()\n params.append(rp)\n rp.is_optional = bool(m.group(1))\n if rp.is_optional:\n rp.is_path_info = m.group(1) == _PATH_INFO_CHAR\n in_path_info = rp.is_path_info\n else:\n rp.is_path_info = False\n rp.name = m.group(2)\n if rp.is_optional:\n in_optional = True\n else:\n assert not in_optional, \\\n '{}: optional parameter ({}) followed by non-optional'.format(\n uri,\n rp.name,\n )\n res.base_uri = first or ''\n return res",
"def _parse_params( self ):\n paramDic={}\n # Parameters are on the 3rd arg passed to the script\n paramStr=sys.argv[2]\n print paramStr\n if len(paramStr)>1:\n paramStr = paramStr.replace('?','')\n \n # Ignore last char if it is a '/'\n if (paramStr[len(paramStr)-1]=='/'):\n paramStr=paramStr[0:len(paramStr)-2]\n \n # Processing each parameter splited on '&' \n for param in paramStr.split(\"&\"):\n try:\n # Spliting couple key/value\n key,value=param.split(\"=\")\n except:\n key=param\n value=\"\"\n \n key = urllib.unquote_plus(key)\n value = urllib.unquote_plus(value)\n \n # Filling dictionnary\n paramDic[key]=value\n print paramDic\n return paramDic",
"def split_config(s):\n x = re.split(r\";\", s)\n d = {k: v for (k, v) in [i.split(\"=\") for i in x]}\n return d",
"def _parse_params(params):\n for key, value in params.items():\n if value.lower() in ('none', 'null', ''):\n params[key] = None\n elif value.lower() == 'true':\n params[key] = True\n elif value.lower() == 'false':\n params[key] = False\n elif value.isdigit() or (value[0] == '-' and value[1:].isdigit()):\n params[key] = int(value)\n elif ',' in value:\n params[key] = list(map(lambda x: x.strip(), value.split(',')))\n else:\n try:\n params[key] = float(value)\n except:\n pass\n return params",
"def splitQuery(query_string):\n\ttry:\n\t\td = dict([x.split('=') for x in query_string.split('&') ])\n\texcept ValueError:\n\t\td = {}\n\treturn d",
"def parse_line(self, line):\n line = line.strip()\n log.debug(\"Parsing line: '{}'\".format(line))\n if len(line) == 0:\n log.warning(\"Zero length line detected\")\n return\n split = line.split(DELIMETER)\n key = split[0]\n if key in FORMATS:\n log.debug(\"Using formatter for key: {}\".format(key))\n formatter = FORMATS[key]\n for (name, parser), value in zip(formatter,split[1:]):\n self._params[name] = parser(value)\n log.info(\"Parameters: {}\".format(self._params))\n self.notify_watchers()\n else:\n log.debug(\"Invalid key: {}\".format(key))",
"def getModelParameters(parameterstring):\n \n def getFormattedValue(strval):\n if '\\'' in strval:\n return strval.replace('\\'', '')\n elif '\"' in strval:\n return strval.replace('\"', '')\n elif '.' in strval:\n return float(strval)\n elif strval == 'True':\n return True\n elif strval == 'False':\n return False\n else:\n return int(strval)\n \n ((25,),)\n def parseTuple(strval):\n idx = strval.find(\"(\")+1\n values = []\n i = idx\n while i < len(strval):\n if strval[i] == '(':\n nested, lnested = parseTuple(strval[i:])\n print(i)\n i += lnested\n idx = i+1\n print(i)\n values.append(nested)\n elif strval[i] == ')':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n return tuple(values), i\n elif strval[i] == ',':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n idx = i+1\n i += 1\n \n rv = dict()\n if parameterstring is None:\n return rv\n params = parameterstring.strip().split(\"=\")\n nextkey = params[0]\n for pi in range(1,len(params)):\n cur = params[pi]\n if '(' in cur:\n if cur.count(\"(\") != cur.count(\")\"):\n raise InvalidParameters(\"Unequal number of paranthesis.\")\n value, _ = parseTuple(cur)\n rv[nextkey] = value\n nextkey = cur[cur.rfind(',')].strip()\n else:\n commasplit = cur.split(\",\")\n value = commasplit[0].strip()\n rv[nextkey] = getFormattedValue(value)\n nextkey = commasplit[1].strip()\n \n return rv",
"def get_parameter(pstring):\n parameters = pstring.replace(',', ' ').split()\n if len(parameters) == 1:\n init_value = float(parameters[0])\n return (init_value, None, None)\n elif len(parameters) == 3:\n init_value = float(parameters[0])\n if parameters[1].upper() == 'NONE':\n lower_value = None\n else:\n lower_value = float(parameters[1])\n if parameters[2].upper() == 'NONE':\n upper_value = None\n else:\n upper_value = float(parameters[2])\n return (init_value, lower_value, upper_value)\n else:\n raise ValueError('Invalid parameter format: %s' % pstring)",
"def parse_header_parameters(line):\n parts = _parseparam(\";\" + line)\n key = parts.__next__().lower()\n pdict = {}\n for p in parts:\n i = p.find(\"=\")\n if i >= 0:\n has_encoding = False\n name = p[:i].strip().lower()\n if name.endswith(\"*\"):\n # Lang/encoding embedded in the value (like \"filename*=UTF-8''file.ext\")\n # https://tools.ietf.org/html/rfc2231#section-4\n name = name[:-1]\n if p.count(\"'\") == 2:\n has_encoding = True\n value = p[i + 1 :].strip()\n if len(value) >= 2 and value[0] == value[-1] == '\"':\n value = value[1:-1]\n value = value.replace(\"\\\\\\\\\", \"\\\\\").replace('\\\\\"', '\"')\n if has_encoding:\n encoding, lang, value = value.split(\"'\")\n value = unquote(value, encoding=encoding)\n pdict[name] = value\n return key, pdict",
"def get_parameters(file_str):\n\n idx = [x.start() for x in re.finditer(\"_\", file_str)]\n\n # if file_str contains path to pkl files (eg '../results/*.pkl)\n # start reading file name from the last '/'\n if file_str.rfind(\"/res\") != -1:\n start_idx = file_str.rfind(\"/res\") + 4\n else:\n start_idx = 0\n\n n = int(file_str[start_idx : idx[0]])\n m = int(file_str[idx[0] + 1 : idx[1]])\n r = int(file_str[idx[1] + 1 : idx[2]])\n loss = str(file_str[idx[2] + 1 : idx[3]])\n sparsity = float(file_str[idx[3] + 1 : idx[4]])\n method = str(file_str[idx[4] + 1 : idx[5]])\n gamma = float(file_str[idx[5] + 1 : -4])\n\n return n, m, r, loss, sparsity, method, gamma",
"def parse_params(params):\n pairs = params.split(' ')\n content = dict()\n for key, value in [pair.split('=') for pair in pairs]:\n content[key] = int(value)\n return content",
"def split_url_and_query_params(url):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n url = urlunsplit((scheme, netloc, path, None, fragment))\n return url, query_params"
] |
[
"0.66845363",
"0.6530655",
"0.6480405",
"0.6348581",
"0.62863165",
"0.61444205",
"0.60926306",
"0.6052062",
"0.6042144",
"0.6038052",
"0.59232545",
"0.58511275",
"0.5802076",
"0.5720275",
"0.5705016",
"0.5694049",
"0.5687249",
"0.56427014",
"0.56421286",
"0.5573257",
"0.5479929",
"0.54708475",
"0.5467517",
"0.54115474",
"0.5393125",
"0.5390923",
"0.5387419",
"0.53628916",
"0.53622854",
"0.53486174"
] |
0.7688976
|
0
|
Removes modifiers from the given string and returns the original name plus a list of the modifiers present (checked against mod_set if provided) >>> split_modifiers('joint_active_scaled_return', ['trace', 'scaled', 'return']) ('joint_active', ['scaled', 'return']) >>> split_modifiers('joint_active_scaled') ('joint', ['active', 'scaled'])
|
def split_modifiers(mod_string, mod_set=None):
parts = mod_string.split('_')
if mod_set is None:
return (parts[0], parts[1:])
name = [parts[0]]
mods = []
for p in parts[1:]:
if p in mod_set:
mods.append(p)
else:
name.append(p)
return ('_'.join(name), mods)
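A short illustration (assuming split_modifiers as defined above): without mod_set every '_'-separated suffix is treated as a modifier, which splits multi-word names, so pass the known modifiers to keep them intact:

print(split_modifiers('trace_rate_scaled'))
# ('trace', ['rate', 'scaled'])
print(split_modifiers('trace_rate_scaled', mod_set=['scaled']))
# ('trace_rate', ['scaled'])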
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def modifiers(m) -> Set[str]:\n return set(m[\"modifier_list\"])",
"def split(string, separator, keep_separator):\n\t\t\tparts = string.split(separator)\n\t\t\tif keep_separator:\n\t\t\t\t*parts, last_part = parts\n\t\t\t\tparts = [part + separator for part in parts]\n\t\t\t\tif last_part:\n\t\t\t\t\treturn parts + [last_part]\n\t\t\treturn parts",
"def SplitBehavior(behavior):\n return [x for x in re.split('[ ()\"-.,]', behavior) if len(x) > 0]",
"def remove_modifiers(*values, sort=False, mod_set=None):\n\tfeatures = []\n\tfor f in values:\n\t\t(name, mods) = split_modifiers(f, mod_set=mod_set)\n\t\tif name not in features:\n\t\t\tfeatures.append(name)\n\tif sort:\n\t\tfeatures.sort()\n\treturn features",
"def split(self, string):\n return (re.split('; |, |: |\"(\"|\"(\"|;|,|:| |', string))",
"def split( self, string ):\n splitted_string = []\n \n str_len = len( string )\n i = 0\n for j in range( str_len ):\n if string[j] in self.delimiters:\n if i != j:\n splitted_string.append( string[i:j] )\n i = j+1\n \n if i != j:\n splitted_string.append( string[i:j+1] )\n \n return splitted_string",
"def _split(string):\n out = [\"\", \"\"]\n for i in string:\n if i.isalpha():\n out[0] += i\n elif i.isnumeric() or i == \".\":\n out[1] += i\n return out",
"def split(self, string):\n if self.chars:\n return list(string)\n else:\n return string.split(' ')",
"def split_str(cmdline_str, has_options):\n return Splitter.split_list(shlex.split(cmdline_str), has_options)",
"def split(\n string: str,\n splitters: Union[str, List[str]],\n count: Optional[int] = None,\n removeEmpty: int = 0,\n) -> List[str]:\n\n if count and count < 0:\n raise ValueError(\"Count cannot be less than zero\")\n\n if count == 0:\n return []\n\n if isinstance(splitters, str):\n if not removeEmpty:\n return string.split(splitters, count - 1 if count else -1)\n\n splitters = [splitters]\n\n splitters = [escape(x) for x in splitters] or [\" \"]\n\n i = 0\n splits: List[str] = []\n matches = re.finditer(\"|\".join(splitters), string)\n for m in matches:\n if count is not None and count <= 1:\n break\n\n split = string[i : m.start()]\n if split or not removeEmpty:\n splits.append(split)\n\n count = count - 1 if count is not None else count\n\n i = m.end()\n\n if (count is None or count and count > 0) and len(string) - i > -1:\n split = string[i:]\n if split or not removeEmpty:\n splits.append(split)\n\n return splits",
"def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()",
"def split_string(string: str, indices: list) -> list:\n return [string[n:m] for n, m in zip(indices[:-1], indices[1:])]",
"def split_by(string: str, symbols: list, include_symbols=True) -> list:\n out = []\n curr = \"\"\n for c in string:\n if c in symbols:\n out.append(curr)\n out.append(c)\n curr = \"\"\n else:\n curr += c\n out.append(curr)\n return out",
"def split_preserve_tokens(s):\n return re.split(r'(\\W)', s)",
"def extract(s: str, delimiter: str=DELIMITER) -> []:\n s += DELIMITER\n # create rotations\n rotations = [s[index:] + s[:index] for index in range(len(s))]\n rotations.sort()\n return list(enumerate(rotations))",
"def split(self, s):\r\n l = [self._split(x) for x in _SPLIT_RE.split(s)]\r\n return [item for sublist in l for item in sublist]",
"def _regex_split(pattern, string):\n splits = list((m.start(), m.end()) for m in re.finditer(pattern, string))\n starts = [0] + [i[1] for i in splits]\n ends = [i[0] for i in splits] + [len(string)]\n return [string[start:end] for start, end in zip(starts, ends)]",
"def split_package_spec(package_spec):\n match = re.match('^(.*?)\\((.*)\\)$', package_spec)\n if match:\n package_name = match.group(1)\n package_options = match.group(2).split(',')\n else:\n package_name = package_spec\n package_options = []\n return package_name, package_options",
"def split_cmdline_filter_items(string):\n filter_items = string.split(',')\n return filter_items",
"def split_name(name: str) -> List[str]:\n parts = name.split('.')\n return list(filter(lambda n: n, parts))",
"def getListOfModifiers(self, *args):\n return _libsbml.Reaction_getListOfModifiers(self, *args)",
"def split_skill_string(skill):\n skill = skill.strip().replace(\" \", \"\").replace(\"\\n\", \"\")\n skill_regex = \"([\\w_-]+)\\{(.*)\\}\"\n try_match = re.match(skill_regex, skill)\n if try_match:\n name, args_str = try_match.group(1, 2)\n arg_regex = '([\\w_-]+)=\\\\\"([\\w_-]+)\\\\\"(?:,)?'\n # split along the regex, filtering empty matches\n arg_list = list(filter(None, re.split(arg_regex, args_str)))\n # convert list to dictionary with even odd matching\n # see https://stackoverflow.com/a/23286311\n args = dict(zip(arg_list[::2], arg_list[1::2]))\n return name, args\n else:\n raise Exception(\"invalid skill format\", skill)",
"def safe_split(string, sep=','):\n regex = re.escape(sep) + r'\\s*(?![^\\[\\]]*\\])(?![^()]*\\))'\n return re.split(regex, string)",
"def split_string(self, string):\n self.actions = []\n start = 0\n\n for match in self._ansi_pattern.finditer(string):\n raw = string[start:match.start()]\n substring = self._special_pattern.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring\n start = match.end()\n\n self.actions = []\n try:\n params = []\n for param in match.group(1).split(';'):\n if param:\n params.append(int(param))\n except ValueError:\n # Silently discard badly formed escape codes.\n pass\n else:\n self.set_csi_code(match.group(2), params)\n\n raw = string[start:]\n substring = self._special_pattern.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring",
"def split(self) -> List[String]:\n pass",
"def split_str(str):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('{0}'.format(str))\n \n match = re.match(r\"([0-9]+.?\\d{0,32}?)(d|m|s)\", str)\n \n if match:\n items = match.groups()\n \n return items[0], items[1]",
"def modifier(self) -> str:\n match = RE_MODIFIER.match(self.string.split(\".\")[-1])\n return match.group(1) if match else None",
"def splitstring(string, splitcharacter=' ', part=None):\n\n # If the part is empty\n if part in [None, '']:\n # Return an array of the splitted text\n return str(string).split(splitcharacter)\n\n # Return an array of the splitted text with a specific part\n return str(string).split(splitcharacter)[part]",
"def lsplit(self, string):\n rhs = string()\n lhs = string()\n pattern_match=string()\n return lhs, pattern_match, rhs",
"def splitpop(string, delimiter):\n if delimiter not in string:\n string += delimiter\n fields = string.split(delimiter)\n return delimiter.join(fields[:-1]), fields[-1]"
] |
[
"0.59638035",
"0.5797061",
"0.54651815",
"0.54193926",
"0.5409097",
"0.53156203",
"0.5306072",
"0.5209662",
"0.50047225",
"0.49938494",
"0.49826226",
"0.4935543",
"0.49057367",
"0.48880288",
"0.48861265",
"0.48632205",
"0.4861422",
"0.4847447",
"0.4827977",
"0.48262042",
"0.4809146",
"0.48077154",
"0.47900325",
"0.47826144",
"0.47616252",
"0.4746015",
"0.47386152",
"0.47347322",
"0.47260544",
"0.47180682"
] |
0.80421674
|
0
|
Removes _scaled, etc., from the feature list to create a unique set of the features as in the environment directory >>> features = ['obs2_scaled_decayed','obs1_scaled','obs2_scaled','obs1_return'] >>> remove_modifiers(*features, sort=False) ['obs2', 'obs1'] >>> remove_modifiers(*features, sort=True) ['obs1', 'obs2'] >>> remove_modifiers('trace_rate','trace_key_scaled', 'trace_rate_scaled') ['trace'] >>> remove_modifiers('trace_rate','trace_key_scaled', 'trace_rate_trace', mod_set=['scaled', 'trace']) ['trace_rate', 'trace_key']
|
def remove_modifiers(*values, sort=False, mod_set=None):
features = []
for f in values:
(name, mods) = split_modifiers(f, mod_set=mod_set)
if name not in features:
features.append(name)
if sort:
features.sort()
return features
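A usage note (assuming remove_modifiers as defined above): the feature names are taken as *args, so an existing list has to be unpacked:

features = ['obs2_scaled_decayed', 'obs1_scaled', 'obs2_scaled', 'obs1_return']
print(remove_modifiers(*features, sort=True))
# ['obs1', 'obs2']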
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def modifiers(m) -> Set[str]:\n return set(m[\"modifier_list\"])",
"def test_remove_feature_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.remove_feature(\"sponge\", True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Disable-Feature\",\n \"/FeatureName:sponge\",\n \"/Remove\",\n \"/NoRestart\",\n ]\n )",
"def attr_remove(self):\n def _del_if_in(obj, attr):\n if attr in obj:\n del obj[attr]\n if self._modifier_exists(REMOVE_KEY):\n to_remove = self[CONFIG_KEY][SAMPLE_MODS_KEY][REMOVE_KEY]\n _LOGGER.debug(\"Removing attributes: {}\".format(to_remove))\n for attr in to_remove:\n [_del_if_in(s, attr) for s in self.samples]",
"def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x",
"def pruneModifierRelationships(self):\n modifiers = self.getConTextModeNodes(\"modifier\")\n for modifier in modifiers:\n modified_by = self.successors(modifier)\n if modified_by and len(modified_by) > 1:\n minm = min([(modifier.dist(mod_by), mod_by) for mod_by in modified_by])\n edgs = self.edges(modifier)\n edgs.remove((modifier, minm[1]))\n if self.getVerbose():\n print(\"deleting relationship(s)\", edgs)\n\n self.remove_edges_from(edgs)",
"def remove_feature_accessors(obj, feats: FeaturesTuple):\n for feat in feats:\n try:\n delattr(obj, feat.get_name())\n\n except AttributeError:\n pass",
"def removeModifier(self, *args):\n return _libsbml.Reaction_removeModifier(self, *args)",
"def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))",
"def split_modifiers(mod_string, mod_set=None):\n\tparts = mod_string.split('_')\n\tif mod_set is None:\n\t\treturn (parts[0], parts[1:])\n\tname = [parts[0]]\n\tmods = []\n\t\n\tfor p in parts[1:]:\n\t\tif p in mod_set:\n\t\t\tmods.append(p)\n\t\telse:\n\t\t\tname.append(p)\n\t\n\treturn ('_'.join(name), mods)",
"def test_remove_feature():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.remove_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Disable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )",
"def removeExtraSets(cls, obj, *args, **kwargs):\n for i in pm.polyUVSet(obj, query=True, allUVSetsIndices=True)[1:]:\n name = pm.getAttr(obj + '.uvSet[' + str(i) + '].uvSetName')\n pm.polyUVSet(obj, delete=True, uvSet=name)",
"def removeBrokenModifiers(context):\n from Products.CMFEditions.interfaces.IModifier import IConditionalModifier\n\n tool = getToolByName(context, \"portal_modifier\", None)\n for modifier_id, modifier in tool.objectItems():\n if not IConditionalModifier.providedBy(modifier):\n continue\n if not modifier.isBroken():\n continue\n tool._delObject(modifier_id)\n logger.info(\"Removed broken %s from portal_modifier.\", modifier_id)",
"def delete(feature_set, population):\n features = [x for x in list(feature_set)]\n pop = [x for y in population for x in y]\n min = float(\"+inf\")\n rem = features[0]\n for i in range(0, len(features)):\n x = pop.count(features[i])\n if x < min:\n min = x\n rem = features[i]\n features.remove(rem)\n return set(features)",
"def remove_deletions(murim_mutations):\n\n pass",
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X",
"def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s",
"def remove_not_added(target, xtal_list):\n all_prots = Protein.objects.filter(target_id=target)\n # make sure not to delete any of the computed set proteins (which are protected)\n computed_prots = [mol.pdb for mol in ComputedMolecule.objects.filter(pdb__target_id=target)]\n unprotected = [x for x in all_prots if x not in computed_prots]\n\n for prot in unprotected:\n # Code consists of 'directory:alternate_name' if exists (code is renamed based on the metadata)\n code_first_part = prot.code.split(\":\")[0]\n if code_first_part not in xtal_list:\n prot.delete()\n return None",
"def remove_old_flags():\n # N.B. We remove only those features we know absolutely nothing about,\n # which means that FEATURES_PENDING_REMOVAL are left alone.\n known = set(FEATURES) | set(FEATURES_PENDING_REMOVAL)\n unknown_flags = Feature.query.filter(sa.not_(Feature.name.in_(known)))\n count = unknown_flags.delete(synchronize_session=False)\n if count > 0:\n log.info('removed %d old/unknown feature flags from database', count)",
"def remove_features(x_train, x_val, x_test, features, ordered_feature_names):\n indices = np.where(np.isin(ordered_feature_names,unwanted_features))\n #print(indices)\n if len(indices) is not 0:\n x_train = np.delete(x_train, indices, axis=1)\n x_test = np.delete(x_test, indices, axis=1)\n x_val = np.delete(x_val,indices,axis=1)\n ordered_feature_names = np.delete(ordered_feature_names, indices, axis=None)\n return x_train,x_val, x_test, ordered_feature_names",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def test_01_cart_modifier_pool_loads_modifiers_properly(self):\n MODIFIERS = [\n 'shop.cart.modifiers.tax_modifiers.TenPercentGlobalTaxModifier']\n with SettingsOverride(SHOP_CART_MODIFIERS=MODIFIERS):\n thelist = modifiers_pool.cart_modifiers_pool.get_modifiers_list()\n self.assertEqual(len(thelist), 1)\n instance = thelist[0]\n self.assertTrue(hasattr(instance, 'TAX_PERCENTAGE'))",
"def _remove_data(things, lst_remove=None):\n\n for data in things:\n data.pop(\"_sa_instance_state\", None)\n data.pop(\"user_id\", None)\n\n if lst_remove is not None:\n for str_remove in lst_remove:\n if str_remove in data:\n data.pop(str_remove, None)\n\n return things",
"def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None",
"def removeModulesNotOnAPathExcluding( process, keepList=() ):\n allMods=set((x for x in process.producers_().iterkeys()))\n allMods.update((x for x in process.filters_().iterkeys()))\n allMods.update((x for x in process.analyzers_().iterkeys()))\n allMods.update((x for x in process.outputModules_().iterkeys()))\n \n modulesOnPaths = set()\n for p in process.paths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames())) \n for p in process.endpaths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames()))\n\n notOnPaths = allMods.difference(modulesOnPaths)\n \n keepModuleNames = set( (x.label_() for x in keepList) )\n \n getRidOf = notOnPaths.difference(keepModuleNames)\n \n for n in getRidOf:\n delattr(process,n)",
"def remove_mass_unsafe(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]",
"def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(EquationUnit, self)._RemoveFromCloneList(clone, attrNamesToClone)\n \n dontClone = [\"_Funcs\", \"_FuncsDefs\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone",
"def remove_ops(self):\n return self._remove_ops",
"def test_removeFlags(self):\n self._flagsTest('removeFlags', b'-FLAGS')"
] |
[
"0.60711634",
"0.5854533",
"0.576454",
"0.552694",
"0.5376382",
"0.53698",
"0.53240466",
"0.5302668",
"0.53006244",
"0.52412456",
"0.5228294",
"0.52273476",
"0.5213669",
"0.51895434",
"0.517557",
"0.517557",
"0.51688987",
"0.51524097",
"0.5135517",
"0.5121186",
"0.50604916",
"0.50084203",
"0.49744293",
"0.49580622",
"0.4955502",
"0.48701584",
"0.48462668",
"0.4842973",
"0.483837",
"0.4808825"
] |
0.80447644
|
0
|
Return true if filepath ends in 'gz' extension
|
def is_zip(filepath):
return os.path.splitext(filepath)[1] == '.gz'
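
A quick usage sketch (assuming the is_zip function above and import os are in scope); note that it inspects only the filename suffix, not the file contents:

import os  # required by is_zip above

# Assumes is_zip from above is in scope.
assert is_zip('tmp.txt.gz') is True
assert is_zip('archive.tar.gz') is True   # only the final suffix is inspected
assert is_zip('tmp.txt') is False
assert is_zip('data.gzip') is False       # '.gzip' does not match, only '.gz'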
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_archive_ext(filepath):\n file_extension = os.path.splitext(filepath)[1].lower()\n if file_extension in get_archive_extensions():\n return True\n else:\n return False",
"def chk_for_gz(filenm):\n import os\n from os.path import expanduser\n filenm = expanduser(filenm)\n\n # File exist?\n if os.path.lexists(filenm):\n chk=True\n return filenm, chk\n\n # .gz already\n if filenm.find('.gz') > 0:\n chk=0\n return filenm, chk\n\n # Add .gz\n if os.path.lexists(filenm+'.gz'):\n chk=True\n return filenm+'.gz', chk\n else:\n chk=False\n return None, chk",
"def is_gz_file(f):\n with open(f, \"rb\") as fin:\n return binascii.hexlify(fin.read(2)) == b\"1f8b\"",
"def isgzip(filename):\n magic_number = b'\\x1f\\x8b\\x08'\n with open(filename, 'rb') as f:\n file_start = f.read(len(magic_number))\n\n if magic_number == file_start:\n return True\n return False",
"def _gz(filename):\n \n with open(filename, 'rb') as f:\n return binascii.hexlify(f.read(2)) == b'1f8b'",
"def is_file(path_name):\n if re.search(\"\\.[a-zA-Z]+$\", os.path.basename(path_name)):\n return True\n else:\n return False",
"def is_good_file(filename):\n for e in extensions:\n if filename.endswith(e):\n return True\n return False",
"def is_suffix_right(file: Path, extension: str):\n ext = extension.lower()\n fext = file.suffix.lower()[1:]\n jpgs = {\"jpg\", \"jpeg\"}\n if fext in jpgs and ext in jpgs:\n return True\n elif fext == ext:\n return True\n return False",
"def _has_extension(self, path):\r\n if re.match(r'.*\\\\.*\\..*$', path):\r\n return True",
"def is_archive(afile):\n return file_ext(os.path.basename(afile)) in ARCHIVE_COMPRESS_FORMATS",
"def _file_can_be_compressed(filename):\n content_type = ''\n with open(filename, 'rb') as f:\n content_type = _get_content_type(f)\n return content_type in TEXT_TYPES",
"def _iszip(self, filename):\n fname, ext = os.path.splitext(filename)\n return ext in _file_openers.keys()",
"def check_file_ext(f_name):\n global im_ext_\n for ext_ in im_ext_:\n if f_name.lower().endswith(ext_):\n return True\n return False",
"def _check_extension(self, filepath):\n extensions = ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'pcx', 'tga', 'tiff', 'tif', 'iff', 'xpm']\n for extension in extensions:\n try:\n if filepath.index(extension) == len(filepath) - len(extension):\n return True\n except:\n pass\n\n return False",
"def cutgz(x):\n if x[-3:] == '.gz':\n return x[:-3]\n else:\n return x",
"def are_files_gzipped(raw_files):\n files_are_gzipped = None\n for file_name in raw_files:\n if re.search(r\"\\.gz$\", file_name) is not None:\n if files_are_gzipped is False:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = True\n else:\n if files_are_gzipped:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = False\n return files_are_gzipped",
"def is_gzipped(infile):\n logger = logging.getLogger(__name__)\n\n magic_number = b'\\x1f\\x8b'\n f = open(infile, 'rb')\n with f:\n try:\n assert f.read(2) == magic_number\n except AssertionError as e:\n logger.info(f'{infile} is not gzipped')\n return False\n else:\n logger.debug(f'{infile} is gzipped')\n return True",
"def has_extension(filepath, extensions):\n return any(filepath.endswith(ext) for ext in extensions)",
"def isjson(filepath):\n return filepath.lower().endswith('.json')",
"def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False",
"def split_ext(filepath):\n\t(fn, ext) = os.path.splitext(filepath)\n\tif ext=='.gz':\n\t\t(fn, ext) = os.path.splitext(fn)\n\t\text += '.gz'\n\treturn (fn, ext)",
"def is_image_file(filename, extensions):\n return filename.lower().endswith(extensions)",
"def match_extensions(filename):\n return any(filename.endswith(e) for e in extensions)",
"def is_archive_file(filepath):\n\n file_extension = os.path.splitext(filepath)[1].lower()\n if file_extension == \".zip\":\n if not zipfile.is_zipfile(filepath):\n raise DeidentificationError(\n \"The ZIP file has the .zip extension but it is not a ZIP file\")\n else:\n return True\n elif file_extension in get_archive_extensions():\n if not tarfile.is_tarfile(filepath):\n raise DeidentificationError(\n \"The file has an archive extension but it is not a TAR file\")\n else:\n return True\n else:\n return False",
"def has_extension(self, filename):\n if filename is None:\n return False\n return filename.split(\".\")[-1].lower() in self.extensions",
"def is_jpg(filename):\n return '.jpg' in filename",
"def is_gzip(fp):\r\n return open(fp, 'rb').read(2) == '\\x1f\\x8b'",
"def _accept_for_flag (self, filename):\n\t\troot, ext = os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions",
"def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)",
"def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)"
] |
[
"0.73746085",
"0.73102397",
"0.728944",
"0.7181048",
"0.69474477",
"0.67704386",
"0.6675972",
"0.6661081",
"0.6557845",
"0.6541729",
"0.6535836",
"0.6522381",
"0.64815605",
"0.646029",
"0.6333025",
"0.6316177",
"0.62578523",
"0.6246907",
"0.6244113",
"0.6234546",
"0.6212786",
"0.62123877",
"0.61964524",
"0.6172384",
"0.6169029",
"0.6166852",
"0.6160389",
"0.61373377",
"0.6094176",
"0.6094176"
] |
0.80162406
|
0
|
Recursively creates every directory in dirpath if it does not exist. Returns the dirpath on success or False on failure >>> newdir = '/tmp/asdf/fdsa/fred' >>> os.path.exists(newdir) False >>> os.path.exists('/tmp/asdf/fdsa') False >>> make_dirs(newdir) '/tmp/asdf/fdsa/fred' >>> os.path.exists(newdir) True >>> os.path.exists('/tmp/asdf/fdsa') True >>> shutil.rmtree('/tmp/asdf') >>> make_dirs('/Users/fred', debug=False) False
|
def make_dirs(dirpath, debug=False):
    if not os.path.exists(dirpath):
        try:
            os.mkdir(dirpath)
        except OSError as e:
            if debug:
                print(e)
            (head, tail) = os.path.split(dirpath)
            if '/' not in head or os.path.exists(head):
                return False
            else:
                # create the missing parent first, preserving the debug flag on recursion
                if make_dirs(head, debug):
                    return make_dirs(dirpath, debug)
    return dirpath
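
A minimal usage sketch, assuming make_dirs above is in scope and that a writable /tmp exists as in the doctest; the demo path is illustrative only.

import os
import shutil

# Assumes make_dirs from above is in scope.
new_dir = '/tmp/make_dirs_demo/a/b'       # hypothetical path for the demo
assert make_dirs(new_dir) == new_dir      # every missing parent is created
assert os.path.isdir(new_dir)
assert make_dirs(new_dir) == new_dir      # calling again is a no-op: path already exists
shutil.rmtree('/tmp/make_dirs_demo')      # clean up the demo directories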
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_dirs(path):\n\tif not os.path.exists(path):\n\t\treturn os.makedirs(path)",
"def make_dirs(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def make_dirs_or_not(dirpath: Union[PathOrStrType]):\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)",
"def makeDir(path):\r\n\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise",
"def _ensure_dirs(dirpath):\n if not os.path.isdir(dirpath):\n if os.path.exists(dirpath):\n err = \"log path ({}) exists but is not a directory\"\n raise ConfigError(err.format(dirpath))\n os.makedirs(dirpath, 0o777)",
"def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True",
"def mkdirpath (dirpath):\n\n if os.path.isdir(dirpath):\n return\n\n incpath = \"\"\n for subdir in os.path.normpath(dirpath).split(os.path.sep):\n incpath = os.path.join(incpath, subdir)\n if not os.path.isdir(incpath):\n os.mkdir(incpath)",
"def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)",
"def make_dir(path, is_dir=False):\n target = path if is_dir else os.path.dirname(path)\n try:\n os.makedirs(target)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target):\n pass\n else:\n raise",
"def mkdirs(path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)",
"def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)",
"def make_dirs(dirs):\n\n for d in dirs:\n if not os.path.exists(d):\n try:\n os.mkdir(d)\n except OSError as e:\n if e.errno != 17:\n raise",
"def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True",
"def create_directories(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n\n if e.errno != errno.EEXIST:\n logging.error(str(e))\n raise",
"def __make_dirs(path, mode=0o777):\n\n try:\n os.makedirs(path, mode=mode)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise Ai1wmError('error creating a directory: {}, error: {}'.format(path, e))\n return path",
"def remake_directories(*dirnames):\r\n for d in dirnames:\r\n d = path(d)\r\n if d.exists():\r\n d.rmtree()\r\n d.mkdir()\r\n return",
"def rmkdir(path):\n t = []\n sep = os.path.sep\n if sep != \"/\":\n parts = path.replace(os.path.sep, \"/\").split(\"/\")\n else:\n parts = path.split(sep)\n \n if path[0] == \"/\":\n t = [\"/\" + parts[0]]\n parts = parts[1:]\n \n for p in parts:\n t.append(p)\n # I chose isdir so we'll get a helpful error if it exists but is a file\n if os.path.isdir(sep.join(t)): continue\n os.mkdir(sep.join(t))",
"def create_dirs(dirs):\n try:\n for dir_ in dirs:\n if not os.path.exists(dir_):\n os.makedirs(dir_)\n return 0\n\n except Exception as err:\n logging.error(\"Creating directories error: {0}\".format(err))\n exit(-1)",
"def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return",
"def MakeDirs(dirname: str):\n exist_dir = False\n if not exist_dir:\n os.makedirs(dirname, exist_ok=True)",
"def find_and_create_dirs(dir_name):\n if os.path.exists(dir_name) is False:\n os.makedirs(dir_name)\n return dir_name",
"def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def create_dirs(dirs):\n try:\n for dir_ in dirs:\n if not os.path.exists(dir_):\n os.makedirs(dir_)\n return 0\n except Exception as err:\n print(\"Creating directories error: {0}\".format(err))\n exit(-1)",
"def make_directories(file_path):\n logger.info(\"Create all directories in the path %s\", file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n else:\n logger.warning(\"Cannot create directories %s. The directory already exists\", file_path)",
"def create_local_directory(dirpath: pathlib.Path):\n if dirpath.exists():\n return False\n\n # TODO: Debug and improve this in order to catch particular exceptions\n try:\n dirpath.mkdir(parents=True)\n except Exception as error:\n raise SrtUtilsException(\n f'Directory was not created: {dirpath}. Exception '\n f'occured ({error.__class__.__name__}): {error}'\n )\n\n return True",
"def mkdir(path):\n path = expandPath(path).split(os.sep)[1:]\n tmp = os.sep\n for entry in path:\n tmp += '%s%s' % (entry,os.sep)\n try:\n os.mkdir(tmp)\n except OSError:\n pass",
"def make_dir(new_dir, path, exist_ok=True, parents=False):\n new_path = path / Path(new_dir)\n new_path.mkdir(exist_ok=exist_ok, parents=parents)\n\n return new_path",
"def ensuredirs(dpath, *dpaths):\n try:\n makedirs(path.join(dpath, *dpaths))\n except OSError as e:\n if e.errno != EEXIST:\n raise # Re-raise the exception.",
"def make_dirs():\n global paths_made\n\n #Have we done this already? Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True",
"def ensure_dirs_exist(path):\n os.makedirs(path, exist_ok=True)"
] |
[
"0.7709915",
"0.7483823",
"0.7414723",
"0.73108613",
"0.72236437",
"0.7209876",
"0.71590054",
"0.71147317",
"0.7107763",
"0.710238",
"0.7098765",
"0.70964026",
"0.7016854",
"0.7009933",
"0.70048064",
"0.6991996",
"0.69547653",
"0.69364005",
"0.6925537",
"0.692539",
"0.6917738",
"0.69048035",
"0.6887163",
"0.6851755",
"0.6843804",
"0.6842428",
"0.68356943",
"0.6833574",
"0.682576",
"0.6821615"
] |
0.84734285
|
0
|
Standardize array naming! >>> get_array_headers('tile_index', 3) ['tile_index0', 'tile_index1', 'tile_index2'] >>> get_array_headers('a', 1) ['a0'] >>> get_array_headers('a', 10)[0] 'a00' >>> get_array_headers('a', 1000)[1] 'a0001'
|
def get_array_headers(array_name, length):
width = len(str(length))
return [join_items([array_name, str(i).zfill(width)]) for i in range(length)]
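
The helper join_items is not shown in this snippet; a hypothetical stand-in that simply concatenates the name pieces (an assumption about the real helper) is enough to reproduce the doctests above:

def join_items(items, sep=''):
    # Hypothetical stand-in for the module's join_items helper, which is not
    # shown here; assumed to concatenate the name pieces with no separator.
    return sep.join(str(item) for item in items)

# With that assumption, get_array_headers above reproduces its doctests:
assert get_array_headers('tile_index', 3) == ['tile_index0', 'tile_index1', 'tile_index2']
assert get_array_headers('a', 10)[0] == 'a00'
assert get_array_headers('a', 1000)[1] == 'a0001'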
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_headers(worksheet):\n headers = {}\n cell_idx = 0\n while cell_idx < worksheet.ncols:\n cell_type = worksheet.cell_type(0, cell_idx)\n if cell_type == 1:\n header = slughifi(worksheet.cell_value(0, cell_idx))\n if not header.startswith(\"_\"):\n headers[cell_idx] = header\n cell_idx += 1\n return headers",
"def _extract_headers(self):\n\n with open(self.file_path, \"rt\", encoding=self._encoding) as csv_file:\n for row in csv.reader(csv_file):\n if self._file_headings:\n return [header if header != \"\" else f\"Untitled_{index + 1}\" for index, header in enumerate(row)]\n\n else:\n return [f\"Untitled_{i + 1}\" for i in range(len(row[0]))]",
"def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list",
"def make_headers():\n headers = [\"agent_ident\", \"chro\"]\n for i in range(10):\n for j in range(5):\n s = \"d\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"a\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"b\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(6):\n for j in range(5):\n s = \"s\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(5):\n for j in range(6):\n s = \"e\" + str(i) + \"a\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n return headers",
"def create_header(numValues):\n\n header = []\n for value in range(numValues):\n header.append(\"att{}\".format(value))\n return header",
"def headers(self):\n fd = open(self.filename, \"r\")\n file_head = fd.readlines()\n fd.close()\n\n zip_heads = zip(file_head[0].split(self.separator),\n file_head[1].split(self.separator))\n\n metric_names = []\n category = \"\"\n for _category, metric in zip_heads:\n # fill empty category names\n if len(_category) is not 0:\n category = _category\n\n metric_names.append(\"%s.%s\" % (category, metric))\n\n return metric_names[:-1]",
"def Header(nmax):\r\n n = np.arange(1,nmax+1)\r\n return (2*n+1)/(n*(n+1))",
"def dataset_headers(dataset):\n return list(dataset.columns.values)",
"def nameColumns(name, numbColumns):\n namesColumns = []\n for i in range(numbColumns):\n nColumn = name + '_' + str(i)\n namesColumns.append(nColumn)\n return namesColumns",
"def _get_headers(self, data):\n if not self._headers:\n self._headers = list(map(lambda col: col.lower(), data.columns))\n return self._headers",
"def get_header_names(fname, row_num, delimiter = None):\n names = []\n with open(fname) as f:\n for i in range(row_num):\n line = f.readline()\n list_ = line.split(delimiter)\n names.append(list_)\n return np.array(names, dtype = str)",
"def get_name_by(indices: List[int]) -> List[str]:\n return [wiki_data[\"name\"][i] for i in indices]",
"def _get_cleaned_headers(headers):\r\n cleaned_headers = []\r\n for header in headers:\r\n # Google strips special characters, whitespace, and underscores first,\r\n # and then strips any *leading* digits. This order is extremely\r\n # important!\r\n sanitized = sub(r'^\\d+', '', sub(r'[\\W_]', '', header.lower()))\r\n if len(sanitized) > 0:\r\n cleaned_headers.append(sanitized)\r\n else:\r\n raise GoogleSpreadsheetError(\"Encountered a header '%s' that was \"\r\n \"either blank or consisted only of special characters. \"\r\n \"Could not map the header to the internal representation \"\r\n \"used by the Google Spreadsheet. Please change the header \"\r\n \"to consist of at least one alphanumeric character.\"\r\n % header)\r\n\r\n # When the same sanitized header appears multiple times in the first row\r\n # of a spreadsheet, _n is appended to the name to make it unique.\r\n header_count = defaultdict(int)\r\n results = []\r\n\r\n for header, cleaned_header in zip(headers, cleaned_headers):\r\n new_header = cleaned_header\r\n\r\n if header_count[cleaned_header] > 0:\r\n # Google's numbering starts from _2, hence the +1.\r\n new_header = '%s_%d' % (cleaned_header,\r\n header_count[cleaned_header] + 1)\r\n\r\n header_count[cleaned_header] += 1\r\n results.append(new_header)\r\n\r\n return results",
"def create_header(list_of_freqs):\r\n header = \"\"\r\n for i in range(len(list_of_freqs)):\r\n if list_of_freqs[i] != 0:\r\n header = header + str(i) + \" \" + str(list_of_freqs[i]) + \" \"\r\n return header",
"def __make_game_header(self, state_index: int = -1):\n\n if state_index < 0:\n state_index = len(self.history) + state_index\n\n # get board_state as FEN string\n fen = self.history[state_index]\n splitfen = fen.split(\" \")\n\n color = np.full((8, 8), int(self.__get_whose_turn_in_history(state_index)), dtype=np.float32)\n\n # (8,8) array of ones if white is current player otherwise zeros\n fifty_move = np.full((8, 8), int(splitfen[4]), dtype=np.float32)\n\n # (8,8) array full of number of moves since the first black's move\n fullmove_cnt = np.full((8, 8), int(splitfen[5]), dtype=np.float32)\n\n # stack the 3 sub headers\n return np.array([color, fifty_move, fullmove_cnt])",
"def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new",
"def _get_header(self, headline, column_widths):\n header = []\n header_underline = []\n header_widths = map(len, headline)\n\n for width, header_width in zip(column_widths, header_widths):\n width = max(header_width, width)\n\n item = '-' * width\n header_underline.append(item)\n\n header.append(headline)\n header.append(header_underline)\n\n return header",
"def getMeasHeaders(self):\n headers = []\n for ii in range(self.rows):\n inst = self.instruments[self.stringInsts.index(self.selInsts[ii])]\n param = inst.getParam(self.selParams[ii])\n if type(param.comps) is not list:\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, param, param.units))\n else:\n headers.append(sc.formatHeader(inst, param))\n else:\n for ii,comp in enumerate(param.comps):\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, comp, param.units[ii]))\n else:\n headers.append(sc.formatHeader(inst, comp))\n return headers",
"def _get_record_names(recarray, stripped=False):\n rec_names = np.unique(recarray[\"name\"])\n if not stripped:\n return rec_names\n else:\n seen = []\n for recname in rec_names:\n if recname in [\"IN-OUT\", \"TOTAL_IN\", \"TOTAL_OUT\", \"IN_OUT\"]:\n continue\n if recname.endswith(\"_IN\"):\n recname = recname[:-3]\n elif recname.endswith(\"_OUT\"):\n recname = recname[:-4]\n if recname not in seen:\n seen.append(recname)\n seen.extend([\"IN-OUT\", \"TOTAL\", \"IN_OUT\"])\n return np.array(seen)",
"def build_ws_header(work_sheet, max_hits):\n first_header_info = ['Query #', 'Query Sequence',\n 'Top Hit Accession in L.h.', 'E-Value', 'Filename']\n r = 1\n c = 1\n for val in first_header_info:\n c = set_cell(ws, r, c, val)",
"def header_info(msg_ids, accumulator):\n headers = []\n for ms_id in msg_ids:\n if ms_id in accumulator.headers_map.keys():\n headers.append(accumulator.headers_map[ms_id])\n return headers",
"def getTileNames(cls):\n return sorted(TILENAMEMAP.keys())",
"def get_header(data):\n header = \"\"\n for item in data:\n if len(item) > 10:\n header = item\n break\n return header",
"def GetHeader(header, sample_prefixes):\n if len(sample_prefixes) == 0: return [header]\n else:\n header_items = []\n for sp in sample_prefixes:\n header_items.append(header+\"-\"+sp)\n return header_items",
"def get_ctffind_4_1_0_header_names() -> typing.List[str]:\n return [\n 'DefocusU',\n 'DefocusV',\n 'DefocusAngle',\n 'PhaseShift',\n 'CtfFigureOfMerit',\n 'CtfMaxResolution',\n ]",
"def get_nifti1hdr_from_h5attrs(h5attrs):\n hdr = nib.Nifti1Header()\n for k in list(h5attrs.keys()):\n hdr[str(k)] = np.array(h5attrs[k])\n\n return hdr",
"def test_normalize_headers():\n headers = [\n 'AllocationTransferAgencyIdentifier', 'BeginningPeriodOfAvailability', 'flex_mycol', 'FLEX_ANOTHER'\n ]\n mapping = {'allocationtransferagencyidentifier': 'ata', 'beginningperiodofavailability': 'boa'}\n\n result = csvReader.normalize_headers(headers, False, mapping)\n assert list(result) == [\n 'allocationtransferagencyidentifier', 'beginningperiodofavailability', 'flex_mycol', 'flex_another'\n ]\n result = csvReader.normalize_headers(headers, True, mapping)\n assert list(result) == ['ata', 'boa', 'flex_mycol', 'flex_another']",
"def get_split_col_names():\n return ['dna_%d' % (idx+1) for idx in range(60)]",
"def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0",
"def rename_headers(reads: list, organism: str):\r\n\r\n i = 0\r\n read_strings = []\r\n for read in reads:\r\n read_str = read.raw.splitlines()\r\n read_str[0] = f'@{organism}_{i}'\r\n read_str = '\\n'.join(read_str)\r\n read_strings.append(read_str)\r\n i += 1\r\n\r\n return read_strings"
] |
[
"0.6100702",
"0.5570762",
"0.5531662",
"0.54993814",
"0.548593",
"0.5444392",
"0.5387722",
"0.53857964",
"0.5276731",
"0.52353406",
"0.5227597",
"0.5223803",
"0.5222528",
"0.5206451",
"0.51679057",
"0.5157311",
"0.50992876",
"0.50846374",
"0.5073744",
"0.5066414",
"0.5048884",
"0.50447196",
"0.5018426",
"0.5010919",
"0.50083613",
"0.49869478",
"0.49325132",
"0.4928289",
"0.49226213",
"0.49214748"
] |
0.7206283
|
0
|
Set attributes of the obj according to arguments in params. include_all will add all the arguments in params to the object; if not, only those that are in valid_params are added. If validate_params, check that the params in valid_params are not None
|
def set_attributes(obj, include_all=True, validate_params=False, valid_params=None, **params):
# make sure all required values are here
if valid_params:
for k in valid_params:
if k not in params:
if not hasattr(obj, k):
raise ParameterException("Required parameter {0} missing".format(k))
else:
params[k] = getattr(obj, k)
for k, v in params.items():
check_value = False
# see if we're supposed to add this parameter
if not include_all:
if not valid_params or (valid_params and k not in valid_params):
continue
# see if we are supposed to validate the value, and if it's excluded
if validate_params and (not valid_params or k in valid_params):
check_value = True
if check_value and v is None:
raise ParameterException("Required parameter {0} set to None".format(k))
else:
setattr(obj, k, v)
return
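
A small usage sketch, assuming set_attributes and ParameterException from above are in scope; the Config class is hypothetical and exists only to give setattr a target.

# Assumes set_attributes and the ParameterException raised above are importable.
class Config(object):
    # Hypothetical bare container used as the target object.
    pass

cfg = Config()
set_attributes(cfg, host='localhost', port=8080)
assert cfg.host == 'localhost' and cfg.port == 8080

# With include_all=False only the whitelisted parameters are copied.
set_attributes(cfg, include_all=False, valid_params=['timeout'], timeout=30, extra='ignored')
assert cfg.timeout == 30 and not hasattr(cfg, 'extra')

# validate_params=True rejects a required parameter that is set to None.
try:
    set_attributes(cfg, validate_params=True, valid_params=['retries'], retries=None)
except ParameterException:
    pass  # expected: 'retries' is required but was None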
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val",
"def updateParameters(self,*args,**kwargs):\n for key in kwargs.keys():\n self._params[key] = kwargs[key]",
"def addParams(self, *params):\n for param in params:\n self.addParam(param)\n self.params = list(set(self.params))",
"def check_params(cls, **kwargs) -> None:\n\n for key, val in kwargs.items():\n cls.check_param(key, val)",
"def set_params(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.params.keys():\n self.params[key] = value\n else:\n raise KeyError",
"def set_params(self, **params):\n if not params:\n # Simple optimisation to gain speed (inspect is slow)\n return self\n\n valid_params = self.get_params(deep = True) \n for key, value in params.items():\n split = key.split('__', 1)\n\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n index_subobj = int(name.replace('f', '', 1))\n list_subobj = self._get_one_param('list_func')\n if index_subobj > len(list_subobj):\n raise ValueError('Looking for the %s -th nested function but'\n 'there is only %s functions . ' %\n (index_subobj, len(list_subobj)))\n\n sub_object = list_subobj[index_subobj]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for function %s. '\n 'Check the list of available parameters '\n 'with `cls.print_params_name()`.' %\n (key, self.__class__.__name__))\n setattr(self, '__' + key, value)",
"def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n\n for key, value in params.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n self.kwargs[key] = value\n\n return self",
"def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass",
"def _set_params(instance: BaseTpcpObjectObjT, **params: Any) -> BaseTpcpObjectObjT:\n # Basically copied from sklearn\n if not params:\n return instance\n valid_params = instance.get_params(deep=True)\n comp_fields = getattr(instance, \"_composite_params\", ())\n\n nested_params: DefaultDict[str, Any] = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\") # noqa: PLW2901\n if key not in valid_params:\n raise ValueError(f\"`{key}` is not a valid parameter name for {type(instance).__name__}.\")\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(instance, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n if key in comp_fields:\n _set_comp_field(instance, key, sub_params)\n else:\n valid_params[key].set_params(**sub_params)\n return instance",
"def set_params(self, **params):\n\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for regressor %s. '\n 'Check the list of available parameters '\n 'with `regressor.get_params().keys()`.' %\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self._regressor, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self",
"def set_params(self, **kwargs):\n ...",
"def set_params(self, **kwargs):\n for param_name, value in kwargs.iteritems():\n # only set parameters that are in the default\n if param_name in self._default_params():\n setattr(self, param_name, value)\n self.params[param_name] = value\n else:\n print('AdjustedStat class does not accept %s as a ' \\\n 'parameter and will be ignored' % param_name)",
"def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def edit(self, params):\n for key, value in params.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n raise ValueError(\"{} has no attribute {}\".format(self, key))",
"def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n else:\n self.kwargs.update(params)\n\n return self",
"def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)",
"def update(self, **kwargs):\n for key, value in sorted(kwargs.items()):\n if value:\n if hasattr(self, key):\n setattr(self, key, value)",
"def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n self.__dict__[key] = val\n\n if 'scale_params' in self.__dict__.keys():\n self.scale_params.set_params(dic)\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n self.atmospheric_params.set_params(dic)\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n self.atemperature_params.set_params(dic)\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n self.oceanic_params.set_params(dic)\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n self.ground_params.set_params(dic)\n\n if 'otemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)\n\n if 'gtemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)",
"def set_params(self, **params):\n return super().set_params(**params)",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)",
"def set_params(self, **params):\n\n return super().set_params(**params)",
"def update(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n setattr(self, key, value)",
"def update_obj(obj, attributes, params):\n for key in params.keys():\n if key in attributes:\n try:\n set_attribute(obj, key, params[key])\n except:\n abort(400)\n \n Session.flush()\n Session.commit()",
"def add_data(self,**kwargs):\n self.given_data.update([x for x in kwargs.keys() if kwargs[x]!=None ])\n for i in range(len(self.attr)):\n param=self.attr[i]\n if param in kwargs and kwargs[param]!=None:\n if i==0 and not (0 <= kwargs['angle'] <= 90) :# atribute is angle\n raise ValueError('Angle should be between 0 an 90 degrees')\n elif i==7 and not (0 <= kwargs[param] <= 1):\n raise ValueError('Coefficient (kf) should be between 0 and 1')\n else:\n self.data[param]=kwargs[param]\n print('Added data to object. See current data by using print(object_name) or using check_data method')",
"def update(self, *args, **kwargs):\n list_args = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n count = 0\n a_dict = {}\n\n if args:\n if len(args) > 0 and len(args) < 6:\n for arg in args:\n a_dict.update({list_args[count]: arg})\n setattr(self, list_args[count], arg)\n count += 1\n elif kwargs:\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def set_params(cls, param_dict):\n for param in param_dict:\n if param in cls.params:\n cls.params[param] = param_dict[param]\n else:\n raise AttributeError(\"Invalid parameter dictionary! Format: {'<param>': <value>}\")",
"def set_parameters(self, **kwargs):\n\n invalid_params = set(self.parameter_names).difference(kwargs.keys())\n if invalid_params:\n raise ValueError(\n \"unknown parameters: {}\".format(\", \".join(invalid_params))) \n \n for parameter_name, value in kwargs.items():\n setattr(self, \"_{}\".format(parameter_name), value)\n\n return kwargs",
"def set_params(self, *arg):\n pass",
"def update(self, *args, **kwargs):\n flist = ['id', 'width', 'height', 'x', 'y']\n fieldlist = [0, 0, 0, 0, 0]\n idx = 0\n for arg in args:\n fieldlist[idx] = arg\n idx += 1\n if fieldlist[0] > 0:\n self.id = fieldlist[0]\n if fieldlist[1] > 0:\n self.width = fieldlist[1]\n if fieldlist[2] > 0:\n self.height = fieldlist[2]\n if fieldlist[3] > 0:\n self.x = fieldlist[3]\n if fieldlist[4] > 0:\n self.y = fieldlist[4]\n flist = ['id', 'width', 'height', 'x', 'y']\n if len(args) == 0:\n for key, value in kwargs.items():\n if key in flist:\n setattr(self, key, value)"
] |
[
"0.63769895",
"0.62967455",
"0.60953456",
"0.5948282",
"0.59235907",
"0.5901765",
"0.5898007",
"0.5861664",
"0.58595383",
"0.5832369",
"0.58027583",
"0.5785382",
"0.57812935",
"0.57797676",
"0.5772693",
"0.57565576",
"0.57520235",
"0.575147",
"0.57505697",
"0.574735",
"0.5744103",
"0.5743905",
"0.573932",
"0.5719181",
"0.5698238",
"0.5689398",
"0.5663389",
"0.56485283",
"0.5644706",
"0.5640662"
] |
0.8129952
|
0
|
Make sure the iterable params contains all elements of required_params. If validate_values is True, make sure params[k] is set. If required_params is a dictionary, make sure params[k] is set to the value(s) given >>> validate_params(['a','b','c'], ['a','b']) True >>> validate_params(['a','b','c'], ['a','b','d']) False
|
def validate_params(params, required_params, validate_values=False):
# every key (or element) in required_params must be present in the given params
for k in required_params:
if k not in params:
return False
elif validate_values:
try:
# see if we got a dictionary of parameters
p_val = params.get(k)
except AttributeError:
# if it's not a dictionary, it doesn't have values, obviously
return False
# now we need to check if the given parameter value is valid
try:
req_vals = required_params.get(k)
# check if there's a list of requirements
try:
if p_val not in req_vals:
return False
except TypeError:
# check if it matches the required value
if p_val != req_vals:
return False
except AttributeError:
# if the requirements are not specified, just make sure it's set to something
if p_val is None:
return False
# and if we pass all the checks for all the required_params, it's valid
return True
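
A short usage sketch of the dictionary form, assuming validate_params above is in scope; the parameter names are illustrative.

# Assumes validate_params from above is in scope.
required = {'mode': ['fast', 'slow'], 'level': 3}   # a list of allowed values, or a single required value
assert validate_params({'mode': 'fast', 'level': 3}, required, validate_values=True) is True
assert validate_params({'mode': 'other', 'level': 3}, required, validate_values=True) is False
assert validate_params({'mode': 'fast'}, required) is False        # 'level' key missing entirely
assert validate_params({'mode': 'fast'}, ['mode'], validate_values=True) is True   # list form: value just has to be set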
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val",
"def _check_parameters(self, ep, params):\n\n any_group_satisfied = False\n for group in ep.REQUIRED:\n if all(required_param in params for required_param in group):\n any_group_satisfied = True\n\n if not any_group_satisfied:\n raise ValueError(f\"Got parameters {params}, expected one of {ep.REQUIRED}\")\n\n for key in params:\n if key not in ep.POSSIBLE:\n raise ValueError(f\"Got {key}, expected one of {ep.POSSIBLE}\")",
"def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logger.warning(\"Unexpected parameter {} supplied\".format(param))",
"def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logging.warning(\"Unexpected parameter {} supplied\".format(param))",
"def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?",
"def check_params(cls, **kwargs) -> None:\n\n for key, val in kwargs.items():\n cls.check_param(key, val)",
"def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))",
"def check_required_parameters(required_params_dict=dict()):\r\n print threading.currentThread().getName(), 'Starting'\r\n is_valid = True\r\n required_params_not_set = pythontools.validate_required_parameters(required_params_dict)\r\n if len(required_params_not_set) > 0:\r\n is_valid = False\r\n msg = \"Validate all required input parameters are set failed.\"\r\n for param in required_params_not_set:\r\n steplog.error(\"Required parameter %s is not set.\" % param)\r\n else:\r\n msg = \"Validate all required input parameters are set succeeded.\"\r\n return is_valid, msg",
"def check_valid_params(cls, **user_params):\n # Check that the appropriate number of params are provided\n if not all(key in user_params for key in cls.param.keys()):\n raise ValueError(f\"Missing parameter! Expected {cls.param.keys()} but was given {user_params.keys()}\")\n\n # Check parameter units and values\n for (key, allowed_params), user_param in zip(cls.param.items(), user_params.values()):\n\n # If both have units, check that the user param value is valid. If valid, continue. Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n\n elif np.isin(user_param.to(allowed_params.unit).value, allowed_params.value):\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # Check Combinations (Logic lives inside model subclasses under model.isvalid_param_combo)\n if user_params not in cls.get_param_combinations():\n raise ValueError(\n f\"Invalid parameter combination. See {cls.__class__.__name__}.get_param_combinations() for a \"\n \"list of allowed parameter combinations.\")",
"def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def __verify_required_parameters(self, parameters, required_parameters):\n\n\t\tfor parameter in required_parameters:\n\t\t\tif False == parameters.has_key(parameter):\n\t\t\t\traise MissingParameterError(parameter)\n\n\t\treturn True",
"def validate_params(self, params: Scenario) -> bool:\n valid = True\n # Make sure all needed parameters were provided\n valid = valid and \"R\" in params\n valid = valid and \"L\" in params\n\n # Make sure all parameters are physically valid\n valid = valid and params[\"R\"] > 0\n valid = valid and params[\"L\"] > 0\n\n return valid",
"def _validate_freq_params(freq_params):\n allowed_params = (\n \"Nfreqs\",\n \"start_freq\",\n \"bandwidth\",\n \"freq_array\",\n \"channel_width\",\n )\n allowed_combinations = [\n combo\n for combo in itertools.combinations(allowed_params, 3)\n if \"start_freq\" in combo and \"freq_array\" not in combo\n ] + [(\"freq_array\",)]\n for combination in allowed_combinations:\n if all(freq_params.get(param, None) is not None for param in combination):\n return True\n\n # None of the minimum necessary combinations are satisfied if we get here\n return False",
"def validate_params(self, params: Dict[str, Any]) -> bool:\n dict_set_defaults(params, self.DEFAULT_PARAMS)\n\n for k in self.params:\n if k in {\"name\", \"descr\", \"cache_file\"}:\n continue\n\n if self.params[k] != params.get(k):\n return False\n\n return True",
"def _validate(self, **parameters):\n provided = set(parameters.keys())\n required = set([\n field.name for field in self.fields if field.required\n ])\n optional = set([\n field.name for field in self.fields if not field.required\n ])\n\n # Determine any parameter names supplied that are not valid.\n unexpected = provided - (optional | required)\n unexpected = ['\"' + item + '\"' for item in sorted(unexpected)]\n if unexpected:\n prefix = len(unexpected) > 1 and 'parameters ' or 'parameter '\n raise ValueError('Unknown ' + prefix + ', '.join(unexpected))\n\n # Determine if any required field names not supplied.\n missing = required - provided\n missing = ['\"' + item + '\"' for item in sorted(missing)]\n if missing:\n prefix = len(missing) > 1 and 'parameters ' or 'parameter '\n raise ValueError('Missing required ' + prefix + ', '.join(missing))\n\n # Ensure all parameter values are valid types.\n for value in parameters.values():\n _validate_parameter(value)",
"def check_params(params):\n\n required = ['gtsrb_train_root', 'gtsrb_test_root', 'batch_size']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def _check_required_parameters(\n self,\n required_parameters,\n parameters\n ):\n self.log([u\"Checking required parameters '%s'\", required_parameters])\n self.log(u\"Checking input parameters are not empty\")\n if (parameters is None) or (len(parameters) == 0):\n self._failed(u\"No parameters supplied.\")\n return\n self.log(u\"Checking no required parameter is missing\")\n for req_param in required_parameters:\n if req_param not in parameters:\n self._failed(u\"Required parameter '%s' not set.\" % req_param)\n return\n self.log(u\"Checking all parameter values are allowed\")\n self._check_allowed_values(parameters)\n self.log(u\"Checking all implied parameters are present\")\n self._check_implied_parameters(parameters)\n return self.result",
"def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")",
"def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))",
"def test_validate_params(mocker, params):\n validate_params(**params)",
"def _validate_parameter(value):\n if isinstance(value, (dict)):\n if any([not isinstance(key, string_types) for key in value.keys()]):\n raise TypeError(\"Invalid parameter. Dictionary keys must be strings.\")\n [_validate_parameter(item) for item in value.values()]\n elif isinstance(value, (list, tuple)):\n [_validate_parameter(item) for item in value]\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool))\n ):\n pass\n else:\n raise TypeError(\"Invalid parameter type. Got '%s'.\" % type(value))",
"def _validate_parameters(self):\n errors = []\n for key in self.PARAMETERS.keys():\n if key not in self.request_obj.data_params:\n errors.append(key)\n\n if errors:\n raise DataParsingError('Following data items are missing: {}'.format(', '.join(errors)))\n\n for key, params in self.PARAMETERS.items():\n params[0].validate_type(key, self.request_obj.data_params.get(key), params[1])",
"def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'root', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)",
"def _validate_parameters(parameters):\n if not isinstance(parameters, dict):\n raise ValueError(\"Please enter a dictionary for parameters\")\n for key, val in parameters.items():\n if isinstance(val, list):\n for params in val:\n if not isinstance(params, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n else:\n if not isinstance(val, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n if not isinstance(key, str):\n raise ValueError(\"Parameter key {} is not a str\".format(key))\n\n return parameters",
"def check_mandatory(params: Dict[str, str]):\n for key, val in params.items():\n if val is None or val == '':\n raise ValueError(f'Missing mandatory param: `{key}`.')",
"def validate(self):\n\n\tmissing = []\n\tbadcheck = []\n\tfor name, checkfunc, params in self._required:\n\t try:\n\t\targ = self.make_required(name)\n\t\tif checkfunc is not None:\n\t\t if params is not None:\n\t\t\tparams = (self.param_map[name], arg) + params\n\t\t else:\n\t\t\tparams = (self.param_map[name], arg)\n\t\t try:\n\t\t\tapply(checkfunc, params)\n\t\t except ValidationError, msg:\n\t\t\tbadcheck.append(msg)\n\t except ValidationError, args:\n\t\tmissing.append(args)\n\n\tfor (name, checkfunc, params) in self._optional:\n\t tup = self.make_optional(name)\n\t if tup and checkfunc is not None:\n\t\tif params is not None:\n\t\t params = (self.param_map[name], tup) + params\n\t\telse:\n\t\t params = (self.param_map[name], tup)\n\t\ttry:\n\t\t apply(checkfunc, params)\n\t\texcept ValidationError, msg:\n\t\t badcheck.append(msg)\n\n\tif (missing or badcheck) and self.log_errors:\n\t self.log_error(missing, badcheck)\n\n\tif (missing or badcheck) and self.generate_error_page:\n\t self.generate_HTML(missing, badcheck)\n\n\tself.missing = missing\n\tself.badcheck = badcheck\n\n\treturn not (missing or badcheck)",
"def _check_implied_parameters(self, parameters):\n for key, values, implied_keys in self.IMPLIED_PARAMETERS:\n self.log([u\"Checking implied parameters by '%s'='%s'\", key, values])\n if (key in parameters) and (parameters[key] in values):\n found = False\n for implied_key in implied_keys:\n if implied_key in parameters:\n found = True\n if not found:\n if len(implied_keys) == 1:\n msg = u\"Parameter '%s' is required when '%s'='%s'.\" % (implied_keys[0], key, parameters[key])\n else:\n msg = u\"At least one of [%s] is required when '%s'='%s'.\" % (\",\".join(implied_keys), key, parameters[key])\n self._failed(msg)\n return\n self.log(u\"Passed\")",
"def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True",
"def _check_param(in_params, req_param, opt_param=list()):\n for param in req_param:\n if param not in in_params:\n raise ValueError('{} parameter is required'.format(param))\n defined_param = set(req_param + opt_param)\n for param in in_params:\n if param not in defined_param:\n print(\n \"WARNING: received unexpected parameter {}\".format(param))"
] |
[
"0.75490326",
"0.7438984",
"0.7273896",
"0.7218024",
"0.71317744",
"0.7084169",
"0.7016126",
"0.6884917",
"0.67766726",
"0.6771224",
"0.6769371",
"0.66928345",
"0.6688257",
"0.6678528",
"0.6674225",
"0.6648262",
"0.663149",
"0.65512455",
"0.65321004",
"0.65198606",
"0.6519411",
"0.6492082",
"0.64902025",
"0.6482996",
"0.6482589",
"0.64590156",
"0.6411811",
"0.64109355",
"0.6268731",
"0.6170979"
] |
0.8133651
|
0
|
Get the next noncommented line of strings from a file, separated by whitespace >>> f = open('results/testing/pos/Large/rawdata.txt', 'r') >>> read_strings(f) ['a', 'b', 'c', 'd', 'e', 'f', 'step']
|
def read_strings(filepointer):
line = '#'
try:
while line and line[0]=='#':
line = filepointer.readline()
except (IOError, ValueError):
return None
if line:
return line.split()
else:
return None
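
A minimal reproduction of the doctest behavior without the results/ fixture file, using io.StringIO as a stand-in file object (an assumption; any object with a readline method works):

import io

# Assumes read_strings from above is in scope; StringIO mimics an open file.
fake_file = io.StringIO('# a comment line\n# another comment\na b c d\n1 2 3\n')
assert read_strings(fake_file) == ['a', 'b', 'c', 'd']   # comment lines are skipped
assert read_strings(fake_file) == ['1', '2', '3']        # each call reads the next line
assert read_strings(fake_file) is None                   # end of file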
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_strings(src_file):\n res = []\n try:\n res = open(src_file,'r').readlines()\n res = [x.strip() for x in res]\n except:\n res = []\n return res",
"def get_next_hundered_lines(file):\n count = 0\n result = []\n while count < 100:\n count += 1\n next_line = file.readline()\n if next_line != \"\":\n result.append(next_line)\n else:\n break\n return result",
"def read_lines(filepath):\n file = open(filepath, 'r')\n lines = []\n\n while True:\n # Get next line from file \n line = file.readline()\n if not line:\n break\n lines.append(line.strip())\n file.close()\n return lines",
"def read_next_code_chunk(self) -> List[str]:\n with open(self._filepath) as f:\n for line in f:\n yield [line.strip()]",
"def lineReadfile(filename):\n#\t\"input:filename output=readlines() \"\n\tf = open(filename)\n\tlist1 =[]\n\twhile 1:\n\t\ts = f.readline()\n\t\tif s==\"\":\n\t\t\tbreak\n\t\ts=string.replace(s,\"\\n\",\"\")\n\t\tif s==\"\":\n\t\t\tcontinue\n\t\tlist1.append(s)\n\tf.close()\n\treturn list1",
"def read_lines(fp: TextIO) -> Iterator[str]:\n while line := fp.readline().strip():\n yield line",
"def read_file(filepath):\n with open(filepath, \"r\") as file:\n return list(map(lambda s: s.strip(), file.readlines()))",
"def readStrings(filename):\n txtlist = []\n f = open(filename)\n for line in f.readlines():\n txtlist.extend(line.split())\n return txtlist",
"def get_next_line(fin):\n line = fin.readline()\n\n pos = line.find(\"#\")\n\n while (pos == 0 or line.strip() == \"\") and line:\n line = fin.readline()\n pos = line.find(\"#\")\n\n if pos == -1:\n return line.strip()\n return line[:pos]",
"def lines_for_string(self, string):\n\t\tall_lines = string.split('\\n')\n\t\treturn [line for line in all_lines if line is not \"\"]",
"def read_lines(file_path: str) -> Generator[str, None, None]:\n with open(file_path) as f:\n for line in f:\n yield line.rstrip()",
"def readFile(filePath):\n with open(filePath, 'r') as f:\n return [l.strip() for l in f.readlines()]",
"def read_lines(filename):\n # Absolute dir the script is in\n script_dir = os.path.dirname(__file__)\n # Join the relative path to the input file\n resource_path = os.path.join(script_dir, '../resources/' + filename)\n # Open the file\n with open(resource_path) as f:\n # Read all lines from the file. Per default we have now strings\n return [line.rstrip() for line in f]",
"def _fileLinesToList(filename) :\n o = []\n with open(filename, \"r\") as fi :\n for l in fi :\n if l.strip() != \"\" :\n o.append(l.strip())\n return o",
"def read_lines(filename):\n with file(filename) as f:\n for line in f:\n _line = line.strip()\n if _line:\n yield _line",
"def read_file_lines(path:str):\n lines:list = []\n file = open(path, 'r')\n while True:\n line = file.readline()\n if not line:\n break\n lines.append(line.strip())\n \n return lines",
"def read_line(f: IO[str]) -> str:\n line = f.readline()\n\n while len(line) > 0 and line[0] == '#':\n line = f.readline()\n\n return line",
"def read_line(f: IO[str]) -> str:\n line = f.readline()\n\n while len(line) > 0 and line[0] == '#':\n line = f.readline()\n\n return line",
"def read_line(filename):\n line = \"Unknown\"\n try:\n with open(filename) as f:\n line = f.readline().strip()\n finally:\n return line",
"def read_file_lines(filepath):\n with open(filepath, 'r', encoding='utf-8', newline=os.linesep) as f:\n return f.readlines()",
"def read_lines_from_file(fname):\n return []",
"def extract_lines(infile):\n with open(infile, 'r') as src:\n return read_on(get_line, src)",
"def head(filename, lines=5):\n from itertools import islice\n with open(filename, \"r\") as f:\n return list(islice(f, lines))",
"def readlines(path):\n with open(path, \"r\") as f:\n return [line.strip() for line in f.readlines()]",
"def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data",
"def read_string_list(fn, prefix=\"\"):\n\n if ( False == os.path.isfile( fn ) ):\n raise Exception(\"%s does not exist.\" % (fn))\n \n with open(fn, \"r\") as fp:\n lines = fp.read().splitlines()\n\n n = len(lines)\n\n if ( \"\" == prefix ):\n for i in range(n):\n lines[i] = lines[i].strip()\n else:\n for i in range(n):\n lines[i] = \"%s/%s\" % ( prefix, lines[i].strip() )\n\n return lines",
"def _lines(filename):\n \n handle = gzip.open(filename, 'rt') if _gz(filename) else open(filename)\n for line in handle:\n if not line.startswith('#'):\n yield line.strip().split('\\t')",
"def readlines(filename, encoding='utf-8'):\r\n text, encoding = read(filename, encoding)\r\n return text.split(os.linesep), encoding",
"def get_first_line(file: str) -> str:\n with open(file) as f:\n return f.readline().split('\\n')[0]",
"def read_file_in_lines(filename):\r\n\twith open(filename) as infile:\r\n\t\tlines = infile.readlines()\r\n\treturn [line.strip() for line in lines]"
] |
[
"0.6840863",
"0.6173089",
"0.612168",
"0.6075292",
"0.6003574",
"0.59796333",
"0.59180355",
"0.5904176",
"0.5896242",
"0.5887557",
"0.5884237",
"0.5814879",
"0.5809892",
"0.5803777",
"0.57963556",
"0.57625866",
"0.5758329",
"0.5758329",
"0.57532275",
"0.57433313",
"0.57394236",
"0.5689261",
"0.5664636",
"0.566391",
"0.56442595",
"0.5638432",
"0.5632467",
"0.56189406",
"0.5615792",
"0.5613147"
] |
0.74165165
|
0
|
Get the next line of floats from a file, separated by whitespace >>> f = open('results/testing/pos/Large/rawdata.txt', 'r') >>> read_floats(f) [0.0, 0.2, 500.0, 0.0, 0.001, 0.0, 1.0]
|
def read_floats(filepointer):
    data = read_strings(filepointer)
    if not data:
        return None
    try:
        return [float(x) for x in data]
    except ValueError:
        # this line contained non-numeric tokens; try the next line
        return read_floats(filepointer)
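# Hedged usage sketch (added for illustration, not part of the original corpus):
# assumes read_strings() returns the whitespace-split tokens of the next line,
# as the docstring above implies; 'rawdata.txt' is a hypothetical file name.
def _demo_read_floats(path='rawdata.txt'):
    rows = []
    with open(path, 'r') as f:
        row = read_floats(f)
        while row is not None:
            rows.append(row)          # e.g. [0.0, 0.2, 500.0, 0.0, 0.001, 0.0, 1.0]
            row = read_floats(f)
    return rows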
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_float(filename):\n\tf = open(filename, \"r\")\n\tarr = np.fromfile(f, dtype='>f4')\n\treturn arr",
"def txt2float(file: str) -> float:\n return float(get_first_line(file))",
"def read_floats(self, count=1, location=None):\n return_vals = []\n byteorder = {'little':'<f', 'big':'>f'}[self._byteorder]\n if self._tiff is not None:\n off = self._offset\n if location is not None:\n off = location\n for c in range(count):\n return_vals.append(unpack_from(byteorder, self._tiff[off:off+4])[0])\n off += 4# size\n if location is None:\n self._offset += (count * 4) #size)\n return return_vals",
"def readFloats(path, dimensions, header=0):\n size = reduce(operator.mul, dimensions)\n ra = RandomAccessFile(path, 'r')\n try:\n ra.skipBytes(header)\n bytes = zeros(size * 4, 'b')\n ra.read(bytes)\n floats = zeros(size, 'f')\n ByteBuffer.wrap(bytes).asFloatBuffer().get(floats)\n return ArrayImgs.floats(floats, dimensions)\n finally:\n ra.close()",
"def readfile(filename,multiplier=1.0):\n with open(filename,'r') as f:\n lines = f.readlines()\n vec = [multiplier*float(a.strip()) for a in lines]\n return vec",
"def myloadtxt(fname, skiprows = 0):\n fin = file(fname)\n for i in range(skiprows):\n fin.readline()\n ln = fin.readline()\n lns = []\n while (ln != \"\"):\n thisln = []\n ln = ln.strip().split()\n for s in ln:\n try:\n f = float(s)\n except:\n f = None\n thisln.append(f)\n lns.append(thisln)\n ln = fin.readline()\n return np.array(lns)",
"def get_float_list(gene_file, c):\n\tfile = open(gene_file,'r')\n\tList = []\n\tfor line in file:\n\t\tif not re.match(\"#\", line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tList.append(atof(sline[c]))\n\tfile.close()\n\treturn List",
"def read_file(filename):\n data = []\n with open(filename, 'r') as infile:\n for line in infile:\n data.append([float(value) for value in line.split()])\n data = np.array(data)\n return data.T",
"def read_line(l):\n return [read_float(l[s]) for s in slices['data']]",
"def __load_raw_data(path: str,\n filename: str):\n filepath = os.path.join(path, filename)\n f = open(filepath)\n data = f.read()\n f.close()\n\n lines = data.split('\\n')\n header = lines[0].split(',')\n lines = lines[1:]\n\n float_data = np.zeros((len(lines), len(header) - 1))\n for i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n\n return float_data",
"def _parse(file_name) -> Tuple[Optional[List[List[float]]], Optional[IOError]]:\n try:\n with open(pkg_resources.resource_filename(__data_pkg__, file_name)) as file_handler:\n next(file_handler)\n return [[float(x) for x in line.split(\" \") if len(x) > 0] for line in file_handler], None\n except IOError as err:\n return None, err",
"def readCSVasFloat(filename):\n returnArray = []\n lines = open(filename).readlines()\n for line in lines:\n line = line.strip().split(\",\")\n if len(line) > 0:\n returnArray.append(np.array([np.float32(x) for x in line]))\n\n returnArray = np.array(returnArray)\n return returnArray",
"def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data",
"def iterfile(f):\n for line in f:\n fields = line.replace(\"\\n\",\"\").split(\"\\t\")\n yield float(fields[0]), fields[-1].lower()",
"def readFLO(file):\r\n\r\n tag_float = 202021.25\r\n with open(file) as f:\r\n nbands = 2\r\n tag = np.fromfile(f, np.float32, 1)[0]\r\n\r\n if tag != tag_float:\r\n raise ValueError('wrong tag possibly due to big-endian machine?')\r\n\r\n width = np.fromfile(f, np.int32, 1)[0]\r\n height = np.fromfile(f, np.int32, 1)[0]\r\n\r\n tmp = np.fromfile(f, np.float32)\r\n tmp = tmp.reshape(height, width * nbands)\r\n\r\n flow = np.zeros((height, width, 2))\r\n flow[:, :, 0] = tmp[:, 0::2]\r\n flow[:, :, 1] = tmp[:, 1::2]\r\n\r\n return flow",
"def read_flt_file(filename):\n\n fid = open(filename,'rb')\n arr = array.array('i')\n arr.fromfile(fid, 1) # dim\n dim = arr[0]\n #http://www.python.org/search/hypermail/python-1993/0393.html\n if dim>100:\n \"\"\"print 'Read very high dimension (>100).'\n print 'Endianness may come into play.'\n print 'Try to swap the byte order.'\"\"\"\n swap = True;\n arr.byteswap()\n dim = arr[0]\n #print 'dim =',dim\n else:\n swap = False\n assert(dim>=1 and dim<=4) # only accept data up to 4 dimensions.\n\n arr = array.array('i')\n arr.fromfile(fid,dim+2)\n if swap:\n arr.byteswap()\n volume = reduce(lambda x,y: x*y, arr[0:dim-1], 1)\n\n binvalues = array.array('f')\n binvalues.read(fid, volume*arr[dim-1])\n if swap:\n binvalues.byteswap()\n fid.close()\n\n data = numpy.array(binvalues, numpy.float)\n data = numpy.reshape(data, (arr[dim-1], volume))\n\n return (arr[:dim],data)",
"def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i",
"def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)",
"def load_raw(fname):\n # Read all the data from the file\n ctd = []\n with open(fname) as ctdfile:\n \n for line in ctdfile:\n \n if (line.find('*') < 0) and (line.find('#') < 0):\n \n # This line contains data; parse the line\n entries = line.strip().split()\n # Convert data to float64\n entries = [np.float64(entries[i]) \n for i in range(len(entries))]\n # Append to list\n ctd.append(entries)\n \n # Return the raw data as an numpy array\n return np.array(ctd)",
"def datareader(self, path):\n\n f = open(path, 'r')\n data = f.read()\n data = data.split('\\n')\n data_tmp = []\n for idx in range(len(data)):\n if str(data[idx]).find('@data') >= 0:\n data_tmp = data[idx + 1:]\n break\n res = []\n for record in data_tmp:\n record = record.split(',')\n record = map(float, record)\n res.append(record)\n return res",
"def read_line_as_numbers(filename):\n # As we expect numbers here and need them in order to do calculations lets map to int\n return list(map(int, read_lines(filename)))",
"def read_float(data):\n s_type = \"=%s\" % get_type(\"float\")\n return struct.unpack(s_type, data.read(4))[0]",
"def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)",
"def _readFloat(self, rawData, offset=0):\n val, = unpack(\n self.floatFormat, rawData[\n offset:offset + self.floatFormatLen])\n \n return val",
"def read_float(stream, writer_schema=None, reader_schema=None): # noqa\n return unpack('<f', stream.read(4))[0]",
"def read_file_lines(filename, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [re.split(separator, l.strip()) for l in lines]]\n return np.transpose(res) if column_major else res",
"def readData(filename, commentSymbol=\"#\"):\n\tinFile = open(filename, \"r\")\n\tdata = []\n\tfor aLine in inFile.readlines():\n\t\tif aLine.find(commentSymbol)!=-1: continue\n\t\tlineData = []\n\t\tfor piece in aLine.split():\n\t\t\tif listR.isFloat(piece): # is numerical\n\t\t\t\tlineData.append(float(piece))\n\t\t\telse: # is a string\n\t\t\t\tlineData.append(piece)\n\t\tdata.append(lineData)\n\tinFile.close()\n\treturn data",
"def get_data(dataf):\n with open(dataf) as f:\n label = []\n e_val = []\n for line in f:\n label.append(float(line.split()[1]))\n e_val.append(-1 * float(line.split()[0]))\n return label, e_val",
"def _read_all_values(self, line):\n return [float(v) for v in PATTERN_NUMBER.findall(line)]",
"def read_points():\n\tpoints = []\n\tf = open(r'sample_points.txt')\n\twhile True:\n\t\tnstr = f.readline()\n\t\tif len(nstr) == 0:\n\t\t\tbreak\n\t\tline = nstr.rstrip('\\n').split(', ')\n\t\t# print(line)\n\n\t\tpoints.append((round(float(line[0]),3),round(float(line[1]),3))) \n\n\tprint(points)\n\treturn points"
] |
[
"0.66995656",
"0.6633805",
"0.65709525",
"0.65611815",
"0.65453225",
"0.6478923",
"0.6464272",
"0.64488614",
"0.6371395",
"0.63362014",
"0.6324018",
"0.6272083",
"0.62164915",
"0.61707884",
"0.61507845",
"0.61435115",
"0.61096746",
"0.60898066",
"0.6086851",
"0.6082991",
"0.6082613",
"0.6075538",
"0.6065664",
"0.6048148",
"0.60445875",
"0.6036073",
"0.6020563",
"0.6003741",
"0.5972904",
"0.5963509"
] |
0.80918074
|
0
|
Returns a dictionary of the column indices keyed by the header in the file
|
def get_header_indices(filepath):
headers = get_header_list(filepath, sort=False)
return {h: i for i, h in enumerate(headers)}
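# Hedged usage sketch (added for illustration, not part of the original corpus):
# assumes get_header_list(filepath, sort=False) returns the file's column headers
# in their original order; 'data.csv' and the header names are hypothetical.
def _demo_get_header_indices(path='data.csv'):
    indices = get_header_indices(path)
    # e.g. {'id': 0, 'name': 1, 'value': 2} for a header row of "id,name,value"
    return indices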
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getColumnIndices(*args, filepath=\"CO2.tab\"):\n # idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"SEG\": 0}\n idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"VISG\": 0, \"VISHL\": 0, \"ROG\": 0, \"ROHL\": 0}\n if filepath:\n cols = tabLineToList(readFullLine(filepath, 52))\n for key in idxDict:\n idxDict[key] = cols.index(key)\n return idxDict",
"def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)",
"def parse_header(f):\n columns = ['pokemon', 'species_id', 'height', 'weight', 'type_1', 'type_2',\n 'url_image', 'generation_id', 'evolves_from_species_id']\n sep = ','\n result = {}\n allData = []\n with open(const.DATA_FILENAME, newline=\"\") as myData:\n for line in myData:\n line = line.strip()\n line = line.split(sep)\n allData.append(line)\n for i in columns:\n j = 0\n while j < len(allData[0]):\n if allData[0][j] == i:\n result[i] = j\n j += 1\n return result",
"def parseHeader(header):\n tokens = [t for t in header.split(' ') if t]\n result = {}\n for i in range(len(tokens)):\n result[tokens[i]] = i \n\n return result",
"def get_header(fname, path='./'):\r\n f = file(path+fname,'r')\r\n \r\n header = {}\r\n headlines = 0\r\n \r\n while True:\r\n line = f.readline()\r\n clean_line = string.strip(line).split()\r\n key = string.strip(clean_line[0])\r\n val = string.strip(clean_line[-1])\r\n if not key[0].isalpha():\r\n break\r\n try:\r\n val = int(val)\r\n except:\r\n val = float(val)\r\n if key != 'NODATA_value':\r\n key = key.lower()\r\n header[key] = val\r\n headlines += 1\r\n \r\n f.close()\r\n\r\n for key in ['ncols','nrows','cellsize','xllcorner','yllcorner']:\r\n if not header.has_key(key):\r\n raise KeyError, 'File %s header does not contain key %s'%(path+fname, key)\r\n \r\n return header, headlines",
"def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None",
"def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index",
"def find_indeces(self, header):\n indeces = {'T': None, 'WV': None, 'WK': None, 'BZ': None, 'SPR': None,\n 'WBER': None, 'ABG.': None, 'UNG.': None, 'SPOE': None,\n 'FPOE': None, 'OEVP': None, 'GRUE': None, 'NEOS': None,\n 'WWW': None, 'ANDAS': None, 'GFW': None, 'SLP': None,\n 'WIFF': None, 'M': None, 'FREIE': None}\n for index, item in enumerate(header):\n indeces[item] = index\n return indeces",
"def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header",
"def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index",
"def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict",
"def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol",
"def find_header_info(file):\n\n hdr = pyfits.getheader(file, 1)\n obsid = hdr['OBS_ID']\n detnam = hdr['DETNAM']\n date_obs = hdr['DATE-OBS']\n date_end = hdr['DATE-END']\n tstart = hdr['TSTART']\n tstop = hdr['TSTOP']\n ra_pnt = hdr['RA_PNT']\n dec_pnt = hdr['DEC_PNT']\n roll_pnt = hdr['ROLL_PNT']\n defocus = hdr['DEFOCUS']\n foc_len = hdr['FOC_LEN']\n ra_nom = hdr['RA_NOM']\n dec_nom = hdr['DEC_NOM']\n sim_x = hdr['SIM_X']\n sim_y = hdr['SIM_Y']\n sim_z = hdr['SIM_Z']\n\n return [obsid, detnam, date_obs, date_end, tstart, tstop, ra_pnt, dec_pnt, ra_nom, dec_nom, roll_pnt, foc_len, defocus, sim_x, sim_y, sim_z]",
"def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count 
= 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h",
"def get_header(file):\n read_file = open(file, 'r')\n reader = csv.DictReader(read_file)\n return reader.fieldnames",
"def get_column_dict(self) -> HeaderToWells:\n return self._grid.columns",
"def getHeaderDict(self):\r\n #put the headers into a dict\r\n \r\n print(\"opening \",self.filename)\r\n with open(self.filename, 'r') as readfile:\r\n headers = readfile.readline()\r\n firstrow = readfile.readline()\r\n if not firstrow:\r\n print(\"first line after headers is blank\")\r\n self.loadDictRow(keystring=headers)\r\n else: #assume first row after headers is test router\r\n print(\"load test router row\") \r\n self.loadDictRow(keystring = headers, valuestring = firstrow) \r\n \r\n # check for headers\r\n miscount=0\r\n for key in self.dataheader:\r\n if not key in self.objdict:\r\n print(\"missing key !\", key)\r\n miscount += 1\r\n\r\n if miscount == 0:\r\n print(\"all Columns found. Thank you.\")\r\n # elif (miscount == 11) and (\"IPADDRESS\" in ):\r\n # print(\"Found IP Address column. program will add additional columns\")\r\n elif miscount > 11:\r\n print(\"Could not locate Header Row\")\r\n elif miscount > 0:\r\n print(\"some columns missing, will add additional columns\")\r\n \r\n \r\n #end file check on filename \r",
"def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices",
"def create_index_dict(vcb_file):\n index_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n index_dict[int(line[0])] = line[1]\n return index_dict",
"def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict",
"def getIndex(self,filt):\n indx = [i for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx",
"def check_for_header(filename):\n header = {}\n start_id = -1\n with open(filename, \"r\") as f:\n start = re.compile(r\"\\bSTART|start\\b\")\n # if the file has the keyword start, extract header\n if bool(start.search(f.read())):\n f.seek(0) # set the cursor back to the beginning\n lines = f.readlines()\n for i, line in enumerate(lines):\n if start.match(line):\n start_id = i # the line number where start is used (divides header and body)\n break\n args = line.split()\n args.insert(0, \"\") # check_for_commands only handles the second argument (first is usually res_id)\n header['DEFAULT'] = check_for_commands(args, 1, 2)\n\n return header, start_id",
"def order_column_indices(self):\n return self._order_column_indices()",
"def read_and_Kent_index(filename):\n chr_dict = defaultdict(lambda : defaultdict(list))\n debug = 0\n with open(filename, 'rU') as fh:\n # Skip comment lines\n # :TODO Fix this and make more general\n fh.next()\n fh.next()\n for line in fh:\n p_line = line[:-1].split(\"\\t\")\n try:\n start = int(p_line[1])\n end = int(p_line[2])\n kent_bin = binFromRangeStandard(start, end)\n except ValueError:\n # Case for VCF files\n start = int(p_line[1]) - 1\n end = int(p_line[1])\n kent_bin = binFromRangeStandard(start, end)\n chr_dict[p_line[0]][kent_bin].append(GTab(start, end))\n return(chr_dict)",
"def _get_table_columns(self):\n try:\n table_header = parse_table_head(self.table.value, version=self.version)\n merged_data = self.table.value[table_header.tdef_header_end:]\n if table_header.TDEF_header.next_page_ptr:\n merged_data = merged_data + self._merge_table_data(table_header.TDEF_header.next_page_ptr)\n\n parsed_data = parse_table_data(merged_data, table_header.real_index_count,\n table_header.column_count, version=self.version)\n\n # Merge Data back to table_header\n table_header['column'] = parsed_data['column']\n table_header['column_names'] = parsed_data['column_names']\n\n except ConstructError:\n logging.error(f\"Failed to parse table header {self.table.value}\")\n return\n col_names = table_header.column_names\n columns = table_header.column\n\n # Add names to columns metadata so we can use only columns for parsing\n for i, c in enumerate(columns):\n c.col_name_str = col_names[i].col_name_str\n\n # column_index is more accurate(id is always incremented so it is wrong when a column is deleted).\n # Some tables like the catalog don't have index, so if indexes are 0 use id.\n\n # create a dict of index to column to make it easier to access. offset is used to make this zero based\n offset = min(x.column_index for x in columns)\n column_dict = {x.column_index - offset: x for x in columns}\n # If column index is not unique try best effort\n if len(column_dict) != len(columns):\n # create a dict of id to column to make it easier to access\n column_dict = {x.column_id: x for x in columns}\n\n if len(column_dict) != table_header.column_count:\n logging.debug(f\"expected {table_header.column_count} columns got {len(column_dict)}\")\n return column_dict, table_header",
"def get_water_index_map(archive, header):\n column_re = {\n 'surface': {\n 'flow': 'pretok',\n 'level': 'vodostaj'\n },\n 'ground': {\n 'altitude': 'nivo',\n 'level': 'vodostaj'\n }\n }\n column_map = {key: -1 for key in column_re[archive].keys()}\n empty = True\n\n # Do regex search of every db column for every CSV file column heading.\n for i, column in enumerate(header):\n for column_name in column_re[archive].keys():\n if re.search(column_re[archive][column_name], column, re.IGNORECASE):\n if column_map[column_name] != -1:\n continue\n column_map[column_name] = i\n empty = False\n\n return None if empty else column_map",
"def file_fzp_start(filename):\n\n with open(filename) as in_f:\n c= 0\n cols = []\n #find start of VISSIM data\n line = in_f.readline()\n while 'VehNr;' not in line:\n line = in_f.readline()\n cols = [x.strip() for x in line.split(';')][:-1]\n c +=1\n\n return {'lines_to_skip' : c, 'header_cols' : cols}",
"def read_header(fname):\n \n with open(fname, 'r') as f:\n first_line = f.readline()\n cols_info = first_line.split(' ')\n col_names = []\n for col_info in cols_info:\n col_name = col_info.split('(')[0].strip('#')\n col_names.append(col_name)\n return col_names",
"def index_value(self):\r\n\t\tfor index, column_header in enumerate(self.header_row):\r\n\t\t\tprint(index, column_header)",
"def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index"
] |
[
"0.76352763",
"0.6538343",
"0.6447495",
"0.6401379",
"0.63600105",
"0.6346782",
"0.63438034",
"0.6278738",
"0.6268012",
"0.6213276",
"0.6110932",
"0.610526",
"0.6068173",
"0.6054864",
"0.6004899",
"0.5946061",
"0.5935479",
"0.59272295",
"0.5905116",
"0.5894535",
"0.588547",
"0.5877414",
"0.58754814",
"0.585924",
"0.5830661",
"0.58268017",
"0.5822864",
"0.5786363",
"0.5778897",
"0.5775004"
] |
0.80510694
|
0
|
Moves file pointer to the next noncomment line and returns the comments as a list of strings.
|
def skip_comments(filepointer):
    comments = []
    data = '#'
    try:
        pos = filepointer.tell()
    except (AttributeError, OSError, ValueError):
        print("Could not read the file position.")
        return None
    while data[0] == '#':
        data = filepointer.readline()
        if not data:
            raise Exception("Unexpected end of file while reading comments.")
        if data[0] == '#':
            comments.append(data)
            pos = filepointer.tell()
        else:
            # rewind to the start of the first non-comment line
            filepointer.seek(pos)
    return comments
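# Hedged usage sketch (added for illustration, not part of the original corpus):
# skips a leading block of '#' comment lines in a hypothetical data file and
# leaves the pointer at the first non-comment line.
def _demo_skip_comments(path='rawdata.txt'):
    with open(path, 'r') as f:
        comments = skip_comments(f)   # list of '#...' lines (with newlines), or None
        first_data_line = f.readline()
    return comments, first_data_line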
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_list_of_comments(path):\n\n # opens comments file\n try:\n return [\n re.sub(\" +\", \" \", comment.strip().rstrip())\n for comment in list(open(path, \"r\"))\n ]\n except Exception as e:\n print(\"Error loading comments file: \", e)\n sys.exit(1)",
"def _strip_comments(file_contents):\n lines_without_comments = []\n for line in file_contents:\n comment_position = line.find(COMMENT_INDICATOR)\n if comment_position != -1:\n lines_without_comments.append(line[:comment_position])\n else:\n lines_without_comments.append(line)\n return lines_without_comments",
"def line_comments(self, tail_comment = None):\n tail_comment = tail_comment or self.tail_comment()\n return [comment for comment in self.comments if comment != tail_comment]",
"def remove_c_style_comments(fd):\n ret = []\n comment_state = False\n for line in fd:\n while True:\n # seems we have nothing left\n if len(line) < 2:\n break\n # we're still inside a comment\n if comment_state:\n idx = line.find(\"*/\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = False\n continue\n # comment doesn't seem to end on this line\n break\n # we're not inside any comment\n else:\n idx = line.find(\"/*\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = True\n continue\n if \"//\" in line:\n line = line.split(\"//\", 1)[0]\n # only now we can actually do our job\n line = line.strip()\n if len(line) > 0:\n ret.append(line)\n break\n return ret",
"def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp",
"def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))",
"def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)",
"def comments_from_file(file_path):\n comments = []\n analyze = False\n comment_block_begin = False\n with open(file_path, 'r') as config_file:\n lines = config_file.readlines()\n lines = [line.rstrip() for line in lines]\n for line in lines:\n if line.startswith('# THIS MUST PRECEDE DIRECTLY BEFORE LIST OF CONFIG OPTIONS!'):\n analyze = True\n continue\n if line.startswith('# THIS MUST FOLLOW DIRECTLY AFTER LIST OF CONFIG OPTIONS!'):\n break\n if analyze and line.startswith('#'):\n if line.startswith('# BEGIN'):\n comments.append(line)\n comment_block_begin = False\n continue\n if comment_block_begin:\n comments[-1] += line.lstrip('#') if not comments[-1].endswith('/') else line.lstrip('# ')\n continue\n comment_block_begin = True\n comments.append(line.lstrip('# '))\n else: # not comment\n if comment_block_begin:\n comment_block_begin = False\n return comments",
"def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines",
"def getcomments(object):\r\n try:\r\n lines, lnum = findsource(object)\r\n except (IOError, TypeError):\r\n return None\r\n\r\n if ismodule(object):\r\n # Look for a comment block at the top of the file.\r\n start = 0\r\n if lines and lines[0][:2] == '#!': start = 1\r\n while start < len(lines) and string.strip(lines[start]) in ('', '#'):\r\n start = start + 1\r\n if start < len(lines) and lines[start][:1] == '#':\r\n comments = []\r\n end = start\r\n while end < len(lines) and lines[end][:1] == '#':\r\n comments.append(string.expandtabs(lines[end]))\r\n end = end + 1\r\n return string.join(comments, '')\r\n\r\n # Look for a preceding block of comments at the same indentation.\r\n elif lnum > 0:\r\n indent = indentsize(lines[lnum])\r\n end = lnum - 1\r\n if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \\\r\n indentsize(lines[end]) == indent:\r\n comments = [string.lstrip(string.expandtabs(lines[end]))]\r\n if end > 0:\r\n end = end - 1\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comment[:1] == '#' and indentsize(lines[end]) == indent:\r\n comments[:0] = [comment]\r\n end = end - 1\r\n if end < 0: break\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comments and string.strip(comments[0]) == '#':\r\n comments[:1] = []\r\n while comments and string.strip(comments[-1]) == '#':\r\n comments[-1:] = []\r\n return string.join(comments, '')",
"def getAllComments(self):\r\n return [(ind, comment) for ind, comment in enumerate(self.comments)]",
"def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])",
"def comment_tokens(self):\n return self._comment_tokens",
"def comments(self):\n lineno = 0\n novermin = set()\n src = self.__source\n if type(src) == bytes:\n src = src.decode(errors=\"ignore\")\n for line in src.splitlines():\n lineno += 1\n line = line.strip()\n m = RE_COMMENT.match(line)\n if m is not None:\n comment = m.group(2).strip()\n if comment == \"novermin\" or comment == \"novm\":\n # Ignore if it is inside another comment, like: `# test: # novm`\n if m.start(0) < m.start(1) and m.group(0).strip().startswith(\"#\"):\n continue\n # Associate with next line if the comment is \"alone\" on a line, i.e. '#' starts the line.\n novermin.add(lineno + 1 if m.start(1) == 0 else lineno)\n return novermin",
"def _dump_comment(comment: List[str]) -> List[str]:\n return [\"/**\"] + comment + [\"*/\"]",
"def filter_comments(asm_utf):\n comments = []\n # removes nones\n a = filter(lambda x: x != None, asm_utf)\n # splits on comment token\n comments = [re.split(\";\", line) for line in a]\n # takes only those that have a comment token\n comments = list(filter(lambda x: len(x) > 1, comments))\n # strips the whitespace from those tokens\n comments = [line[1].strip() for line in comments]\n # removes the singleton chars\n comments = list(filter(lambda x: len(x) > 1, comments))\n # regex to remove section markers and extraneous tabs\n # left over by poor reading of files\n comments = [re.sub('([-=].*[-=]|\\t)', '', line) for line in comments]\n comments = list(filter(lambda x: x != '', comments))\n return comments",
"def get_comment_strings(ast_root):\n\n comment_nodes = []\n\n for node in ast.walk(ast_root):\n if isinstance(node, ast.Str) and \\\n dump_ast(node).lstrip(\" \").startswith(\"#\"):\n comment_nodes.append(node)\n\n return sorted(comment_nodes, key=lambda n: (n.lineno, n.col_offset))",
"def strip_comments(tokens):\n prev_typ = None\n prev_end_col = 0\n for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:\n if typ in (tokenize.NL, tokenize.NEWLINE):\n if prev_typ in (tokenize.NL, tokenize.NEWLINE):\n start_col = 0\n else:\n start_col = prev_end_col\n end_col = start_col + 1\n elif typ == tokenize.COMMENT and start_row > 2:\n continue\n prev_typ = typ\n prev_end_col = end_col\n yield typ, tok, (start_row, start_col), (end_row, end_col), line",
"def remove_comments(ls):\r\n for i in range(len(ls)):\r\n ls[i] = re.sub(r'//.*', '', ls[i])\r\n\r\n return ls",
"def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None):\n if s is None:\n s = open(filename, 'rt').read()\n\n L = s.split('\\n')\n \n # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython\n if not is_c:\n for i in range(len(L)):\n if L[i].strip().startswith(\"'\") and L[i].strip().endswith(\"'\"):\n L[i] = ''\n i = 0\n while i < len(L):\n found = False\n for triple_quote in ['\"\"\"', \"'''\"]:\n if L[i].strip().startswith(triple_quote):\n L[i] = L[i].strip()[3:]\n for j in range(i, len(L)):\n if triple_quote in L[j]:\n found = True\n L[j] = ''\n if found:\n break\n i = j+1\n if not found:\n i += 1\n else:\n begin_comment = '/*'\n end_comment = '*/'\n i = 0\n while i < len(L):\n found = False\n if begin_comment in L[i]:\n rest = L[i][L[i].index(begin_comment)+len(begin_comment):]\n L[i] = L[i][:L[i].index(begin_comment)]\n if end_comment in rest:\n found = True\n i += 1\n else:\n for j in range(i+1, len(L)):\n if end_comment in L[j]:\n found = True\n L[j] = L[j][L[j].index(end_comment)+len(end_comment):]\n else:\n L[j] = ''\n if found:\n break\n i = j + 1\n if not found:\n i += 1\n\n# util.print_header('Lines before exclude_tests:' + filename, '\\n'.join(L))\n\n # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython\n if exclude_tests:\n # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper\n if not is_c:\n methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split()\n else:\n methods = ['int main', 'void main']\n i = 0\n while i < len(L):\n L_i_strip = L[i].strip()\n if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or\n any(L_i_strip.startswith('cdef ' + method) for method in methods))) or\n (is_c and (any(L_i_strip.startswith(method) for method in methods)))):\n L[i] = ''\n for j in range(i+1, len(L)):\n L_j_strip = L[j].strip()\n c_ok = True\n if is_c:\n c_ok = L_j_strip != '{' and L_j_strip != '}'\n if not L[j].startswith(' ') and not L[j].startswith('\\t') and not len(L[j].strip()) == 0 and c_ok:\n break\n else:\n L[j] = ''\n i = j\n elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c:\n L[i] = ''\n i += 1\n else:\n i += 1\n\n# util.print_header('Lines before exclude_imports:' + filename, '\\n'.join(L))\n if exclude_imports:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')]\n else:\n L = [x for x in L if not x.lstrip().startswith('#include')]\n# util.print_header('Lines before exclude_comments:' + filename, '\\n'.join(L))\n if exclude_comments:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('#') and not x.strip() == 'pass']\n else:\n L = [x for x in L if not x.lstrip().startswith('//')]\n# util.print_header('Lines before exclude_globals:' + filename, '\\n'.join(L))\n if exclude_globals and not is_c:\n L = [x for x in L if (x.startswith(' ') or x.startswith('\\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))]\n# util.print_header('Lines before exclude_blank:' + filename, '\\n'.join(L))\n\n if is_c:\n # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported 
lines in Halide paper\n L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')]\n if exclude_blank:\n L = [x for x in L if not len(x.strip()) == 0]\n\n if verbose:\n util.print_header('Final lines for:' + filename, '\\n'.join(L))\n\n return len(L)",
"def extract_comment_py():\n debug(\"extract comment from a python script.\")\n for line in CURRENT_BUFFER[:3]:\n if re.search(r\"coding[:=]\\s*([-\\w.]+)\", line):\n pattern = re.compile(r\"coding[:=]\\s*(?P<encoding>[-\\w.]+)\")\n globals()['ENCODING'] = pattern.search(line).group('encoding')\n debug(\"found encoding: %s\" % globals()['ENCODING'])\n\n lines = list(CURRENT_BUFFER)\n for (i, iline) in enumerate(lines[:10]):\n # find \"\"\" or ''' in the first few lines.\n if '\"\"\"' in iline or \"'''\" in iline:\n # find the end of it.\n breaker = '\"\"\"' if '\"\"\"' in iline else \"'''\"\n for j, jline in enumerate(lines[i+1:]):\n if breaker in jline:\n # found it, format the comment a little bit.\n if j == 0:\n # in the same line, this is a one line comment.\n return [jline[jline.index(breaker)+3:jline.rindex(breaker)]]\n else:\n lines[i] = lines[i][lines[i].index(breaker)+3:]\n lines[i+j+1] = lines[i+j+1][:lines[i+j+1].rindex(breaker)]\n return lines[i:i+j+1]\n else:\n # end of the comment is not found.\n return\n else:\n # comment might start with #\n return extract_comment_sh(python_style=True)",
"def skipComment(self):\r\n\t\tch = self.nextChar()\r\n\t\twhile ch and ch != \"\\n\":\r\n\t\t\tch = self.nextChar()",
"def print_comments():\n with open('a_cpp_file.cpp', 'r') as file:\n data = file.read()\n to_print = ''\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '*' and data[i-2] == '/':\n should_print = True\n if char == '*' and data[i+1] == '/' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '/' and data[i-2] == '/':\n should_print = True\n if char == '\\n' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char",
"def comments(self):\n return self._comments",
"def comments(self):\n return self._comments",
"def get_extra_comments():\n try:\n yml_iter = cfg.yml_config[\"comments\"]\n except:\n # Probably no \"comments\" section in the yml-file.\n return \"\\n\"\n\n return (\"\\n\".join(yml_iter) + \"\\n\") if yml_iter is not None else \"\\n\"",
"def get_initial_comment_list(self, comment_tree):\n return []",
"def tokenize_file_keep_comments(infilename):\n# reg expss\n\timport re\n\timport sys\n\timport fileinput\n\timport math\n\n#\n# open and parse input \n#\n\n\ttry:\n\t\tfp = open (infilename, 'r')\n\texcept IOError:\n\t\tprint \"Error opening file\"\n\t\traise\n\n\tlines = fp.readlines ()\n\n#\n# put all tokens into tokens and remove comments\n#\n\ttokens = []\n\tfor line in lines:\n\t\ttmp = re.split ('[ \\t\\n]*',line)\n#\t\tprint \"tmp = \", tmp\n\t\tfor tok in tmp:\n\t\t\tif (tok != ''):\n\t\t\t\ttokens.append(tok)\n#\tprint \"tokens = \", tokens\n\n\tfp.close()\n\n\treturn tokens",
"def comments(self):\n return self.container['comments']",
"def get_comments(self):\n raise NotImplementedError"
] |
[
"0.7470412",
"0.71484244",
"0.7036303",
"0.6953977",
"0.69023705",
"0.6800556",
"0.6739347",
"0.6587444",
"0.65565395",
"0.6545687",
"0.6507777",
"0.6506175",
"0.63937104",
"0.63558143",
"0.63366956",
"0.62867033",
"0.6239873",
"0.622723",
"0.62262815",
"0.6225103",
"0.6222308",
"0.61993885",
"0.61867017",
"0.618398",
"0.618398",
"0.61790925",
"0.61404115",
"0.6137202",
"0.61367226",
"0.61180705"
] |
0.75017625
|
0
|
Return true if every key specified in required is present in the candidate and matches settings (when given), and no restricted key matches settings.
|
def check_param_matches(candidate, settings=None, required=None, restricted=None):
    # NOTE: possibly deprecated
    if not settings:
        settings = {}
    if not required:
        required = []
    if not restricted:
        restricted = []
    # required keys must be present in candidate and match the value in settings,
    # if provided
    for p in required:
        if p not in candidate:
            return False
        elif p in settings and settings[p] != candidate[p]:
            return False
    # TODO: generalize this to allow lists of required/restricted
    # restricted keys must not match the value in settings
    for p in restricted:
        if p in settings and p in candidate and settings[p] == candidate[p]:
            return False
    return True
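# Hedged usage sketch (added for illustration, not part of the original corpus);
# the parameter names and values are hypothetical.
def _demo_check_param_matches():
    settings = {'dataset': 'pos', 'size': 'Large'}
    good = {'dataset': 'pos', 'size': 'Large', 'seed': 3}
    bad = {'dataset': 'neg', 'size': 'Large', 'seed': 3}
    # both required keys must be present and agree with settings
    return (check_param_matches(good, settings, required=['dataset', 'size']),
            check_param_matches(bad, settings, required=['dataset', 'size']))   # (True, False)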
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_required_keys(plist, filename, required_keys):\n passed = True\n for req_key in required_keys:\n if not plist.get(req_key):\n print(\"{}: missing required key {}\".format(filename, req_key))\n passed = False\n return passed",
"def validate_required_keys(input_dict, filename, required_keys):\n passed = True\n for req_key in required_keys:\n if not input_dict.get(req_key):\n print(\"{}: missing required key {}\".format(filename, req_key))\n passed = False\n return passed",
"def all_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n return len(required) == passed",
"def subfields_any(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') == req_val:\n return True\n return False",
"def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. \n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid",
"def any_keys_not_none(d: dict, required: list):\n passed = 0\n for r in required:\n v = d.get(r)\n if v is not None:\n passed += 1\n\n if len(required) == 1 and passed == len(required): # Exclusion for sequence with 1 element\n return True\n\n return 0 < passed < len(required)",
"def subfields_all(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') != req_val:\n return False\n return True",
"def has_keys(self) -> bool:\n \n for key, value in self.key_satified.items():\n if value is not True:\n return False\n return True",
"def test_check_required_success():\n settings = SettingsModel()\n # Tamper required settings\n settings._required_settings = (\"FOO\", \"PLOP\")\n\n settings.load_from_kwargs(\n FOO=True,\n BAR=True,\n check=False,\n defaults=False,\n )\n\n with pytest.raises(InvalidSettings):\n settings.check()\n\n settings.load_from_kwargs(PLOP=True, check=False, defaults=False)\n\n settings.check()",
"def validate_required(self, claims, required, *args, **kwargs):\n return all(claims.get(claim) for claim in required)",
"def has_at_least_one_relevant_key(file_as_dict):\n for key in file_as_dict.keys():\n b = True\n for unwanted_key in non_selected_keys:\n if unwanted_key in key.lower() :\n b = False \n if b :\n return True\n return False",
"def _has_all_keys_from(d, valid_d):\n\n for k, v in valid_d.items():\n if k not in d:\n return False\n return True",
"def _is_valid(key):\n is_valid = False\n for valid_key in VALID_KEYS:\n if valid_key in key:\n is_valid = True\n for invalid_key in INVALID_KEYS:\n if invalid_key in key:\n is_valid = False\n return is_valid",
"def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))",
"def __contains__(self, key):\n return key in self._opts or key in self._groups",
"def valid_compatible_data(compatible_data):\n if not isinstance(compatible_data, dict):\n return False\n if set(compatible_data.keys()) != compatible_data_keys_set:\n return False\n for key in compatible_data:\n boolean = (compatible_data[key] is True or\n compatible_data[key] is False)\n if not boolean:\n return False\n return True",
"def _check_implied_parameters(self, parameters):\n for key, values, implied_keys in self.IMPLIED_PARAMETERS:\n self.log([u\"Checking implied parameters by '%s'='%s'\", key, values])\n if (key in parameters) and (parameters[key] in values):\n found = False\n for implied_key in implied_keys:\n if implied_key in parameters:\n found = True\n if not found:\n if len(implied_keys) == 1:\n msg = u\"Parameter '%s' is required when '%s'='%s'.\" % (implied_keys[0], key, parameters[key])\n else:\n msg = u\"At least one of [%s] is required when '%s'='%s'.\" % (\",\".join(implied_keys), key, parameters[key])\n self._failed(msg)\n return\n self.log(u\"Passed\")",
"def validate_params(self, params: Dict[str, Any]) -> bool:\n dict_set_defaults(params, self.DEFAULT_PARAMS)\n\n for k in self.params:\n if k in {\"name\", \"descr\", \"cache_file\"}:\n continue\n\n if self.params[k] != params.get(k):\n return False\n\n return True",
"def __verify_required_parameters(self, parameters, required_parameters):\n\n\t\tfor parameter in required_parameters:\n\t\t\tif False == parameters.has_key(parameter):\n\t\t\t\traise MissingParameterError(parameter)\n\n\t\treturn True",
"def _key_check(self, key_list, chk_dict=None):\n exists = False\n if chk_dict is None:\n chk_dict = self._e_dict\n for key in key_list:\n exists = key in chk_dict.keys()\n if exists:\n chk_dict = chk_dict[key]\n else:\n break\n return exists",
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n if not Settings._is_in_list(v, valid_d[k]):\n return False\n elif Settings._is_dict(v):\n if isinstance(valid_d[k], dict):\n if not Settings._is_in_dict(v, valid_d[k]):\n return False\n elif isinstance(valid_d[k], list):\n if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):\n return False\n else:\n raise InvalidSettingError()\n else:\n raise InvalidSettingError()\n return Settings._has_all_keys_from(d, valid_d)",
"def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")",
"def validate(dic, option_list):\n\tfor key in dic.viewkeys():\n\t\tif key in option_list:\n\t\t\tfor option in option_list:\n\t\t\t\tif option != key:\n\t\t\t\t\tif dic[option] and dic[key]:\n\t\t\t\t\t\traise click.UsageError('Invalid option combination --%s \\\n\t\t\t\t\t\t\tcannot be used with --%s' % (option, key))\n\n\treturn True",
"def check_keys_in_dict(dictionary, keys):\n if not all(key in dictionary for key in keys):\n raise KeyError(\"Dictionary missing key values.\"\n \"Requires: {}\".format(keys))\n return True",
"def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)",
"def keys_exist(self, *keys):\n return tuple(key in self.keys() for key in keys)",
"def validate_auth(self, required_keys):\n # The config validates that the 'auth' dict was loaded, but do a safety check here\n if not self.auth:\n raise AppAuthError('[{}] Auth config is empty'.format(self))\n\n auth_key_diff = required_keys.difference(set(self.auth))\n if not auth_key_diff:\n return True\n\n missing_auth_keys = ', '.join('\\'{}\\''.format(key) for key in auth_key_diff)\n raise AppAuthError('[{}] Auth config is missing the following '\n 'required keys: {}'.format(self, missing_auth_keys))",
"def is_valid_request(request_args, keys):\n if type(keys) != list:\n raise TypeError(\"Keys must be of type list\")\n\n for key in keys:\n if key not in request_args:\n return False\n return True",
"def _is_supplied_by_config(group: argparse._MutuallyExclusiveGroup, conf: Dict[str, Any]) -> bool:\n group_args = []\n for arg in group._group_actions:\n group_args.append(arg.dest)\n\n count = 0\n for val in group_args:\n if val in conf:\n count += 1\n return count == len(group_args) or count == 0",
"def validate_params(params, required_params, validate_values=False):\n\t# every key (or element) in required_params must be present in the given params\n\tfor k in required_params:\n\t\tif k not in params: \n\t\t\treturn False\n\t\telif validate_values:\n\t\t\ttry:\n\t\t\t\t# see if we got a dictionary of parameters\n\t\t\t\tp_val = params.get(k)\n\t\t\texcept AttributeError:\n\t\t\t\t# if it's not a dictionary, it doesn't have values, obviously\n\t\t\t\treturn False\n\t\t\t# now we need to check if the given parameter value is valid\n\t\t\ttry:\n\t\t\t\treq_vals = required_params.get(k)\n\n\t\t\t\t# check if there's a list of requirements\n\t\t\t\ttry:\n\t\t\t\t\tif p_val not in req_vals:\n\t\t\t\t\t\treturn False\n\t\t\t\texcept TypeError:\n\t\t\t\t\t# check if it matches the required value\n\t\t\t\t\tif p_val != req_vals:\n\t\t\t\t\t\treturn False\n\t\t\texcept AttributeError:\n\t\t\t\t# if the requirements are not specified, just make sure it's set to something\n\t\t\t\tif p_val is None:\n\t\t\t\t\treturn False\n\t# and if we pass all the checks for all the required_params, it's valid\n\treturn True"
] |
[
"0.68788046",
"0.68218213",
"0.6819219",
"0.68076414",
"0.67759246",
"0.6773437",
"0.67254263",
"0.66216743",
"0.65819454",
"0.65736276",
"0.6532516",
"0.64255255",
"0.63672996",
"0.6340896",
"0.63351995",
"0.63173246",
"0.63134223",
"0.63104975",
"0.6309449",
"0.6299355",
"0.6292566",
"0.6277922",
"0.6234617",
"0.6226176",
"0.6225485",
"0.6208584",
"0.6205841",
"0.61998576",
"0.6191554",
"0.6189478"
] |
0.685779
|
1
|
For the given list of parameter dictionaries, return a list of the dictionary keys that appear in every parameter dictionary
|
def get_shared_keys(param_list):
if not param_list:
return
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys
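
As a quick illustration of the behaviour described by the query above, here is a minimal usage sketch. It assumes the get_shared_keys definition shown above is in scope; the sample parameter dictionaries are invented for demonstration.

# Only 'batch' and 'lr' appear in every dictionary, so only they are returned (sorted).
params = [
    {"lr": 0.1, "batch": 32, "seed": 0},
    {"lr": 0.01, "batch": 64},
    {"lr": 0.001, "batch": 16, "optimizer": "adam"},
]

print(get_shared_keys(params))  # ['batch', 'lr']
print(get_shared_keys([]))      # None -- the function returns early for an empty list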
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique",
"def extract_key_query_params(\n query_params: Dict[str, List[str]], param_key: str\n) -> Set[str]:\n return set(\n [\n item.lower()\n for sublist in [\n [value.lower() for value in query_params[key]]\n for key in query_params.keys()\n if key.lower() == param_key and query_params.get(key)\n ]\n for item in sublist\n ]\n )",
"def search_preproc(list_pre, dic_pre):\n result = []\n for x in list_pre:\n shared_items = [x[k] == dic_pre[k] for k in x if k in dic_pre]\n result.append(sum(shared_items)==len(dic_pre))\n return [i for i, x in enumerate(result) if x]\n # maybe also searches if the files exist?",
"def iterparams(params: Dict[str, List[Any]]) -> Dict[str, Any]:\n for set in product(*params.values()):\n yield dotdict(zip(params.keys(), set))",
"def _dict_params(self, the_dict: Dict):\n return [p for _, e in the_dict.items() for p in self._params(e)]",
"def get_shared_values(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = [p.keys() for p in param_list]\n\tshared_keys = set(keys[0]).intersection(*keys)\n\tshared = {k: param_list[0][k] for k in shared_keys}\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tif k in shared and shared[k] != p[k]:\n\t\t\t\tshared.pop(k)\n\treturn shared",
"def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list",
"def group_by_keys(param_list, keys):\n\tkeys = list(keys)\n\tnames = {}\n\tfor p in param_list:\n\t\t\n\t\tif len(keys) > 0:\n\t\t\tkey = join_params(**{k: p.get(k, None) for k in keys})\n\t\t\t#vals = {k: p.get(k, None) for k in keys}\n\t\t\t#name = join_params(**vals)\n\t\t\t#names[name]=vals\n\t\telse:\n\t\t\tkey = ''\n\t\tif key in names:\n\t\t\tnames[key].append(p)\n\t\telse:\n\t\t\tnames[key]=[p]\n\treturn names",
"def get_identifiers(args_list, valid_keys):\n # ignore keys which have no variation among results\n identifiers = []\n for args in args_list:\n identifier = ''\n for key in valid_keys:\n if key in args:\n identifier += '{}={},'.format(key, args[key])\n identifiers.append(identifier)\n return identifiers",
"def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}",
"def search_multiple_keys(dictionary, primary_search='isRequired', search_list=['label', 'name']):\n\n # get a flat list of the schema and keep only items in all_search_list\n all_search_list = [primary_search] + search_list\n result = []\n flat_dict = flatten(dictionary)\n for k, v in flat_dict.items():\n if any(x in k for x in all_search_list):\n result.append( {k: v} )\n\n # iterate through the schema and get the search items corresponding to each primary_search item (at the same level/section)\n help_list = []\n for i in result:\n try:\n tmp_dict = {}\n # key = i.keys()[0]\n key = list(i.keys())[0] # n Python 3 dict.keys() returns an iterable but not indexable object. Therefore convert it to an iterable, which is list.\n if key and key.endswith(primary_search):\n for item in all_search_list:\n corresponding_label_key = '.'.join(key.split('.')[:-1]) + '.' + item\n for j in result:\n key_label = list(j.keys())[0]\n if key_label and key_label.endswith(item) and key_label == corresponding_label_key: # and result.has_key(key):\n tmp_dict.update({item: j[key_label]})\n if tmp_dict:\n help_list.append( tmp_dict )\n #if tmp_dict:\n # help_list.append( {primary_search: tmp_dict} )\n\n except Exception as e:\n #import ipdb; ipdb.set_trace()\n print(e)\n\n return help_list",
"def get_keys(in_data: Any) -> List:\n if np.isscalar(in_data) or in_data is None:\n return []\n try:\n return list(in_data.keys()) + flatten([get_keys(v) for v in in_data.values()])\n except AttributeError:\n # some sort of list like iterable\n return flatten([get_keys(x) for x in in_data])",
"def all_keys(blueprint: Union[dict, list]) -> list:\n\n keys = list()\n\n if isinstance(blueprint, list):\n for item in blueprint:\n keys.extend(all_keys(item))\n elif isinstance(blueprint, dict):\n for key, value in blueprint.items():\n keys.append(key)\n keys.extend(all_keys(value))\n\n return keys",
"def search_keys(dictionary, search_list=['help_text', 'label']):\n search_item1 = search_list[0]\n search_item2 = search_list[1]\n result = []\n flat_dict = flatten(dictionary)\n for k, v in flat_dict.items():\n if any(x in k for x in search_list):\n result.append( {k: v} )\n\n help_list = []\n for i in result:\n try:\n key = list(i.keys())[0]\n if key and key.endswith(search_item1):\n corresponding_label_key = '.'.join(key.split('.')[:-1]) + '.' + search_item2\n for j in result:\n key_label = list(j.keys())[0]\n if key_label and key_label.endswith(search_item2) and key_label == corresponding_label_key: # and result.has_key(key):\n #import ipdb; ipdb.set_trace()\n help_list.append({search_item2: j[key_label], search_item1: i[key]})\n except Exception as e:\n #import ipdb; ipdb.set_trace()\n print(e)\n\n return help_list",
"def fixture_sorted_param_names(allparams):\n return sorted(list(allparams.keys()))",
"def ParameterCombinations(parameter_dict):\n d = parameter_dict\n return [dict(zip(d, v)) for v in itertools.product(*d.values())]",
"def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result",
"def _get_params(self, fluents):\n objects_all = set()\n for fluent in fluents:\n objects = fluent.replace(\"(\",\"\").replace(\")\",\"\").split(\" \")[1:]\n objects_all.update(objects)\n\n return objects_all",
"def _parameter_combinations(parameters_dict):\n value_combinations = list(itertools.product(*parameters_dict.values()))\n keys = list(parameters_dict.keys())\n return [{keys[n]:values[n] for n in range(len(keys))} for values in value_combinations]",
"def multifilter(names, patterns):\n for name in names:\n if isinstance(name, collections.Mapping):\n for key in name.iterkeys():\n for pattern in patterns:\n if fnmatch.fnmatch(key, pattern):\n yield key\n break\n else:\n for pattern in patterns:\n if fnmatch.fnmatch(name, pattern):\n yield name\n break",
"def package_parameters(parameter_dict):\n parameter_dict = {\n k: v for k, v in parameter_dict.iteritems() if isinstance(v, list)\n }\n keys_sorted = sorted(parameter_dict)\n values = list(it.product(*(parameter_dict[key] for key in keys_sorted)))\n combos = tuple({k: v for k, v in zip(keys_sorted, row)} for row in values)\n return list(combos)",
"def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)",
"def combinations(self, key_list, lst=None):\n lst = self.filtered(key_list, lst)\n tups = [tuple([d[INDEP].get(k, d[DEP].get(k)) for k in key_list]) for d in lst]\n s = set(tups)\n l = list(s)\n l.sort()\n return [{k: v for k, v in zip(key_list, vals)} for vals in l]",
"def get_valid_keys(args_list, black_list=['out', 'gpu']):\n keys = args_list[0].keys()\n valid_keys = []\n for key in keys:\n if key not in black_list:\n cur = None\n for args in args_list:\n if cur is None:\n cur = args[key]\n if key not in args:\n warnings.warn('{} not in args={}'.format(key, args))\n continue\n if cur != args[key]:\n valid_keys.append(key)\n break\n return valid_keys",
"def pair_keys(list_of_dicts, first_key, second_key):\n return [{ dictionary[first_key]: dictionary[second_key] } \n for dictionary in list_of_dicts]",
"def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))",
"def _keys(obj):\n \n k = []\n \n if len(obj) > 0:\n # There is at least one thing\n for x in obj:\n # Make sure keys() is defined\n if hasattr(x, 'keys'):\n \n k.extend(x.keys())\n \n k = list(set(k))\n k.sort()\n \n return k",
"def keys(self):\n return self.params.keys()",
"def compile_ids(parameters):\n return tuple(p.keys()[0] for p in parameters[1:])",
"def get_vars(triple):\n return set([v for k, v in triple.items() if v.startswith('?')])"
] |
[
"0.6639945",
"0.6413684",
"0.63082236",
"0.6279882",
"0.61982036",
"0.6186242",
"0.6142993",
"0.6075495",
"0.6054601",
"0.6009603",
"0.5964664",
"0.59626204",
"0.59246665",
"0.59243405",
"0.5853851",
"0.58462185",
"0.5836325",
"0.5816195",
"0.58044916",
"0.5801902",
"0.5774324",
"0.57397157",
"0.573602",
"0.5717529",
"0.5710945",
"0.56801254",
"0.56717354",
"0.5655554",
"0.5616891",
"0.5614731"
] |
0.686177
|
0
|
Return a dictionary of the unique sets of param values for the given keys, indexed by a name made up of those values
|
def group_by_keys(param_list, keys):
    keys = list(keys)
    names = {}
    for p in param_list:
        if len(keys) > 0:
            # Build the group name from the values of the requested keys.
            key = join_params(**{k: p.get(k, None) for k in keys})
        else:
            key = ''
        if key in names:
            names[key].append(p)
        else:
            names[key] = [p]
    return names
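
group_by_keys calls a join_params helper that is not included in this record, so the sketch below supplies a simple stand-in for it; the stand-in's name format and the sample run dictionaries are assumptions made only for illustration. With both functions defined in the same module, a call might look like this.

def join_params(**kwargs):
    # Assumed stand-in: build a deterministic group name such as 'model=cnn_opt=adam'.
    return "_".join("{}={}".format(k, kwargs[k]) for k in sorted(kwargs))

runs = [
    {"model": "cnn", "opt": "adam", "lr": 0.1},
    {"model": "cnn", "opt": "sgd", "lr": 0.1},
    {"model": "cnn", "opt": "adam", "lr": 0.01},
]

groups = group_by_keys(runs, ["model", "opt"])
for name, members in sorted(groups.items()):
    print(name, len(members))
# model=cnn_opt=adam 2
# model=cnn_opt=sgd 1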
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}",
"def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique",
"def shorten_keys(params):\n\tparam_names = {}\n\tfor n in params:\n\t\tparts = n.split('_')\n\t\tfirsts = [p[0] for p in parts]\n\t\tparam_names[n] = ''.join(firsts)\n\treturn param_names",
"def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters",
"def build_params_dict(params, param_names):\n if len(params) != len(param_names):\n raise ValueError('Parameter and parameter name length mismatch.')\n return dict(zip(param_names, params))",
"def get_shared_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = set(param_list[0].keys())\n\tfor i in range(1, len(param_list)):\n\t\tkeys = keys.intersection(param_list[i].keys())\n\tkeys = list(keys)\n\tkeys.sort()\n\treturn keys",
"def extract_key_query_params(\n query_params: Dict[str, List[str]], param_key: str\n) -> Set[str]:\n return set(\n [\n item.lower()\n for sublist in [\n [value.lower() for value in query_params[key]]\n for key in query_params.keys()\n if key.lower() == param_key and query_params.get(key)\n ]\n for item in sublist\n ]\n )",
"def _parameter_combinations(parameters_dict):\n value_combinations = list(itertools.product(*parameters_dict.values()))\n keys = list(parameters_dict.keys())\n return [{keys[n]:values[n] for n in range(len(keys))} for values in value_combinations]",
"def generate_parameters_2_variations(x: dict):\n keys = tuple(x.keys())\n prods = product(*[v for _, v in x.items()])\n return [{keys[i]: p[i] for i in [0, 1]} for p in prods]",
"def valsForKeys(h, *keys):\n return [h[k] for k in keys]",
"def _get_parameter_values(parameters):\n # type: (Dict[str, str]) -> Dict[str, Any]\n parameter_values = {}\n for p in parameters.items():\n if p[1] in _values_by_type:\n parameter_values[p[0]] = _values_by_type[p[1]]\n return parameter_values",
"def iterparams(params: Dict[str, List[Any]]) -> Dict[str, Any]:\n for set in product(*params.values()):\n yield dotdict(zip(params.keys(), set))",
"def values(self):\n return {n: getattr(self, n) for n in self._hparam_types.keys()}",
"def get_shared_values(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = [p.keys() for p in param_list]\n\tshared_keys = set(keys[0]).intersection(*keys)\n\tshared = {k: param_list[0][k] for k in shared_keys}\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tif k in shared and shared[k] != p[k]:\n\t\t\t\tshared.pop(k)\n\treturn shared",
"def fixture_allparams_batch(request, allparams, sorted_param_names):\n idx = request.param\n idx_start = idx * BATCHSIZE\n idx_end = min((idx + 1) * BATCHSIZE, NPARAMS)\n pnames = sorted_param_names[idx_start: idx_end]\n return {pname: allparams[pname] for pname in pnames}",
"def get_all_combinations(param_opt):\n\tif not param_opt:\n\t\treturn {}\n\treturn (dict(zip(param_opt.keys(), x)) for x in itertools.product(*param_opt.values()))",
"def permutation_dict(params):\n params_list = {key: value for (key, value) in params.items() if isinstance(value, list)}\n params_single = {key: value for (key, value) in params.items() if not isinstance(value, list)}\n keys, values = zip(*params_list.items())\n permutations = [dict(zip(keys, v), **params_single) for v in itertools.product(*values)]\n return permutations",
"def _mappingGetValueSet(mapping, keys):\n setUnion = set()\n for k in keys:\n setUnion = setUnion.union(mapping[k])\n return setUnion",
"def listify_values(params):\n return dict((k, listify(v)) for (k, v) in params.iteritems())",
"def parameters_dict(self):\n return dict(zip(self.parameters_names(), self.parameters_list))",
"def filter_args(func, keys):\n filtered = {}\n sign = list(signature(func).parameters.keys())\n for k, v in {**keys}.items():\n if k in sign:\n filtered[k] = v\n return filtered",
"def get_parkey_map(self):\n pkmap = {}\n for selection in self.selections.normal_values():\n for parkey, choices in selection.get_parkey_map().items():\n if parkey not in pkmap:\n pkmap[parkey] = set()\n pkmap[parkey] |= set(choices)\n for parkey, choices in pkmap.items():\n pkmap[parkey] = list(pkmap[parkey])\n if \"CORR\" not in parkey:\n pkmap[parkey].sort()\n return pkmap",
"def get_all_combinations(self, hash_set):\n\t\tnames = sorted(hash_set)\n\t\treturn [dict(zip(names, prod)) for prod in it.product(\n\t\t*(hash_set[name] for name in names))]",
"def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params",
"def attribute_to_params_map(self):\n return self._param_names_map",
"def valid_parameter_combinations(parameterSpace):\n all_combinations = product(*parameterSpace.values())\n return [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]",
"def get_params(param_names, param_store, serialize_param=serialize_param,\r\n translate_param=translate_identity):\r\n if param_names is None:\r\n param_names = [name for name in param_store.keys() if name != 'self']\r\n\r\n return dict((translate_param(name), serialize_param(param_store[name]))\r\n for name in param_names if param_store.get(name) is not None)",
"def keys(self):\n return self.params.keys()",
"def get_keys_info() -> Dict[str, List[str]]:\n args_dict = {}\n\n for api in API_DICT:\n arg_list = list(\n getattr(\n sys.modules[__name__], \"set_\" + str(api) + \"_key\"\n ).__code__.co_varnames\n )\n arg_list.remove(\"persist\")\n arg_list.remove(\"show_output\")\n args_dict[api] = arg_list\n\n return args_dict",
"def _filter_kwargs(names, dict_):\n return {k: v for k, v in dict_.items() if k in names and v is not None}"
] |
[
"0.675617",
"0.6436779",
"0.6321909",
"0.62885296",
"0.61847234",
"0.60890037",
"0.59862626",
"0.5977559",
"0.5854747",
"0.58353114",
"0.5812666",
"0.5799092",
"0.57755125",
"0.57612205",
"0.5678434",
"0.5674742",
"0.56722915",
"0.56689703",
"0.5647891",
"0.56409335",
"0.5590331",
"0.55764276",
"0.55758244",
"0.5570361",
"0.5547903",
"0.5547793",
"0.552531",
"0.55168235",
"0.5494292",
"0.5472078"
] |
0.67722785
|
0
|
For each entry in arg_dict, add the argument to the parser if it is not already in the namespace provided. If an entry's value is a string, it is used as the help message for that key; if the value is a dictionary, its elements are passed as keyword parameters to add_argument
|
def add_arguments(arg_dict, parser, namespace=None):
    for k in arg_dict:
        # Skip anything the pre-parsed namespace already provides.
        if namespace and hasattr(namespace, k):
            continue
        try:
            h = arg_dict[k]
            if isinstance(h, dict):
                # A dict entry is passed through as keyword arguments to add_argument.
                parser.add_argument('--' + k, **h)
            else:
                # A string entry is used as the help message.
                parser.add_argument('--' + k, help=h)
        except Exception:
            parser.add_argument('--' + k, help='manager parameter')
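
A short usage sketch for the function above; the argument names and the pre-parsed namespace are hypothetical. One entry is a plain help string, one is a dict of add_argument keyword parameters, and one is skipped because the namespace already provides it.

import argparse

arg_dict = {
    "verbose": "enable verbose output",                 # string -> used as the help text
    "workers": {"type": int, "default": 4,
                "help": "number of worker processes"},  # dict -> passed straight to add_argument
    "config": "path to the config file",                # already present on the namespace below
}

parser = argparse.ArgumentParser()
ns = argparse.Namespace(config="settings.yaml")  # pretend 'config' was parsed earlier

add_arguments(arg_dict, parser, namespace=ns)
args = parser.parse_args(["--workers", "8", "--verbose", "yes"])
print(args.workers, args.verbose)  # 8 yes
print(hasattr(args, "config"))     # False -- '--config' was never added to the parser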
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _map_arguments(self, args):\n data = args.get('data')\n comp = args.get('comp')\n library = args.get('library')\n dry_run = args.get('dry_run', False)\n\n self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,\n comp=comp, data=data,\n library=library,\n nsrc=args.get('nsrc', 500),\n dry_run=dry_run)\n\n self._set_link('gather-srcmaps', GatherSrcmaps_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)\n \n self._set_link('merge-srcmaps', MergeSrcmaps_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)",
"def _map_arguments(self, args):\n data = args.get('data')\n comp = args.get('comp')\n library = args.get('library')\n dry_run = args.get('dry_run', False)\n\n self._set_link('sum-rings', SumRings_SG,\n library=library,\n outdir=args['outdir'],\n dry_run=dry_run)\n\n self._set_link('srcmaps-diffuse', SrcmapsDiffuse_SG,\n comp=comp, data=data,\n library=library,\n make_xml=args['make_xml'],\n dry_run=dry_run)\n\n self._set_link('vstack-diffuse', Vstack_SG,\n comp=comp, data=data,\n library=library,\n dry_run=dry_run)",
"def _add_sample_specific_arguments(self, is_required: bool, *args):\n for arg in args:\n name_or_flags = arg[\"name_or_flags\"]\n options = arg[\"options\"]\n options[\"required\"] = is_required\n self._specific_args_group.add_argument(*name_or_flags, **options)",
"def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")",
"def __add_xx_args(self, other_xx_args_dict):\n self.__merge_xx_switch_args(other_xx_args_dict)\n self.__merge_xx_value_args(other_xx_args_dict)",
"def add_argument(self, *args, **kwargs):\n self.arguments[args[0]] = self._Argument(*args, **kwargs)",
"def map_parameters(arguments, *args):\n for arg in args:\n if arg in ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop',\n 'popitem', 'setdefault', 'update', 'values', 'format',\n 'type']:\n Console.error(f'`{arg}` is predefined method.'\n f' Use `arguments[\"--{arg}\"]` in your code')\n raise ValueError(f\"{arg} already used in arguments\")\n elif arg in arguments:\n Console.error(f'`{arg}` is already used in arguments.'\n f' Use `arguments[\"--{arg}\"]` in your code')\n raise ValueError(f\"{arg} already used in arguments\")\n else:\n flag = \"--\" + arg\n if flag in arguments:\n value = arguments[flag]\n else:\n value = None\n arguments[arg] = value",
"def _parse_from_dict(self, arg_dict):\n missing = {}\n for key, value in arg_dict.items():\n flag_desc = self._defs.get(key.replace(\"-\", \"_\"), None)\n if flag_desc is None:\n missing[key] = value\n else:\n flag_desc.parse(value)\n return missing",
"def dictVarArgs(arg1, arg2='default', **theDict ):\n len = 0\n print \"************ Presently dictionary elaborated variable args **************\"\n print \"Function\\'s first is \", arg1\n print \"Funx second is \", arg2\n for var in theDict:\n print \"additional args %d for key \" % (len), str(var), \"=\", str(theDict[var])\n len = len + 1",
"def AddSourceFlag(parser):\n parser.add_argument(\n '--source',\n required=False,\n help='Events source kind by which to filter results.')",
"def _merge_args_opts(args_opts_dict, **kwargs):\n merged = []\n\n if not args_opts_dict:\n return merged\n\n for arg, opt in args_opts_dict.items():\n if not _is_sequence(opt):\n opt = shlex.split(opt or \"\")\n merged += opt\n\n if not arg:\n continue\n\n if \"add_input_option\" in kwargs:\n merged.append(\"-i\")\n\n merged.append(arg)\n\n return merged",
"def inject_args_in_config(args, config):\n log = logging.getLogger(__name__)\n\n for t_opt in list(args._options.values()):\n n = t_opt.name\n first_ = n.find('_')\n if first_ > 0:\n s, o = n[:first_], n[first_ + 1:]\n v = t_opt.value()\n log.info('inject argument {} = {} in configuration section {}, option {}'.format(n, v, s, o))\n if not config.has_section(s):\n config.add_section(s)\n config.set(s, o, v)\n return config",
"def add_args(self, parser):",
"def updateArgs(self, namespace, updates):\n namespace = self._fixNS(namespace)\n for k, v in updates.items():\n self.setArg(namespace, k, v)",
"def add_args(process_args, parser):\n for arg, options in process_args.items():\n _help = options.get('help')\n _default = options.get('default')\n _type = options.get('type')\n _action = options.get('action')\n _required = options.get('required')\n if not _action:\n parser.add_argument(\n f'--{arg}', help=_help, default=_default, type=_type, required=_required\n )\n else:\n parser.add_argument(\n f'--{arg}', help=_help, default=_default, action=_action\n )\n return parser",
"def add_args(parser):\n parser.add_argument(\n \"--share-encoder-embeddings\",\n action=\"store_true\",\n help=\"share encoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-decoder-embeddings\",\n action=\"store_true\",\n help=\"share decoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-encoders\",\n action=\"store_true\",\n help=\"share encoders across languages\",\n )\n parser.add_argument(\n \"--share-decoders\",\n action=\"store_true\",\n help=\"share decoders across languages\",\n )",
"def setArg(self, namespace, key, value):\n assert key is not None\n assert value is not None\n namespace = self._fixNS(namespace)\n # try to ensure that internally it's consistent, at least: str -> str\n if isinstance(value, bytes):\n value = str(value, encoding=\"utf-8\")\n self.args[(namespace, key)] = value\n if not (namespace is BARE_NS):\n self.namespaces.add(namespace)",
"def add_arg_metadata(\n arg_name: str,\n description: str,\n default_value: Any,\n is_array: bool = False,\n secret: bool = False,\n options: list = [],\n execution: bool = False,\n required: bool = False,\n default: bool = False,\n ) -> dict:\n arg_metadata = {\n \"name\": arg_name,\n \"isArray\": False,\n \"description\": arg_name,\n \"required\": required,\n \"secret\": False,\n \"default\": default,\n }\n if description:\n arg_metadata[\"description\"] = description\n if default_value:\n arg_metadata[\"defaultValue\"] = default_value\n if is_array:\n arg_metadata[\"isArray\"] = True\n if options:\n arg_metadata[\"predefined\"] = options\n arg_metadata[\"auto\"] = \"PREDEFINED\"\n if secret:\n arg_metadata[\"secret\"] = True\n if execution:\n arg_metadata[\"execution\"] = True\n\n return arg_metadata",
"def add_arguments(self, parser):",
"def merge(self: Dict[str, Arg], argument: Arg):\n dest = argument.destination\n if dest in self:\n self[dest].merge_all(argument)\n return\n self[dest] = argument",
"def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. 
If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments",
"def add_argument(self, *args, **kwargs):\r\n\r\n # if no positional args are supplied or only one is supplied and\r\n # it doesn't look like an option string, parse a positional\r\n # argument\r\n chars = self.prefix_chars\r\n if not args or len(args) == 1 and args[0][0] not in chars:\r\n if args and 'dest' in kwargs:\r\n raise ValueError('dest supplied twice for positional argument')\r\n kwargs = self._get_positional_kwargs(*args, **kwargs)\r\n\r\n # otherwise, we're adding an optional argument\r\n else:\r\n kwargs = self._get_optional_kwargs(*args, **kwargs)\r\n\r\n # if no default was supplied, use the parser-level default\r\n if 'default' not in kwargs:\r\n dest = kwargs['dest']\r\n if dest in self._defaults:\r\n kwargs['default'] = self._defaults[dest]\r\n elif self.argument_default is not None:\r\n kwargs['default'] = self.argument_default\r\n\r\n # create the action object, and add it to the parser\r\n action_class = self._pop_action_class(kwargs)\r\n if not _callable(action_class):\r\n raise ValueError('unknown action \"%s\"' % action_class)\r\n action = action_class(**kwargs)\r\n\r\n # raise an error if the action type is not callable\r\n type_func = self._registry_get('type', action.type, action.type)\r\n if not _callable(type_func):\r\n raise ValueError('%r is not callable' % type_func)\r\n\r\n return self._add_action(action)",
"def add_optional_arguments(arguments_parser):\n for argument in ARGS_CONFIG:\n arguments_parser.add_argument(\n argument['shortcut'],\n argument['name'],\n action=argument['action'],\n help=argument['help']\n )",
"def apply_arg(original_key: str, args: Namespace, config: Config) -> None:\n from confu.schema import Attribute, Schema\n\n schema = config._schema\n\n path = original_key.split(\"__\")\n\n arg_data = getattr(args, original_key)\n\n if len(path) > 1:\n data = config.data\n\n current_schema = schema._attr\n\n for key in path:\n\n if isinstance(current_schema.get(key), Schema):\n current_schema = current_schema.get(key)._attr\n\n elif isinstance(current_schema.get(key), Attribute):\n attribute = current_schema.get(key)\n\n # If we cannot find the attribute in the schema\n # we don't add it\n if attribute is None:\n return\n\n for key in path[:-1]:\n if key not in data:\n data[key] = {}\n data = data[key]\n data[path[-1]] = arg_data\n\n else:\n attribute = schema._attr.get(original_key)\n\n # If we cannot find the attribute in the schema\n # we don't add it\n if attribute is None:\n return\n\n config.data[original_key] = arg_data\n # attribute.validate(arg_data, path)",
"def addMetaKeys(self,args, params={}):\n if any(not mathUtils.isAString(a) for a in args):\n self.raiseAnError('Arguments to addMetaKeys were not all strings:',args)\n self.metadataKeys = self.metadataKeys.union(set(args))\n self.metadataParams.update(params)",
"def AddArgsForAddLabels(parser):\n required_labels_flag = base.Argument(\n '--labels',\n metavar='KEY=VALUE',\n type=arg_parsers.ArgDict(),\n action=arg_parsers.UpdateAction,\n required=True,\n help='A list of labels to add.')\n\n required_labels_flag.AddToParser(parser)",
"def add_override_argument(parser, *names, **kwargs):\r\n if not names:\r\n names = DEFAULT_OVERRIDE_OPTION_NAMES\r\n dest = kwargs.pop('dest', None)\r\n required = kwargs.pop('required', False)\r\n help = kwargs.pop('help', 'extra overrides to apply to the config')\r\n if kwargs:\r\n raise TypeError('add_override_argument() got an invalid keyword argument: %s' %\r\n list(kwargs)[0])\r\n\r\n ov_container = ConfigContainer()\r\n ov_container.get_metadata().is_override_set = True\r\n parser.add_argument(\r\n *names,\r\n dest=dest,\r\n default=ov_container,\r\n required=required,\r\n action=_add_to_override_set,\r\n type=_dict_from_string,\r\n help=help\r\n )",
"def _add_argument(hparams, key, value, update=True):\n if hasattr(hparams, key):\n if update:\n setattr(hparams, key, value)\n else:\n hparams.add_hparam(key, value)",
"def __call__(self, parser, namespace, values, option_string=None):\n if values == \"json\":\n setattr(namespace, self.dest, json.dumps)\n else:\n raise argparse.ArgumentTypeError(\"Valid formats are: json\")",
"def add_custom_argument(self, *name_or_flags, **options):\n self._specific_args_group.add_argument(*name_or_flags, **options)"
] |
[
"0.58115476",
"0.5370188",
"0.52439994",
"0.52399445",
"0.5215256",
"0.51828295",
"0.5136675",
"0.5135823",
"0.5095992",
"0.5078414",
"0.50783795",
"0.503532",
"0.5018439",
"0.5017416",
"0.50130874",
"0.49956897",
"0.4963854",
"0.496178",
"0.49517304",
"0.4924612",
"0.49196917",
"0.49005163",
"0.48817852",
"0.48786107",
"0.4870753",
"0.48542598",
"0.48540738",
"0.48513708",
"0.48479944",
"0.48479185"
] |
0.6476159
|
0
|
Parse the given launch arguments from the command line into a list of (name, value) tuples for launch.
|
def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:
parsed_launch_arguments = OrderedDict() # type: ignore
for argument in launch_arguments:
count = argument.count(':=')
if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):
raise RuntimeError(
"malformed launch argument '{}', expected format '<name>:=<value>'"
.format(argument))
name, value = argument.split(':=', maxsplit=1)
parsed_launch_arguments[name] = value # last one wins is intentional
    return list(parsed_launch_arguments.items())  # materialise to match the declared return type
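
Assuming the function above is in scope (together with the OrderedDict and typing imports of its enclosing module), a usage sketch might look like this; the argument names are made up.

pairs = parse_launch_arguments(
    ['use_sim_time:=true', 'map:=/maps/office.yaml', 'map:=/maps/lab.yaml'])
print(list(pairs))  # [('use_sim_time', 'true'), ('map', '/maps/lab.yaml')] -- last assignment wins

try:
    parse_launch_arguments(['broken-argument'])
except RuntimeError as exc:
    print(exc)  # malformed launch argument 'broken-argument', expected format '<name>:=<value>'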
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_arguments(args):",
"def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments",
"def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog=__exe__, description=__purpose__)\n ap.add_argument(\n '--host', dest='host', default=None,\n help='Host for XNAT. Default: env XNAT_HOST.')\n ap.add_argument(\n '-u', '--username', dest='username', default=None,\n help='Username for XNAT.')\n ap.add_argument('project', help='Project Label')\n ap.add_argument('session', help='Session Label')\n ap.add_argument(\n 'proc_suffix', help='Proc name suffix', nargs='?', default='')\n ap.add_argument(\n '-sd', '--subjects_dir', dest='subjects_dir',\n help='Subjects Directory',\n default=os.environ.get('SUBJECTS_DIR', '/tmp'))\n return ap.parse_args()",
"def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments",
"def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale",
"def _parse_launch_args(args, logger):\n if not args:\n return []\n\n if isinstance(args, str):\n args = [args]\n\n override_list = []\n\n # Now look for any option and conf file arguments:\n bad = False\n for arg in args:\n m = re.match(r'''(?x)\n (?P<section>[a-zA-Z][a-zA-Z0-9_]*)\n \\.(?P<option>[^=]+)\n =(?P<value>.*)$''', arg)\n # check if argument is a explicit variable override\n if m:\n section = m.group('section')\n key = m.group('option')\n value = m.group('value')\n override_list.append((section, key, value))\n continue\n\n filepath = arg\n # check if argument is a path to a file that exists\n if not os.path.exists(filepath):\n logger.error(f'Invalid argument: {filepath}')\n bad = True\n continue\n\n # expand file path to full path\n filepath = os.path.realpath(filepath)\n\n # path exists but is not a file\n if not os.path.isfile(filepath):\n logger.error(f'Conf is not a file: {filepath}')\n bad = True\n continue\n\n # warn and skip if file is empty\n if os.stat(filepath).st_size == 0:\n logger.warning(f'Conf file is empty: {filepath}. Skipping')\n continue\n\n # add file path to override list\n override_list.append(filepath)\n\n # exit if anything went wrong reading config arguments\n if bad:\n sys.exit(2)\n\n return override_list",
"def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u",
"def get_argv_list(args):\n # parse the string format of arguments and return a list of arguments\n argv = args.split(' ')\n if len(argv) == 1 and argv[0] == '':\n return []\n return argv",
"def parse_arguments():\n arguments_parser = argparse.ArgumentParser()\n add_optional_arguments(arguments_parser)\n add_positional_arguments(arguments_parser)\n arguments = arguments_parser.parse_args()\n return [arguments.paths, _get_options(arguments)]",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"-threads\", help=\"specifies a thread count for parallel operations\", type=int)\n return parser.parse_args()",
"def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog='dax_launch', description=__description__)\n ap.add_argument(dest='settings_path', help='Settings Path')\n ap.add_argument('--logfile', dest='logfile',\n help='Logs file path if needed.', default=None)\n _help = 'Project ID from XNAT to run dax_update on locally (only one \\\nproject).'\n ap.add_argument('--project', dest='project', help=_help, default=None)\n _help = 'list of sessions label from XNAT to run dax_launch on locally.'\n ap.add_argument('--sessions', dest='sessions', help=_help, default=None)\n ap.add_argument('--writeonly', dest='writeonly', action='store_true',\n help='Only write job files without launching them.')\n _help = 'Folder to store the PBS when using --writeonly. Default: \\\nRESULTS_DIR/TRASH.'\n ap.add_argument('--pbsfolder', dest='pbsfolder', help=_help, default=None)\n ap.add_argument('--nodebug', dest='debug', action='store_false',\n help='Avoid printing DEBUG information.')\n ap.add_argument('--no_qsub', dest='no_qsub', action='store_true',\n help='Run the jobs locally on your computer in serial.')\n return ap.parse_args()",
"def parseArgs ( args ) :\n assert len ( args ) == 5\n loginInfo = []\n for s in args :\n loginInfo.append ( s )\n loginInfo.pop ( 0 )\n assert len ( loginInfo ) == 4\n return loginInfo",
"def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory",
"def read_command_line_arguments() -> Tuple[\n LocalConfig, AuthConfig, Optional[List[str]]\n]:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"config\", type=str, help=\"Path to the main config file\"\n )\n parser.add_argument(\n \"auth\", type=str, help=\"Path to the authentication config file\"\n )\n parser.add_argument(\n \"--execute-now\",\n type=str,\n help=\"\"\"A set of channel names to execute immediately, or none to\n determine automatically based on the current time.\"\"\",\n nargs=\"*\",\n choices=notification_channels.keys(),\n )\n args = parser.parse_args()\n\n config_file = read_local_config(args.config)\n auth_file = read_local_auth(args.auth)\n\n return config_file, auth_file, args.execute_now",
"def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate",
"def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)",
"def parse_args():\n parser = argparse.ArgumentParser(\"Run arguments for system submitted tasks\")\n\n parser.add_argument(\"-f\", \"--funcs\", type=str, nargs=\"?\", required=True,\n help=\"path to pickle file containing a list of \"\n \"functions/methods that should be run by the \"\n \"submitted process\"\n )\n parser.add_argument(\"-k\", \"--kwargs\", type=str, nargs=\"?\", required=False,\n default=None,\n help=\"path to pickle file containing a dictionary of \"\n \"keyword argumnets that should be passed to the \"\n \"functions\")\n parser.add_argument(\"-e\", \"--environment\", type=str, nargs=\"?\",\n required=False,\n help=\"Optional comma-separated environment variables, \"\n \"which should be given as \"\n \"VARNAME1=value1,VARNAME2=value2 and so on. These \"\n \"will be separated and instantiated into Python's \"\n \"os.environ\")\n\n return parser.parse_args()",
"def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list",
"def parse_args(args=None):\n return AP.parse_args(args=args)",
"def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments",
"def parse_args(argv: t.Iterable[str] = None):\n if argv is None:\n argv = sys.argv[1:]\n\n args: t.List[str] = []\n kwargs: t.MutableMapping[str, t.Any] = {}\n\n key = None\n for arg in argv:\n if arg.startswith('--'):\n if arg == '--help':\n print(USAGE)\n raise SystemExit\n if key is not None:\n kwargs[key] = True\n key = arg[2:]\n continue\n\n match = re.match('^(\\\\w+)=(.*)$', arg)\n if match:\n if key is not None:\n kwargs[key] = True\n key = None\n kwargs[match.group(1)] = match.group(2)\n continue\n\n if key is not None:\n kwargs[key] = arg\n key = None\n continue\n\n args.append(arg)\n\n if key is not None:\n kwargs[key] = True\n\n return (tuple(args), kwargs)",
"def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps",
"def parse_args() -> Tuple[argparse.Namespace, List[str]]:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config\", type=str, required=True, help=\"The config to load settings from.\"\n )\n parser.add_argument(\n \"--job\",\n type=JobType,\n choices=list(iter(JobType)),\n metavar=str({str(job.value) for job in iter(JobType)}),\n help=\"Job type for this run.\",\n )\n parser.add_argument(\n \"--resume\",\n type=str,\n default=\"\",\n help=\"A wandb run and filename to resume training a model from, \\\n e.g. graphgen/a1b2c3d:checkpoints/current.pt\",\n )\n parser.add_argument(\n \"--sync\", action=\"store_true\", help=\"Sync results to wandb if specified.\"\n )\n return parser.parse_known_intermixed_args()",
"def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model_type\")\n parser.add_argument(\"run_name\")\n parser.add_argument(\"run_comment\")\n parser.add_argument(\"epoch\")\n parser.add_argument(\"--verbose\", default=True)\n args = parser.parse_args()\n return (args.model_type, args.run_name, args.run_comment, args.epoch, args.verbose)",
"def parse_arguments():\n parser = ArgumentParser(description=\"Run tests in parallel.\")\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug logging\"\n )\n parser.add_argument(\n \"-l\", \"--layer\", help=\"Greedy match test layer name.\", action=\"append\"\n )\n parser.add_argument(\n \"-m\", \"--module\", help=\"Greedy match module name.\", action=\"append\"\n )\n return parser.parse_args()",
"def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()",
"def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)",
"def parse_args():\n parser = argparse.ArgumentParser(description='Extract left-turn speed data CSV files from Excel')\n parser.add_argument('veh_conflict_data', type=str, help='Excel file with all veh conflicts data')\n return parser.parse_args()",
"def get_unparsed_args():\n # Threads are set to 1 so that running tests doesn't completely drain\n # computing power, although it slows down the tests.\n # Specify output folder, else it will create folder in working directory\n # where the test module is run from.\n unparsed_args = [\"run.py\", PIPELINE, DB,\n \"-t\", str(1),\n \"-o\", str(test_folder),\n \"-b\", str(2)]\n return unparsed_args",
"def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing"
] |
[
"0.7123171",
"0.68779016",
"0.68531114",
"0.68477416",
"0.6797236",
"0.6794053",
"0.67317593",
"0.6700564",
"0.6684113",
"0.66812426",
"0.66508806",
"0.66495556",
"0.6644529",
"0.6633411",
"0.66262376",
"0.6623169",
"0.66193837",
"0.66157824",
"0.65600795",
"0.65463066",
"0.65034485",
"0.6491854",
"0.6491093",
"0.6488282",
"0.6487392",
"0.6478596",
"0.64761513",
"0.6475931",
"0.64623904",
"0.645615"
] |
0.7299944
|
0
|
Return a single experience instance
|
def get_single_experience(self, time_step):
    assert self.n_experience - 1 > time_step, "Sample time step must be less than the number of experiences minus one."
return self.buffer_experience[time_step]
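
Only the method is shown in this record, so the sketch below wraps it in a minimal, hypothetical ReplayBuffer class; the class name and the buffer_experience / n_experience fields are assumptions inferred from the attribute accesses above.

class ReplayBuffer:
    def __init__(self, experiences):
        self.buffer_experience = list(experiences)

    @property
    def n_experience(self):
        return len(self.buffer_experience)

    def get_single_experience(self, time_step):
        # Same body as above: the newest stored experience is never returned directly.
        assert self.n_experience - 1 > time_step, \
            "Sample time step must be less than the number of experiences minus one."
        return self.buffer_experience[time_step]

buf = ReplayBuffer(["exp0", "exp1", "exp2"])
print(buf.get_single_experience(1))  # exp1
# buf.get_single_experience(2) would fail the assertion, since 2 is not < 3 - 1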
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_experience(cls, state, action, reward, done, next_state) -> 'Experience':\n return cls(\n state=state,\n action=action,\n reward=reward,\n done=done,\n next_state=next_state,\n )",
"def get_experience(uid, rid):\n experience = Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).first()\n return experience",
"def get_experience(self):\n return self.experience_set.all()",
"def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')",
"def test_experience_created(self):\n\t\tExperience.objects.create(\n\t\t\tuser=self.user,\n\t\t\tjob_title=self.job_title,\n\t\t\torganisation_name=self.organisation_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tjob_description=self.job_description\n\t\t)\n\t\texperience_instance = Experience.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\texperience_instance.user,\n\t\t\t'users don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.job_title,\n\t\t\texperience_instance.job_title,\n\t\t\t'job_titles don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.organisation_name,\n\t\t\texperience_instance.organisation_name,\n\t\t\t'organisation_names don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\texperience_instance.start_date,\n\t\t\t'start_dates don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.end_date,\n\t\t\texperience_instance.end_date,\n\t\t\t'end_dates don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.job_description,\n\t\t\texperience_instance.job_description,\n\t\t\t'job_descriptions don\\'t match'\n\t\t)",
"def get_objective(self):\n # Note that this makes the generic objectives call to Handcar\n # without specifying the objectiveBank:\n url_str = (self._base_url + '/objectives/' +\n self._my_map['objectiveId'])\n return Objective(self._load_json(url_str))",
"def learning_experience(self):\n return self._get_child_page_of_type(LearningTechniquesPage)",
"def experiment(self) -> Run:\n if self._experiment is None:\n self._experiment = self.create_experiment()\n return self._experiment",
"def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance",
"def run_one_step(self):\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward",
"def experience_function(self) -> ExperienceFunctionEnum:\n return self._experience_function",
"def experience_replay(self):\n return",
"def instance(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance\")",
"def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")",
"def get_exploration_by_id(exploration_id, strict=True):\n exploration_memcache_key = _get_exploration_memcache_key(exploration_id)\n memcached_exploration = memcache_services.get_multi(\n [exploration_memcache_key]).get(exploration_memcache_key)\n\n if memcached_exploration is not None:\n return memcached_exploration\n else:\n exploration_model = exp_models.ExplorationModel.get(\n exploration_id, strict=strict)\n if exploration_model:\n exploration = exp_domain.Exploration(exploration_model)\n memcache_services.set_multi({\n exploration_memcache_key: exploration})\n return exploration\n else:\n return None",
"def get_instance(self, instance):\n return self._get(_instance.Instance, instance)",
"def make_exp() -> rb.Experience:\r\n lmoves = list(moves.Move)\r\n return rb.Experience(make_state(), random.choice(lmoves), random.randint(1, 3),\r\n make_state(), random.random(), random.randint(1, 2),\r\n random.random(), np.random.uniform(0, 1, 5).astype('float32'),\r\n np.random.uniform(0, 1, 5).astype('float32'))",
"def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)",
"def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]",
"def get_instance(self, instance_id):\n return self.instances.get(instance_id)",
"def _get_instance(self):",
"def _get_instance(self):",
"def get_representative_exploration_model(self) -> BaseExplorationModel:\n return self.playback_exploration_models[0]",
"def get_experience():\n print('-'*50)\n exp_list = ['entry_level', 'mid_level', 'senior_level']\n exps = ['Entry level', 'Mid level', 'Senior level']\n\n print(\"\\nWhat level of experience(s) do you have?\\n\")\n print(*exps, sep=', ')\n print(\"\\nEnter your level of experience: \")\n\n experience = ''\n\n answer = input() \n if answer != '':\n if answer.lower().startswith('entry'):\n experience = exp_list[0]\n elif answer.lower().startswith('mid'):\n experience = exp_list[1]\n elif answer.lower().startswith('senior'):\n experience = exp_list[2]\n else:\n print('\\nYou don\\'t provide a correct experience level.\\n')\n get_experience()\n else:\n print('\\nExperience can\\'t be empty.\\n')\n get_experience()\n\n return experience",
"def base_experience(self) -> int:\n return self._base_experience",
"def test_get_experience(self):\n current_resume = resume.objects.first()\n expected = list(current_resume.get_experience())\n case = list(current_resume.experience_set.all())\n self.assertEqual(case,expected)",
"def instance(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance\")",
"def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')",
"def GetInstance():\n pass",
"def get(self):\n return self.__expedition"
] |
[
"0.6488589",
"0.6468385",
"0.6168099",
"0.58028114",
"0.5753395",
"0.5579298",
"0.5455302",
"0.5434499",
"0.5421998",
"0.54172146",
"0.53958464",
"0.53948414",
"0.5381991",
"0.5371138",
"0.5356552",
"0.5337088",
"0.5315421",
"0.5306023",
"0.5300618",
"0.5293251",
"0.5285871",
"0.5285871",
"0.528452",
"0.5279784",
"0.5267287",
"0.52499115",
"0.52491105",
"0.5248795",
"0.52454627",
"0.52410066"
] |
0.7033003
|
0
|
Return a batch list of experience instances
|
def get_batch_experience(self, batch_size):
batch = []
for i in range(batch_size):
index = random.choice(range(self.n_experience - 1))
batch.append(self.get_single_experience(index))
return batch
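
For readability, a minimal self-contained sketch of the kind of replay buffer this method appears to belong to; the attribute n_experience and the helper get_single_experience are assumptions inferred from the snippet above, not a confirmed API:

import random

class ExperienceBuffer:
    """Toy buffer illustrating the sampling pattern above (sketch only)."""

    def __init__(self):
        self.experiences = []  # each entry is one stored experience instance

    @property
    def n_experience(self):
        # assumed to mean "number of stored experiences"
        return len(self.experiences)

    def get_single_experience(self, index):
        # assumed to return the stored experience at the given position
        return self.experiences[index]

    def get_batch_experience(self, batch_size):
        # sample batch_size experiences uniformly at random, as in the snippet
        batch = []
        for _ in range(batch_size):
            index = random.choice(range(self.n_experience - 1))
            batch.append(self.get_single_experience(index))
        return batch

Note that random.choice(range(self.n_experience - 1)) never selects the most recently stored experience and raises IndexError when only one experience is stored; if that exclusion is not intentional, random.randrange(self.n_experience) is the simpler call.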
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_experience(self):\n return self.experience_set.all()",
"def batch(self):\n return self._client.batch()",
"def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')",
"def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []",
"def get_instances(self) -> List[Instance]:\n\n def download_and_read_lines(file_name: str) -> List[str]:\n file_path: str = os.path.join(data_path, file_name)\n ensure_file_downloaded(\n source_url=MeQSumScenario.SOURCE_URL_TEMPLATE.format(file_name=file_name),\n target_path=file_path,\n unpack=False,\n )\n\n with open(file_path) as f:\n return f.read().splitlines()\n\n data_path: str = os.path.join(self.output_path, \"data\")\n ensure_directory_exists(data_path)\n\n instances: List[Instance] = []\n for split in ALL_SPLITS:\n dataset_split: str = \"val\" if split == VALID_SPLIT else split\n\n # The files with the questions end with \".source\"\n questions: List[str] = download_and_read_lines(f\"{dataset_split}.source\")\n\n # The files with the summaries end with \".target\"\n summaries: List[str] = download_and_read_lines(f\"{dataset_split}.target\")\n\n for question, summary in zip(questions, summaries):\n instances.append(\n Instance(\n input=Input(text=question),\n references=[Reference(output=Output(text=summary), tags=[CORRECT_TAG])],\n split=split,\n )\n )\n\n return instances",
"def get_instances(cls):\n raise NotImplementedError",
"def Collecting_experiences(self)-> None:\n for epoch_no in range(self.epochs):\n print(\"EPOCH %d\", epoch_no + 1)\n \n #beam_dqn = self.beam_min + int(self.beam_max * epoch_no/self.epochs)\n #egreed = self.egreed_max*(1 - epoch_no/(1.1*self.epochs))\n #self.gamma = self.gamma_max*(1 - epoch_no/(2*self.epochs))\n\n beam_dqn = 1\n egreed = 0.5\n #self.gamma = self.gamma_max\n self.gamma = 0.6\n\n self.tb_writer.add_scalar(\"parameters/beam_dqn\",\n beam_dqn, epoch_no)\n self.tb_writer.add_scalar(\"parameters/egreed\",\n egreed, epoch_no)\n self.tb_writer.add_scalar(\"parameters/gamma\",\n self.gamma, epoch_no)\n if beam_dqn > self.actions_size:\n print(\"The beam_dqn cannot exceed the action size!\")\n print(\"then the beam_dqn = action size\")\n beam_dqn = self.actions_size\n\n print(' beam_dqn, egreed, gamma: ', beam_dqn, egreed, self.gamma)\n for _, data_set in self.data_to_train_dqn.items():\n \n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n #valid_sources_raw = data_set.src\n # disable dropout\n #self.model.eval()\n\n i_sample = 0\n for valid_batch in iter(valid_iter):\n freeze_model(self.model)\n batch = Batch(valid_batch\n , self.pad_index, use_cuda=self.use_cuda)\n \n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n # if maximum output length is not globally specified, adapt to src len\n \n if self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n \n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # print(\"Source_raw: \", batch.src)\n # print(\"Target_raw: \", batch.trg_input)\n # print(\"y0: \", prev_y)\n \n \n \n exp_list = []\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n if t != 0:\n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n if t == 0:\n state = hidden[0].squeeze(1).detach().cpu().numpy()[0]\n else:\n state = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n \n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # logits: batch x time=1 x vocab (logits)\n if t != 0:\n if self.state_type == 'hidden':\n state_ = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n state_ = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n # if t == 0:\n # print('states0: ', state, state_)\n\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if random.uniform(0, 1) < egreed:\n i_ran = random.randint(0,beam_dqn-1)\n next_word = torch.argsort(logits, descending=True)[:, :, i_ran]\n else:\n next_word = torch.argmax(logits, dim=-1) # batch x time=1\n # if t != 0:\n a = prev_y.squeeze(1).detach().cpu().numpy()[0]\n #a = next_word.squeeze(1).detach().cpu().numpy()[0]\n \n # print(\"state \",t,\" : \", state )\n # print(\"state_ \",t,\" : \", state_ )\n # print(\"action \",t,\" : \", a )\n # 
print(\"__________________________________________\")\n\n output.append(next_word.squeeze(1).detach().cpu().numpy())\n\n #tup = (self.memory_counter, state, a, state_)\n \n \n prev_y = next_word\n # check if previous symbol was <eos>\n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n if t != 0:\n self.memory_counter += 1\n tup = (self.memory_counter, state, a, state_, 1)\n exp_list.append(tup)\n \n #print(t)\n # stop predicting if <eos> reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, np.zeros([self.state_size]), 0)\n exp_list.append(tup)\n #print('break')\n break\n if t == self.max_output_length-1:\n #print(\"reach the max output\")\n a = 0\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, -1*np.ones([self.state_size]), 1)\n exp_list.append(tup)\n \n \n \n \n #Collecting rewards\n hyp = np.stack(output, axis=1) # batch, time\n\n if epoch_no == 0:\n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n #print(i_sample)\n r = self.Reward(batch.trg, hyp, show=True) # 1 , time-1 \n else:\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n else:\n #print(\"aaaa - \",i_sample)\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n \n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"\\n Sample Collected: \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n # print(\"Target: \", batch.trg, decoded_valid_out_trg)\n # print(\"Eval : \", stacked_output, decoded_valid_out)\n # print(\"Reward: \", r, \"\\n\")\n \n i_sample += 1\n self.store_transition(exp_list, r)\n \n #Learning.....\n if self.memory_counter > self.mem_cap - self.max_output_length:\n self.learn()\n \n self.tb_writer.close()",
"def get_instances(self) -> List[Instance]:\n big_bench_task: Dict = BIGBenchScenario.download_and_get_task(self.output_path, self.task, self.subtask)\n\n # From https://github.com/google/BIG-bench/blob/main/docs/doc.md#json-schema,\n # \"keywords\", \"description\" and \"examples\" are all required fields for a BIG-bench task.\n # keywords: \"A list of strings, where each string contains a separate keyword describing the task\"\n self.tags = big_bench_task[\"keywords\"]\n\n # description: \"A plaintext description of the task, suitable for a non-expert to perform the task and\n # potentially generate new examples.\"\n # Append the task, subtask and task-specific description from BIG-bench to `description`.\n self.description = (\n f\"{self.description} Task: {self.task} \"\n f\"{f'Subtask: {self.subtask} ' if self.subtask else ''} \"\n f\"Description: {big_bench_task['description']}\"\n )\n\n # examples: \"A list of dicts\"\n examples: List[Dict] = big_bench_task[\"examples\"]\n # Before splitting the data, shuffle the examples with a fixed seed for reproducibility.\n random.seed(0)\n random.shuffle(examples)\n\n # BIG-bench split the data according to\n # https://github.com/google/BIG-bench/blob/main/bigbench/bbseqio/README.md#splits:\n # all: This contains all the examples.\n # validation: This contains 20% of the examples or at least 16 examples.\n # train: All examples that are not in the validation split (generally 80% of the examples)\n # For few-shot eval, use the all split.\n #\n # TODO: I'm not sure what they mean by \"for few-shot eval, use the all split.\"\n # Does that mean they don't draw in-context examples from a separate train split?\n #\n # We split the data as follows:\n # test: This contains 20% of the examples or at least 16 examples.\n # validation: Same size as the test split.\n # train: Remaining examples, not in the test and validation splits.\n total_examples: int = len(examples)\n num_test_examples: int = max(int(0.2 * total_examples), BIGBenchScenario.MIN_TEST_EXAMPLES)\n num_train_examples: int = total_examples - num_test_examples * 2\n\n # Build `Instance`s from `examples`.\n instances: List[Instance] = []\n for i, example in enumerate(examples):\n # Build references.\n references: List[Reference]\n\n # Each example has \"input\" and either \"target_scores\" or \"target\".\n if \"target_scores\" in example:\n # For \"target_scores\", BIG-bench compares target scores against the model's predicted probabilities:\n # \"The example score is then the target score (as specified in the target_scores dict) of the target\n # that received the highest probability. Scores are averaged across examples. 
Conventional\n # multiple-choice accuracy can be achieved by assigning the correct target a score of 1, and\n # all incorrect targets a score of 0.\"\n # It seems all BIG-bench Lite tasks with target scores either have a target score\n # of 0 (incorrect answer) or 1 (correct answer).\n # So, for now, `Reference`s with the highest target score are correct.\n highest_score = max(example[\"target_scores\"].values())\n references = [\n Reference(Output(text=target), tags=[CORRECT_TAG] if score == highest_score else [])\n for target, score in example[\"target_scores\"].items()\n ]\n elif \"target\" in example:\n # All the outputs in \"target\" are correct e.g., {\"input\": \"1 + 1 = \", \"target\": [\"two\",\"2\"]}.\n # \"target\" can either be a list of correct values or a single correct value.\n targets: List[str] = example[\"target\"] if type(example[\"target\"]) == list else [example[\"target\"]]\n references = [Reference(Output(text=target), tags=[CORRECT_TAG]) for target in targets]\n else:\n raise ValueError(f\"Invalid example that doesn't have `target` or `target_scores` field: {example}\")\n\n # Get split based on current index `i`.\n split: str\n if i < num_train_examples:\n split = TRAIN_SPLIT\n elif num_train_examples <= i < num_train_examples + num_test_examples:\n split = TEST_SPLIT\n else:\n split = VALID_SPLIT\n\n instances.append(Instance(Input(text=example[\"input\"]), references, split=split))\n\n return instances",
"def get_many(cls, limit: int = 100, offset: int = 0):\n if limit > 100:\n raise ModelExceptions(\"It is not possible to list more than 100 resources.\")\n\n instance_list = DBSESSION.query(cls)\n instance_list = instance_list.order_by(cls.id)\n instance_list = instance_list.offset(offset)\n instance_list = instance_list.limit(limit)\n instance_list = instance_list.all()\n if not instance_list:\n raise ObjectNotFound(f\"No registers of {cls.str_representation} found\")\n\n return instance_list",
"def experience_replay(batch_size):\n memory = []\n while True:\n experience = yield rsample(memory, batch_size) if batch_size <= len(memory) else None\n memory.append(experience)",
"def collect_episode_batch(policy, *,\n env,\n batch_size,\n n_workers=4):\n sampler = get_sampler(policy, env=env, n_workers=n_workers)\n agent_update = policy.get_param_values()\n episodes = sampler.obtain_samples(0, batch_size, agent_update)\n return episodes",
"def list_instances(self):\n # list instances\n self._list_instances()",
"def batch(self):\n return self._batch",
"def instances(self):\n return self.get('instances')",
"def chunk_list(exps):\n import numpy as np\n batch_idx = [exp['batch_id'] for exp in exps]\n unique_batch_idx = np.unique(batch_idx)\n exp_chunks = []\n for batch in unique_batch_idx:\n idx = [i for i, batch_id in enumerate(batch_idx)\n if batch_id == batch]\n size = exps[idx[0]]['slurm']['experiments_per_job']\n exp_chunks.extend(([exps[i] for i in idx[pos:pos + size]] for pos in range(0, len(idx), size)))\n return exp_chunks",
"def batches(self):\n return [self.get_batch(i) for i in range(self.num_batches)]",
"def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response",
"def __getitem__(self, index):\n # Generate indexes of the batch\n rows = self.metadata_dataframe.iloc[index * self.batch_size:(index + 1) * self.batch_size]\n names = rows['Name']\n\n rng = range(index * self.batch_size, (index + 1) * self.batch_size)\n img_files_temp = [names[k] for k in rng]\n # create batch item list\n img_batch_list = []\n meta_batch_list = []\n y_batch_list = []\n for img_file in img_files_temp:\n # Generate data\n print(\"IMAGE FILE:(\")\n print(img_file)\n img, meta, y = self.__data_generation(img_file)\n img_batch_list.append(img)\n meta_batch_list.append(meta)\n y_batch_list.append(y)\n\n # batch_inputs = (img_batch_list, meta_batch_list)\n # return batch_inputs #, y_batch_list\n return [np.array(img),np.array(meta_batch_list)], np.array(y_batch_list)",
"def __getitem__(self, batch_index):\n batch_images = np.zeros(shape=(self.batch_size, *MODEL_INPUT_SIZE, MODEL_INPUT_CHANNELS), dtype=np.float32)\n # For ages use -1 instead of zeros, because for black images age should be 0 months\n batch_ages = np.full(shape=(self.batch_size, 1), fill_value=-1, dtype=np.float32)\n batch_males = np.zeros(shape=(self.batch_size, 1), dtype=np.uint8)\n\n # Generate image indexes of the batch\n batch_image_indexes = self.image_indexes[batch_index * self.batch_size:(batch_index + 1) * self.batch_size]\n\n for item_number, batch_image_index in enumerate(batch_image_indexes):\n image_id = self.image_ids[batch_image_index][0]\n age = self.ages[batch_image_index]\n male = self.males[batch_image_index]\n\n image_path = self.images_path / f'{image_id}.png'\n image = skimage.io.imread(str(image_path))\n image = normalized_image(image)\n\n if self.is_train:\n augmented_image = augmentate_image(image)\n else:\n augmented_image = image\n\n augmented_image = augmented_image * 255\n augmented_image = np.stack((augmented_image,) * MODEL_INPUT_CHANNELS, axis=-1)\n batch_images[item_number, ...] = augmented_image\n\n batch_ages[item_number, ...] = age\n batch_males[item_number, ...] = male\n\n batch_images = preprocess_input(batch_images)\n return [batch_images, batch_males], batch_ages",
"def instantiate_batch(self, inputs):\n return inputs",
"def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list",
"def retag_all_batches(apps, schema_editor):\n pass",
"def _create_batch_list(self):\n return [None] * self.bufsize",
"def batch_list(session):\n weigh_events_sq = batch_events_by_type(session, \"weigh\").subquery(\"weigh_events\")\n propagate_events_sq = batch_events_by_type(session, \"propagate\").subquery(\n \"propagate_events\"\n )\n transfer_events_sq = batch_events_by_type(session, \"transfer\").subquery(\n \"transfer_events\"\n )\n harvest_events_sq = batch_events_by_type(session, \"harvest\").subquery(\n \"harvest_events\"\n )\n harvest_sq = harvest_with_unit_yield(session).subquery(\"harvest_with_unit_yield\")\n locations_sq = locations_with_extras(session).subquery(\"locations\")\n\n query = (\n session.query(\n BatchClass.id.label(\"batch_id\"),\n BatchClass.tray_size,\n BatchClass.number_of_trays,\n CropTypeClass.id.label(\"crop_type_id\"),\n CropTypeClass.name.label(\"crop_type_name\"),\n weigh_events_sq.c.event_time.label(\"weigh_time\"),\n propagate_events_sq.c.event_time.label(\"propagate_time\"),\n transfer_events_sq.c.event_time.label(\"transfer_time\"),\n harvest_events_sq.c.event_time.label(\"harvest_time\"),\n transfer_events_sq.c.next_action_time.label(\"expected_harvest_time\"),\n locations_sq.c.id.label(\"location_id\"),\n locations_sq.c.zone,\n locations_sq.c.aisle,\n locations_sq.c.column,\n locations_sq.c.shelf,\n locations_sq.c.summary.label(\"location_summary\"),\n harvest_sq.c.yield_per_sqm,\n harvest_sq.c.crop_yield,\n harvest_sq.c.waste_disease,\n harvest_sq.c.waste_defect,\n harvest_sq.c.over_production,\n (harvest_events_sq.c.event_time - transfer_events_sq.c.event_time).label(\n \"grow_time\"\n ),\n case(\n [\n (harvest_events_sq.c.event_time != None, \"harvest\"),\n (transfer_events_sq.c.event_time != None, \"transfer\"),\n (propagate_events_sq.c.event_time != None, \"propagate\"),\n (weigh_events_sq.c.event_time != None, \"weigh\"),\n ],\n else_=None,\n ).label(\"last_event\"),\n )\n .join(CropTypeClass, CropTypeClass.id == BatchClass.crop_type_id)\n # We inner join on weigh_events, because if the batch doesn't have a weigh event\n # it doesn't really exist, but outer join on the others since they are optional.\n .join(weigh_events_sq, weigh_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(propagate_events_sq, propagate_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(transfer_events_sq, transfer_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(harvest_events_sq, harvest_events_sq.c.batch_id == BatchClass.id)\n .outerjoin(locations_sq, locations_sq.c.id == transfer_events_sq.c.location_id)\n .outerjoin(harvest_sq, harvest_sq.c.batch_event_id == harvest_events_sq.c.id)\n )\n return query",
"def __init__(self, list_instances):\n\n self.batch_instance = list_instances\n self.mappings = []\n self.batch_instance_torch = None\n self.list_instance_torch = None\n self.batch_size = len(list_instances)\n self.rewards = [0]*self.batch_size\n self.Omega = list_instances[0].Omega\n self.Phi = list_instances[0].Phi\n self.Lambda = list_instances[0].Lambda\n self.Budget = self.Omega + self.Phi + self.Lambda\n self.player = get_player(self.Omega, self.Phi, self.Lambda)\n self.batch_torch()\n self.compute_mappings()\n self.update_budgets()\n self.next_player = get_player(self.next_Omega, self.next_Phi, self.next_Lambda)",
"def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n \n model_results0 = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n \n dist0 = model_results0['dist'] ### NOTE\n value0 = model_results0['value']\n memory0 = model_results0['memory']\n msg0 = model_results0['message']\n dists_speaker0 = model_results0['dists_speaker']\n extra_predictions0 = model_results0['extra_predictions']\n #self.rng_states0[i] = model_results0['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states0[i] = model_results0['cuda_rng_states']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n model_results1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(msg0.transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1)) ### NOTE\n \n dist1 = model_results1['dist']\n value1 = model_results1['value']\n memory1 = model_results1['memory']\n msg1 = model_results1['message']\n dists_speaker1 = model_results1['dists_speaker']\n extra_predictions1 = model_results1['extra_predictions']\n #self.rng_states1[i] = model_results1['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states1[i] = model_results1['cuda_rng_states']\n \n #state = torch.get_rng_state()\n action0 = dist0.sample()\n \n #torch.set_rng_state(state)\n action1 = dist1.sample()\n\n obs0, reward0, done0, env_info0 = self.env0.step(action0.cpu().numpy())\n \n obs1, reward1, done1, env_info1 = self.env1.step(action1.cpu().numpy())\n \n # mask any rewards based on (previous) been_done\n rewardos0 = [0] * self.num_procs\n rewardos1 = [0] * self.num_procs\n for j in range(self.num_procs):\n rewardos0[j] = reward0[j] * (1 - self.been_done0[j].item())\n rewardos1[j] = reward1[j] * (1 - self.been_done1[j].item())\n \n reward0 = tuple(rewardos0)\n reward1 = tuple(rewardos1)\n \n #reward0 = tuple(0.5*r0 + 0.5*r1 for r0, r1 in zip(reward0, reward1)) ### NOTE\n #reward1 = reward0\n \n # reward sender agent (0) equally for success of receiver agent (1) ### NOTE\n reward0 = reward1\n \n self.been_done0 = (1 - (1 - self.been_done0) * (1 - torch.tensor(done0, device=self.device, dtype=torch.float)))\n self.been_done1 = (1 - (1 - self.been_done1) * (1 - torch.tensor(done1, device=self.device, dtype=torch.float)))\n both_done = self.been_done0 * self.been_done1\n \n # reset if receiver agent (1) is done ### NOTE\n both_done = self.been_done1\n \n obs0 = self.env0.sync_reset(both_done, obs0)\n obs1 = self.env1.sync_reset(both_done, obs1)\n \n if self.aux_info:\n env_info0 = self.aux_info_collector0.process(env_info0)\n # env_info0 = self.process_aux_info0(env_info0)\n \n env_info1 = self.aux_info_collector1.process(env_info1)\n # env_info1 = self.process_aux_info1(env_info1)\n\n # Update experiences values\n\n self.obss0[i] = self.obs0\n self.obs0 = obs0\n \n self.obss1[i] = self.obs1\n self.obs1 = obs1\n\n self.memories0[i] = self.memory0\n self.memory0 = memory0\n \n self.memories1[i] = self.memory1\n self.memory1 = memory1\n \n self.msgs0[i] = self.msg0\n self.msg0 = msg0\n \n self.msgs1[i] = self.msg1\n self.msg1 = msg1\n \n self.msgs_out0[i] = msg0\n \n self.msgs_out1[i] = msg1\n\n self.masks0[i] = self.mask0\n #self.mask0 = 1 - torch.tensor(done0, device=self.device, dtype=torch.float)\n self.mask0 = 1 - both_done\n 
self.actions0[i] = action0\n self.values0[i] = value0\n if self.reshape_reward is not None:\n self.rewards0[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs0, action0, reward0, done0)\n ], device=self.device)\n else:\n self.rewards0[i] = torch.tensor(reward0, device=self.device)\n self.log_probs0[i] = dist0.log_prob(action0)\n self.speaker_log_probs0[i] = self.acmodel0.speaker_log_prob(dists_speaker0, msg0)\n \n self.masks1[i] = self.mask1\n #self.mask1 = 1 - torch.tensor(done1, device=self.device, dtype=torch.float)\n self.mask1 = 1 - both_done\n self.actions1[i] = action1\n self.values1[i] = value1\n if self.reshape_reward is not None:\n self.rewards1[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs1, action1, reward1, done1)\n ], device=self.device)\n else:\n self.rewards1[i] = torch.tensor(reward1, device=self.device)\n self.log_probs1[i] = dist1.log_prob(action1)\n self.speaker_log_probs1[i] = self.acmodel1.speaker_log_prob(dists_speaker1, msg1)\n\n if self.aux_info:\n self.aux_info_collector0.fill_dictionaries(i, env_info0, extra_predictions0)\n \n self.aux_info_collector1.fill_dictionaries(i, env_info1, extra_predictions1)\n\n # Update log values\n\n self.log_episode_return0 += torch.tensor(reward0, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return0 += self.rewards0[i]\n \n self.log_episode_return1 += torch.tensor(reward1, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return1 += self.rewards1[i]\n \n self.log_episode_num_frames0 += torch.ones(self.num_procs, device=self.device)\n self.log_episode_num_frames1 += torch.ones(self.num_procs, device=self.device)\n \n #for i, done_ in enumerate(done0):\n for i in range(self.num_procs):\n #if done_:\n if both_done[i]:\n self.log_done_counter0 += 1\n self.log_return0.append(self.log_episode_return0[i].item())\n self.log_reshaped_return0.append(self.log_episode_reshaped_return0[i].item())\n self.log_num_frames0.append(self.log_episode_num_frames0[i].item())\n \n #for i, done_ in enumerate(done1):\n #if done_:\n self.log_done_counter1 += 1\n self.log_return1.append(self.log_episode_return1[i].item())\n self.log_reshaped_return1.append(self.log_episode_reshaped_return1[i].item())\n self.log_num_frames1.append(self.log_episode_num_frames1[i].item())\n\n # if both are done, reset both to not done\n self.been_done0 *= (1 - both_done)\n self.been_done1 *= (1 - both_done)\n\n self.log_episode_return0 *= self.mask0\n self.log_episode_reshaped_return0 *= self.mask0\n self.log_episode_num_frames0 *= self.mask0\n\n self.log_episode_return1 *= self.mask1\n self.log_episode_reshaped_return1 *= self.mask1\n self.log_episode_num_frames1 *= self.mask1\n\n # Add advantage and return to experiences\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n tmp = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n next_value0 = tmp['value']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n next_value1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(tmp['message'].transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1))['value'] ### NOTE\n\n for i in reversed(range(self.num_frames_per_proc)):\n next_mask0 = self.masks0[i+1] if i < self.num_frames_per_proc - 1 else 
self.mask0\n next_value0 = self.values0[i+1] if i < self.num_frames_per_proc - 1 else next_value0\n next_advantage0 = self.advantages0[i+1] if i < self.num_frames_per_proc - 1 else 0\n \n next_mask1 = self.masks1[i+1] if i < self.num_frames_per_proc - 1 else self.mask1\n next_value1 = self.values1[i+1] if i < self.num_frames_per_proc - 1 else next_value1\n next_advantage1 = self.advantages1[i+1] if i < self.num_frames_per_proc - 1 else 0\n\n delta0 = self.rewards0[i] + self.discount * next_value0 * next_mask0 - self.values0[i]\n self.advantages0[i] = delta0 + self.discount * self.gae_lambda * next_advantage0 * next_mask0\n \n delta1 = self.rewards1[i] + self.discount * next_value1 * next_mask1 - self.values1[i]\n self.advantages1[i] = delta1 + self.discount * self.gae_lambda * next_advantage1 * next_mask1\n\n # Flatten the data correctly, making sure that\n # each episode's data is a continuous chunk\n\n exps0 = DictList()\n exps0.obs = [self.obss0[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n exps1 = DictList()\n exps1.obs = [self.obss1[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n # In commments below T is self.num_frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n\n # T x P x D -> P x T x D -> (P * T) x D\n exps0.memory = self.memories0.transpose(0, 1).reshape(-1, *self.memories0.shape[2:])\n \n exps1.memory = self.memories1.transpose(0, 1).reshape(-1, *self.memories1.shape[2:])\n \n exps0.message = self.msgs0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message = self.msgs1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n exps0.message_out = self.msgs_out0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message_out = self.msgs_out1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n #exps0.rng_states = self.rng_states0.transpose(0, 1).reshape(-1, *self.rng_states0.shape[2:])\n #if torch.cuda.is_available():\n # exps0.cuda_rng_states = self.cuda_rng_states0.transpose(0, 1).reshape(-1, *self.cuda_rng_states0.shape[2:])\n \n #exps1.rng_states = self.rng_states1.transpose(0, 1).reshape(-1, *self.rng_states1.shape[2:])\n #if torch.cuda.is_available():\n # exps1.cuda_rng_states = self.cuda_rng_states1.transpose(0, 1).reshape(-1, *self.cuda_rng_states1.shape[2:])\n \n # T x P -> P x T -> (P * T) x 1\n exps0.mask = self.masks0.transpose(0, 1).reshape(-1).unsqueeze(1)\n \n exps1.mask = self.masks1.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps0.action = self.actions0.transpose(0, 1).reshape(-1)\n exps0.value = self.values0.transpose(0, 1).reshape(-1)\n exps0.reward = self.rewards0.transpose(0, 1).reshape(-1)\n exps0.advantage = self.advantages0.transpose(0, 1).reshape(-1)\n exps0.returnn = exps0.value + exps0.advantage\n exps0.log_prob = self.log_probs0.transpose(0, 1).reshape(-1)\n exps0.speaker_log_prob = self.speaker_log_probs0.transpose(0, 1).reshape(-1)\n \n exps1.action = self.actions1.transpose(0, 1).reshape(-1)\n exps1.value = self.values1.transpose(0, 1).reshape(-1)\n exps1.reward = self.rewards1.transpose(0, 1).reshape(-1)\n exps1.advantage = self.advantages1.transpose(0, 1).reshape(-1)\n exps1.returnn = exps1.value + exps1.advantage\n exps1.log_prob = self.log_probs1.transpose(0, 1).reshape(-1)\n 
exps1.speaker_log_prob = self.speaker_log_probs1.transpose(0, 1).reshape(-1)\n\n if self.aux_info:\n exps0 = self.aux_info_collector0.end_collection(exps0)\n \n exps1 = self.aux_info_collector1.end_collection(exps1)\n\n # Preprocess experiences\n\n exps0.obs = self.preprocess_obss(exps0.obs, device=self.device)\n\n exps1.obs = self.preprocess_obss(exps1.obs, device=self.device)\n\n # Log some values\n\n keep0 = max(self.log_done_counter0, self.num_procs)\n\n keep1 = max(self.log_done_counter1, self.num_procs)\n\n log0 = {\n \"return_per_episode\": self.log_return0[-keep0:],\n \"reshaped_return_per_episode\": self.log_reshaped_return0[-keep0:],\n \"num_frames_per_episode\": self.log_num_frames0[-keep0:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter0,\n }\n\n log1 = {\n \"return_per_episode\": self.log_return1[-keep1:],\n \"reshaped_return_per_episode\": self.log_reshaped_return1[-keep1:],\n \"num_frames_per_episode\": self.log_num_frames1[-keep1:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter1,\n }\n\n self.log_done_counter0 = 0\n self.log_return0 = self.log_return0[-self.num_procs:]\n self.log_reshaped_return0 = self.log_reshaped_return0[-self.num_procs:]\n self.log_num_frames0 = self.log_num_frames0[-self.num_procs:]\n\n self.log_done_counter1 = 0\n self.log_return1 = self.log_return1[-self.num_procs:]\n self.log_reshaped_return1 = self.log_reshaped_return1[-self.num_procs:]\n self.log_num_frames1 = self.log_num_frames1[-self.num_procs:]\n\n return exps0, log0, exps1, log1",
"def collect_experience(env_, agent_, size):\n env_.reset()\n state, reward, done, _ = env_.step(env_.action_space.sample())\n for data in range(size):\n action = env_.action_space.sample()\n next_state, reward, done, _ = env_.step(action)\n # penalize reward based on the position of the cart\n reward = max(0, reward * (1 - abs(next_state[0]/2.4)))\n if done:\n next_state = np.zeros(state.shape)\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n env_.reset()\n state, reward, done, _ = env_.step(env.action_space.sample())\n else:\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n state = next_state",
"def get(self) -> list:\n return self.__expedition",
"def examples(self):\n for obj_ind in range(len(self.objects)):\n yield self.get_object_intent_by_index(obj_ind)",
"def _generate_instances(self, single_traj):\n return [single_traj[:2].values]"
] |
[
"0.6554049",
"0.6396855",
"0.6297287",
"0.6107383",
"0.6098944",
"0.60974336",
"0.6025186",
"0.60197103",
"0.58755547",
"0.58387226",
"0.58213526",
"0.5778656",
"0.56729084",
"0.565521",
"0.5647804",
"0.5634885",
"0.56303805",
"0.5615144",
"0.5606871",
"0.5605314",
"0.5564643",
"0.55580366",
"0.5554459",
"0.5543006",
"0.5503106",
"0.54749244",
"0.54673463",
"0.5464214",
"0.54584026",
"0.5439079"
] |
0.7348829
|
0
|
test that audiobook can be inserted into db
|
def test_audiobook_can_insert(self):
data = {
"audiotype": "Audiobook",
"metadata": {
"duration": 37477,
"title": "another",
"author": "Solomon",
"narrator": "Ndiferke"
}
}
response = requests.post(
"http://localhost:9001/api/create-audio", json=data)
success = response.json()
self.assertEqual(success["success"], True)
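
The audiobook tests in this section all call a service assumed to be running at http://localhost:9001; a shared base URL and setUp, sketched below, keeps the endpoints and payload in one place. The route name and response field are taken from the snippet, everything else is illustrative:

import unittest
import requests

BASE_URL = "http://localhost:9001/api"  # assumed location of the service under test

class AudiobookApiTest(unittest.TestCase):
    def setUp(self):
        # payload reused by the create/update tests
        self.audiobook = {
            "audiotype": "Audiobook",
            "metadata": {
                "duration": 37477,
                "title": "another",
                "author": "Solomon",
                "narrator": "Ndiferke",
            },
        }

    def test_audiobook_can_insert(self):
        response = requests.post(f"{BASE_URL}/create-audio", json=self.audiobook)
        self.assertEqual(response.json()["success"], True)

if __name__ == "__main__":
    unittest.main()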
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_api_can_create_a_music(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_upload_voice_dataset(self):\n pass",
"async def test_valid_insert(database, valid_data):\n await database.setup_database(reset=True)\n for id ,user_id,embeddings,batch_id in valid_data:\n await database.insert_user(user_id=user_id)\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n await database.close_pool()",
"def test_create_record(self):\n pass",
"def test_insert_minimal(self):\n album = Album(artist='Artist', album='Album',\n totalseconds=120, totaltracks=2)\n pk = album.insert(self.app.db, self.app.curs)\n\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'album')\n self.assertEqual(album_row['totaltracks'], 2)\n self.assertEqual(album_row['totalseconds'], 120)\n self.assertEqual(album_row['lasttransform'], 0)",
"def test_insert_invalid_type(self):\n album = Album(artist='Artist', album='Album', album_type='xyzzy',\n totalseconds=120, totaltracks=2)\n with self.assertRaises(Exception):\n pk = album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)",
"def test_insert_simple(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=5, totalseconds=42, last_transform=3)\n pk = album.insert(self.app.db, self.app.curs)\n\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 5)\n self.assertEqual(album_row['totalseconds'], 42)\n self.assertEqual(album_row['lasttransform'], 3)",
"def test_create_soundcloud(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n post_data = {\n 'source_type': 'soundcloud',\n 'source_id': 153868082,\n }\n\n with transaction.atomic():\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n\n new_records_count = Track.objects.all().count()\n # Ensure request was successful\n self.assertEqual(resp.status_code, 200)\n # Ensure a new record was created in the database\n self.assertEqual(existing_records_count+1, new_records_count)",
"def test_audiobook_can_update(self):\n\n data = {\n \"audiotype\": \"Audiobook\",\n \"metadata\": {\n \"title\": \"audiobook1\",\n \"duration\": 45678,\n \"author\": \"Solomon\",\n \"narrator\": \"Aniefiok\"\n }\n }\n\n num = str(3)\n\n response = requests.put(\n \"http://localhost:9001/api/update-audio/Audiobook/\"+num, json=data)\n\n self.assertEqual(response.status_code, 200)",
"def test_insert(self):\n db=Database(\"test.db\")\n db.query(\"insert into game (user_a, user_b, winner, board) values('a', 'b', 'sinner', 'asdf');\");\n self.assertEqual(len(db.query(\"select * from game\"))>=1, True)",
"def test_insert_minimal(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], '')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], '')\n self.assertEqual(track_row['composer'], '')\n self.assertEqual(track_row['conductor'], '')\n self.assertEqual(track_row['source'], 'xmms')\n self.assertEqual(track_row['album_id'], 0)\n self.assertEqual(track_row['tracknum'], None)\n self.assertEqual(track_row['seconds'], 0)\n self.assertEqual(track_row['lasttransform'], 0)",
"def test_create_youtube(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n post_data = {\n 'source_type': 'youtube',\n 'source_id': 'StTqXEQ2l-Y',\n }\n\n with transaction.atomic():\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n\n new_records_count = Track.objects.all().count()\n # Ensure request was successful\n self.assertEqual(resp.status_code, 200)\n # Ensure a new record was created in the database\n self.assertEqual(existing_records_count+1, new_records_count)",
"def test_upload_body(db_conn, cards_table):\n\n card, errors = UploadCard.insert(db_conn, {\n 'unit_id': 'RUF531',\n 'name': 'What is?',\n 'file_extensions': ['jpg'],\n 'rubric': True, # TODO\n })\n assert len(errors) == 1\n card, errors = card.update(db_conn, {'body': 'Testing 1234'})\n assert len(errors) == 0",
"async def test_invalid_insert_no_user(database, valid_data):\n await database.setup_database(reset=True)\n for id,user_id,embeddings,batch_id in valid_data:\n try:\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert False\n except(NotFoundError, DuplicateKeyError):\n assert True\n await database.close_pool()",
"def test_add():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT COUNT(*) FROM songs WHERE song_title = SHERlocked\n artist_name = unknown\"\"\")\n count = cur.fetchone()[0]\n assert count != 0",
"async def test_valid_insert_batch(database,valid_data):\n await database.setup_database(reset=True)\n data = []\n for _id,user_id,embeddings,batch_id in valid_data: #pylint: disable=unused-variable\n await database.insert_user(user_id)\n data.append((_id,user_id,embeddings,1))\n await database.insert_batch(data)\n await database.close_pool()",
"def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):\n try:\n cursor = dbConnection.cursor()\n title = title.replace(\"'\", \"''\")\n cursor.execute(\"INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('\" + audiourl + \"', NULL, '\" + podcastName + \"', NULL, '\" + description + \"', '\" + parsedDate + \"', '\" + title + \"', FALSE, NULL);\")\n dbConnection.commit()\n cursor.close()\n return True\n except:\n return False\n return False",
"def test_audiobook_can_read(self):\n\n response = requests.get(\n \"http://localhost:9001/api/get-audio/Audiobook\")\n\n self.assertEqual(response.status_code, 200)",
"def test_add_book(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n assert first_book_list.add_book(first_book)\n assert first_book_list.find_book(\"First Man\")\n assert first_book_list.num_books() == 1",
"def test_insert_simple(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', composer='Composer', conductor='Conductor',\n tracknum=1, seconds=10, album_id=42, last_transform=5)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n self.assertEqual(track_row['composer'], 'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')\n self.assertEqual(track_row['source'], 'xmms')\n self.assertEqual(track_row['album_id'], 42)\n self.assertEqual(track_row['tracknum'], 1)\n self.assertEqual(track_row['seconds'], 10)\n self.assertEqual(track_row['lasttransform'], 5)",
"def test_adding_invalid_file(self):\n (added, status) = self.app.add_album(__file__)\n self.assertEqual(added, False)\n self.assertIn('Unable to load', status)\n self.assertEqual(self.get_album_count(), 0)",
"def test_user_can_create_a_book(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '5698745124'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('success', str(res2))",
"def test_create_with_bad_backend(self):\n # Count the number of records before the save\n post_data = {\n 'source_type': 'test',\n 'source_id': '4bCOAuhvjsxbVBM5MM8oik',\n }\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')",
"def test_db(self):\n db.tests.insert_one({'name': 'test-name'})\n r = db.tests.find_one({'name': 'test-name'})\n self.assertEqual(r['name'], 'test-name')\n\n db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})",
"def test_upload_rubric(db_conn, cards_table):\n\n card, errors = UploadCard.insert(db_conn, {\n 'unit_id': 'RUF531',\n 'name': 'What is?',\n 'body': 'Testing 1234',\n 'file_extensions': ['jpg'],\n })\n assert len(errors) == 1\n card, errors = card.update(db_conn, {'rubric': None})\n assert len(errors) == 0",
"def test_questions_answers_add_model(self):\n content = Content(header = \"Test_Header\", content = \"Test_Content\")\n question = Questions(question_text = \"Test_Question?\", content = content)\n answer = Answers(answer_text = \"Answer_Test\", correct = 0, question = question)\n db.session.add(content)\n db.session.add(question)\n db.session.add(answer)\n db.session.commit()\n self.assertEqual(Questions.query.filter_by(question_text = \"Test_Question?\").count(), 1)\n self.assertEqual(Answers.query.filter_by(answer_text = \"Answer_Test\", correct = 0, question = question).count(), 1)",
"def test_add_books(self):\n response = self.client.post(\n '/api/v1/books', data=json.dumps(add_book), content_type='application/json',\n headers=self.get_admin_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'], 'Book awarded successfully')\n assert response.status_code == 201",
"def test_insert_empty_totalseconds(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=2)\n with self.assertRaises(Exception):\n pk = album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)",
"def test_create_document(self):\n self.client.force_login(user=Customer.objects.get(id=self.dummy.customer[0]).user)\n url = reverse('document-list')\n\n data = {\n 'file':open('Murphy.txt'),\n 'type':2,\n 'res':self.res_id,\n 'obsolete': True,\n 'description':'Sth',\n }\n response = self.client.post(url,data,format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n document = Document.objects.get(id=json.loads(response.content)['id'])\n self.assertEqual(document.res_id,self.res_id)\n self.client.logout()",
"def test_save(self):"
] |
[
"0.67848116",
"0.6577634",
"0.6528779",
"0.63863647",
"0.6381396",
"0.63655",
"0.63352036",
"0.63241166",
"0.6306235",
"0.6287628",
"0.6280709",
"0.62133586",
"0.620948",
"0.62088597",
"0.6194809",
"0.61815",
"0.6163415",
"0.61458015",
"0.6109276",
"0.6099825",
"0.6083798",
"0.60744053",
"0.6046503",
"0.60314286",
"0.601403",
"0.5998407",
"0.597563",
"0.59637856",
"0.5961495",
"0.59430516"
] |
0.83560413
|
0
|
test that audiobook can be read from DB
|
def test_audiobook_can_read(self):
response = requests.get(
"http://localhost:9001/api/get-audio/Audiobook")
self.assertEqual(response.status_code, 200)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_audiobook_can_insert(self):\n\n data = {\n \"audiotype\": \"Audiobook\",\n \"metadata\": {\n \"duration\": 37477,\n \"title\": \"another\",\n \"author\": \"Solomon\",\n \"narrator\": \"Ndiferke\"\n }\n }\n response = requests.post(\n \"http://localhost:9001/api/create-audio\", json=data)\n\n success = response.json()\n self.assertEqual(success[\"success\"], True)",
"def test_sounds_get(self):\n pass",
"def test_sounds_id_get(self):\n pass",
"def test_gathering_audio_files(\n requests_mock: rm_Mocker,\n json_db_mock: str,\n lep_dl: LepDL,\n) -> None:\n requests_mock.get(\n conf.JSON_DB_URL,\n text=json_db_mock,\n )\n lep_dl.get_remote_episodes()\n lep_dl.files = downloader.gather_all_files(lep_dl.db_episodes)\n audio_files = lep_dl.files.filter_by_type(Audio)\n assert len(audio_files) == 18",
"def test_no_audio():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/chirp_noaudio.mp3\")\n no_audio_file_struct.features_file = \"features/chirp_noaudio.json\"\n feat_type = FeatureTypes.framesync\n CQT(no_audio_file_struct, feat_type, sr=22050).features\n assert (os.path.isfile(no_audio_file_struct.features_file))\n with open(no_audio_file_struct.features_file) as f:\n data = json.load(f)\n assert(CQT.get_id() in data.keys())",
"def test_plays_get(self):\n pass",
"def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8",
"def test_upload_voice_dataset(self):\n pass",
"def test_get_voice_datasets(self):\n pass",
"def test_read_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n with test_client.get(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n \"\"\"\n clear the table, create several books and read them\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.get(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n with test_client.get(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(response.get_data(as_text=True)),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )",
"def test_songs_played(self):\n self.assertEqual(self.show.song_booleans, {\n 'you-enjoy-myself': 1,\n 'tweezer': 0\n })",
"def test_readSongData():\n\n # check type\n assert isinstance(song_df, pd.DataFrame)\n\n # check shape\n assert song_df.shape == (1972060, 8)",
"def test_audiobook_can_update(self):\n\n data = {\n \"audiotype\": \"Audiobook\",\n \"metadata\": {\n \"title\": \"audiobook1\",\n \"duration\": 45678,\n \"author\": \"Solomon\",\n \"narrator\": \"Aniefiok\"\n }\n }\n\n num = str(3)\n\n response = requests.put(\n \"http://localhost:9001/api/update-audio/Audiobook/\"+num, json=data)\n\n self.assertEqual(response.status_code, 200)",
"def test_no_audio_no_features():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/caca.mp3\")\n feat_type = FeatureTypes.framesync\n with raises(NoAudioFileError):\n CQT(no_audio_file_struct, feat_type, sr=11025).features",
"def test_audiobook_can_delete(self):\n num = str(5)\n response = requests.delete(\n \"http://localhost:9001/api/delete-audio/Audiobook/\"+num)\n\n self.assertEqual(response.status_code, 200)",
"def test_api_can_create_a_music(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_extracting_audio_data(\n only_audio_episodes: LepEpisodeList,\n lep_dl: LepDL,\n) -> None:\n expected_audio = Audio(\n ep_id=2009101908, # many posts in that day\n name=\"15. Extra Podcast – 12 Phrasal Verbs\",\n short_date=\"2009-10-19\",\n filename=\"[2009-10-19] # 15. Extra Podcast – 12 Phrasal Verbs\",\n primary_url=\"http://traffic.libsyn.com/teacherluke/15-extra-podcast-12-phrasal-verbs.mp3\", # noqa: E501,B950\n )\n lep_dl.files = downloader.gather_all_files(only_audio_episodes)\n audio_files = lep_dl.files.filter_by_type(Audio)\n assert audio_files[1] == expected_audio",
"def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0",
"def test_get_audio_fulfillment_file(self):\n self.api.queue_response(200, content=\"A license\")\n response = self.api.get_audio_fulfillment_file(\"patron id\", \"bib id\")\n\n [[method, url, args, kwargs]] = self.api.requests\n eq_(\"POST\", method)\n assert url.endswith('GetItemAudioFulfillment')\n eq_('<AudioFulfillmentRequest><ItemId>bib id</ItemId><PatronId>patron id</PatronId></AudioFulfillmentRequest>', kwargs['data'])\n\n eq_(200, response.status_code)\n eq_(\"A license\", response.content)",
"def test_get_audio_stream_does_not_raise(self):\n youtube_url = \"https://www.youtube.com/watch?v=jIxas0a-KgM\"\n _ = utils.get_audio_stream(youtube_url)\n assert True # No error",
"def test_api_can_get_a_music(self):\n music = Music.objects.get()\n repsonse = self.client.get(\n reverse('details'),\n kwargs={'pk': music.id},\n format = \"json\" \n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, music)",
"def test_update_voice_dataset(self):\n pass",
"def test_output_exists():\n assert song_decoder(\"WUWUBUBWUBUWUB\") is not None",
"def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)",
"def test_read(self):\n self.reader._timing = [3, 2, 2, 1, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 3)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(3, score)\n self.assertEqual(6, time)\n self.assertEqual([3, 3, 3, 2, 2, 2], self.reader._timing)\n score, time = self.reader.read(self.books[3], 4, 5)\n self.assertTrue(self.books[3].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(7, time)\n self.assertEqual([3, 3, 3, 2, 3, 3], self.reader._timing)",
"def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass",
"def test_loading_document(self):",
"def test_get_a_song(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.fetch_a_post(self.valid_post_id)\n # fetch the data from db\n expected = Post.objects.get(pk=self.valid_post_id)\n serialized = PostSerializerSchema(expected)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test with a song that does not exist\n response = self.fetch_a_post(self.invalid_song_id)\n self.assertEqual(\n response.data[\"message\"],\n \"Song with id: 100 does not exist\"\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")",
"def test_no_audio_no_params():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/chirp_noaudio.mp3\")\n no_audio_file_struct.features_file = \"features/chirp_noaudio.json\"\n feat_type = FeatureTypes.framesync\n with raises(NoAudioFileError):\n CQT(no_audio_file_struct, feat_type, sr=11025).features"
] |
[
"0.7046402",
"0.6732572",
"0.6514186",
"0.6272971",
"0.6199112",
"0.617027",
"0.61287713",
"0.6095272",
"0.6043759",
"0.5952801",
"0.59451705",
"0.5902091",
"0.58871716",
"0.58851415",
"0.5864519",
"0.5856869",
"0.58567923",
"0.5825781",
"0.58153707",
"0.5800193",
"0.57910967",
"0.57629246",
"0.5737623",
"0.5733371",
"0.57329136",
"0.5727381",
"0.5700692",
"0.56909335",
"0.56905746",
"0.56853956"
] |
0.7720838
|
0
|
test that audiobook can be deleted from DB
|
def test_audiobook_can_delete(self):
num = str(5)
response = requests.delete(
"http://localhost:9001/api/delete-audio/Audiobook/"+num)
self.assertEqual(response.status_code, 200)
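
A small readability tweak that applies to the read/update/delete tests alike: build the resource URL with an f-string instead of str() concatenation. The route below is the one assumed by the snippet, and the id is purely illustrative:

import requests

audio_id = 5  # id of a record assumed to exist from an earlier test
response = requests.delete(
    f"http://localhost:9001/api/delete-audio/Audiobook/{audio_id}")
assert response.status_code == 200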
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_delete_voice_dataset(self):\n pass",
"def test_api_can_delete_music(self):\n music = Music.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': music.id}),\n format = \"json\",\n follow = True\n )\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_book(self):\n response = self.client.delete(self.book.get_absolute_url()) \n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Book.objects.count(), 0)",
"def test_delete_record(self):\n pass",
"def test_delete(self):\n pass",
"def test_client_verification_document_delete(self):\n pass",
"def test_delete_question(self):\n que = Question.query.filter(Question.question.ilike('%chemical%'))\n qid = que[0].id\n res = self.client().delete('/questions/'+str(que[0].id))\n question = Question.query.get(qid)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted'], qid)\n self.assertEqual(question, None)",
"def test_client_document_delete(self):\n pass",
"def test_delete_note(self):\n pass",
"def test_delete_records(self):\n pass",
"def test_delete(self):\n person = Person('test_person_b')\n person.delete()\n with database() as db:\n results = db.query(\"SELECT * FROM persons WHERE person_name = 'test_person_b'\")\n self.assertEqual(results, [])",
"def delete_audiobook(_id):\r\n Audiobook.query.filter_by(id=_id).delete()\r\n # filter audio book by id and delete\r\n db.session.commit() # commiting the new change to our database\r",
"def test_delete_run(self):\n pass",
"def test_delete(self):\n # login as library manager\n self.authenticate(self.user)\n\n # check there are 3 works\n self.assertEqual(Work.objects.count(), 3)\n\n self.assertNotEqual(self.work1.song_set.count(), 0)\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 2})\n\n # check there are only 1 work remaining\n self.assertEqual(Work.objects.count(), 1)\n\n # check artists with songs remains\n self.assertEqual(Work.objects.filter(pk=self.work2.pk).count(), 0)\n self.assertEqual(Work.objects.filter(pk=self.work3.pk).count(), 0)",
"def test_delete1(self):\n pass",
"def test_delete_a_song(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.delete_a_post(1)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # test with invalid data\n response = self.delete_a_post(100)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")",
"def test_delete_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"error\"\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"error\"\n }\n )\n\n \"\"\"\n clear the table, create several books and list them, remove one and list them again, remove another one \n and list them again\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one,\n book_two\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": []\n }\n )",
"def test_questions_answers_delete_model(self):\n db.session.delete(Answers.query.filter_by(answer_text = \"Answer_Test\", correct = 0).first())\n db.session.delete(Questions.query.filter_by(question_text=\"Test_Question?\").first())\n db.session.delete(Content.query.filter_by(header = \"Test_Header\", content = \"Test_Content\").first())\n db.session.commit()\n self.assertEqual(Questions.query.filter_by(question_text=\"Test_Question?\").count(), 0)\n self.assertEqual(Answers.query.filter_by(answer_text = \"Answer_Test\").count(), 0)",
"def test_deletion(self):\n self.assertEqual(self.store.query(BatchManholePowerup).count(), 0)",
"def test_delete7(self):\n pass",
"def test_delete(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n resp = self.api_client.delete('/api/metadata/tracks/2/')\n data = json.loads(resp.content)\n new_records_count = Track.objects.all().count()\n\n # Ensure request was successful, and the record is removed from the database.\n # Should return with a success message.\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(existing_records_count-1, new_records_count)\n self.assertEqual(data['detail'], 'Track successfully removed')",
"def test_delete_item_using_delete(self):\n pass",
"def test_delete_question(self):\n # Arrange\n self.question.insert()\n question_id = self.question.id\n\n # Act\n res = self.client().delete(f\"/questions/{question_id}\")\n data = json.loads(res.data)\n # shall return None as I have deleted this question\n question = Question.query.filter(Question.id == question_id).one_or_none()\n\n # Assert\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"deleted\"], question_id)\n self.assertTrue(data[\"questions\"])\n self.assertEqual(question, None)",
"def test_delete_song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n make_response, code = delete_song(target_song)\n\n assert make_response == \"The song title I can do all things is deleted for artist: Heng.\"",
"def test_delete_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# delete book\n\t\tdel_book = self.client.delete(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}')\n\t\t)\n\n\t\tres3 = json.loads(del_book.data.decode())\n\t\tself.assertTrue(res3['message'] == 'book with id 1 has been deleted')",
"def test_delete(self):\n\n test_user = get_user_model().objects.create_user(username='tester',password='pass')\n test_user.save()\n\n test_book = Book.objects.create(\n publisher = test_user,\n name = 'Title of Blog',\n description = 'Words about the blog'\n )\n\n test_book.save()\n\n book = Book.objects.get()\n\n url = reverse('book_detail', kwargs={'pk': book.id})\n\n\n response = self.client.delete(url)\n\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT, url)",
"def test_delete_question(self):\n\n question = {\n \"title\" : \"Blue\",\n \"question\": \"How do I refactor tests with database?\"\n }\n\n self.app.post('/api/v1/questions',\n data=json.dumps(question),\n content_type='application/json'\n )\n question_id = id_generator(\"Blue\")\n res = self.app.delete('/api/v1/questions/'+str(question_id))\n self.assertEqual(res.status_code, 200)",
"def test_delete_case(self):\n pass",
"def test_delete_question(self):\n\n res = self.client().delete('/questions/28')\n\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)"
] |
[
"0.7435866",
"0.7428341",
"0.72781295",
"0.7209934",
"0.71410346",
"0.6997073",
"0.6972826",
"0.69329727",
"0.6901897",
"0.6901834",
"0.687459",
"0.6856501",
"0.6855984",
"0.6841089",
"0.6790555",
"0.67834634",
"0.67833436",
"0.67829585",
"0.678234",
"0.67810273",
"0.67750245",
"0.6740728",
"0.6736363",
"0.67274827",
"0.67259145",
"0.66689354",
"0.6662429",
"0.6627066",
"0.66238195",
"0.6607667"
] |
0.8164794
|
0
|
Test that an audiobook can be updated in the DB.
|
def test_audiobook_can_update(self):
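        # send updated metadata for the audiobook with id 3 and expect HTTP 200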
data = {
"audiotype": "Audiobook",
"metadata": {
"title": "audiobook1",
"duration": 45678,
"author": "Solomon",
"narrator": "Aniefiok"
}
}
num = str(3)
response = requests.put(
"http://localhost:9001/api/update-audio/Audiobook/"+num, json=data)
self.assertEqual(response.status_code, 200)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_audiobook_can_insert(self):\n\n data = {\n \"audiotype\": \"Audiobook\",\n \"metadata\": {\n \"duration\": 37477,\n \"title\": \"another\",\n \"author\": \"Solomon\",\n \"narrator\": \"Ndiferke\"\n }\n }\n response = requests.post(\n \"http://localhost:9001/api/create-audio\", json=data)\n\n success = response.json()\n self.assertEqual(success[\"success\"], True)",
"def test_update_record(self):\n pass",
"def test_api_can_update_music(self):\n change_music = {'name': 'Music2'}\n res = self.client.put(\n reverse('details', kwargs={'pk':music.id}),\n change_music,\n format = 'json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_update_voice_dataset(self):\n pass",
"def test_update_note(self):\n pass",
"def test_update_a_song(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"put\",\n id=2,\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test with invalid data\n response = self.make_a_request(\n kind=\"put\",\n id=3,\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"Both title and body are required to add a song\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update_book_details(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n new_book_details = {\n \"title\": \"First Man\",\n \"author\": \"James Hansen\",\n \"year\": 2018,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 5\n }\n\n assert first_book_list.update_book_details(new_book_details) == True\n assert first_book_list.find_book(\"First Man\") == True\n\n for book in first_book_list.show_all():\n assert book.get(\"title\") == \"First Man\"\n assert book.set(\"title\", \"First Man: The Life of Neil A. Armstrong\") == True\n\n assert first_book_list.find_book(\"First Man: The Life of Neil A. Armstrong\") == True",
"def test_update_scenario(self):\n pass",
"def test_create_update_Song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things (Remix)\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n given_artist = \"Heng\"\n given_song_title = \"I can do all things (Remix)\"\n\n create_update_song(target_song)\n\n song_data = get_one_song(given_artist, given_song_title)\n\n assert song_data['artist'] == given_artist\n assert song_data['song'] == given_song_title",
"def test_update_case(self):\n pass",
"def test_update_one(self):\n pass",
"def test_client_verification_document_update(self):\n pass",
"def test_update_song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things In...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n update_song(target_song)\n\n updated_song_lyrics = \"Like Steph said I can do all things In...\"\n\n given_artist = \"Heng\"\n given_song_title = \"I can do all things\"\n\n song_data = get_one_song(given_artist, given_song_title)\n\n print(\"updated_song_data['lyrics']: \", song_data['lyrics'])\n\n assert song_data['lyrics'] == updated_song_lyrics",
"def test_update(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 1)\n self.assertEqual(album_row['totalseconds'], 120)\n\n # Now update the object and save out, and test.\n album.artist = 'Artist 2'\n album.album = 'Album 2'\n album.album_type = 'live'\n album.totaltracks = 2\n album.totalseconds = 240\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist 2')\n self.assertEqual(album_row['alalbum'], 'Album 2')\n self.assertEqual(album_row['altype'], 'live')\n self.assertEqual(album_row['totaltracks'], 2)\n self.assertEqual(album_row['totalseconds'], 240)",
"def test_update_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book,\n \"title\": \"title one updated\"\n }\n }\n )\n\n \"\"\"\n clear the table, create several books, update them and read them\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n with test_client.put(\n \"/book/{}/\".format(book_one[\"id\"]),\n data={\n \"title\": \"title one updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_one[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_one,\n \"title\": \"title one updated\"\n }\n }\n )\n\n with test_client.put(\n \"/book/{}/\".format(book_two[\"id\"]),\n data={\n \"title\": \"title two updated\"\n }\n ) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )\n\n self.assertEqual(\n read_book(book_two[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": {\n **book_two,\n \"title\": \"title two updated\"\n }\n }\n )",
"def test_audiobook_can_read(self):\n\n response = requests.get(\n \"http://localhost:9001/api/get-audio/Audiobook\")\n\n self.assertEqual(response.status_code, 200)",
"def test_update(app):\n\n assert False",
"def test_api_can_update_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# update book\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['title'] == 'updated book')",
"def test_partly_update_book(self):\n data = {'isbn':'96712116-2'}\n response = self.client.patch(self.book.get_absolute_url(), data, format='json', content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.get(self.book.get_absolute_url())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, '96712116-2')",
"def test_update_book(self):\n book_information = self.books_from_json[0]\n book_id = '60773a16cb838494e13d3652'\n self.books.update = MagicMock(return_value=None) # success on update\n update_book = self.books.update_details(book_id, self.books_from_json[0])\n self.assertEqual(\"Mock Book updated!\", update_book['flash_message'])",
"def test_update(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n self.assertEqual(track_row['composer'], 'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')\n\n # Now update the object and save out, and test.\n track.artist = 'Artist 2'\n track.album = 'Album 2'\n track.title = 'Title 2'\n track.ensemble = 'Ensemble 2'\n track.composer = 'Composer 2'\n track.conductor = 'Conductor 2'\n track.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album 2')\n self.assertEqual(track_row['title'], 'Title 2')\n self.assertEqual(track_row['ensemble'], 'Ensemble 2')\n self.assertEqual(track_row['composer'], 'Composer 2')\n self.assertEqual(track_row['conductor'], 'Conductor 2')",
"def test_api_can_create_a_music(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_success_case(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n tm2 = tm.update(count=9)\r\n\r\n tm3 = TestModel.get(tm.vid)\r\n assert tm2.count == 9\r\n assert tm3.count == 9",
"def test_update_no_pk(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n with self.assertRaises(Exception):\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)",
"def test_update_metadata1(self):\n pass",
"def test_update_metadata(self):\n pass",
"def test_update_no_commit(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 1)\n self.assertEqual(album_row['totalseconds'], 120)\n\n # Now update the object and save out, and test.\n album.artist = 'Artist 2'\n album.album = 'Album 2'\n album.album_type = 'live'\n album.totaltracks = 2\n album.totalseconds = 240\n album.update(self.app.db, self.app.curs, commit=False)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist 2')\n self.assertEqual(album_row['alalbum'], 'Album 2')\n self.assertEqual(album_row['altype'], 'live')\n self.assertEqual(album_row['totaltracks'], 2)\n self.assertEqual(album_row['totalseconds'], 240)\n self.app.db.rollback()\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 1)\n self.assertEqual(album_row['totalseconds'], 120)"
] |
[
"0.7134568",
"0.7090748",
"0.70769113",
"0.69468963",
"0.65462226",
"0.6545777",
"0.6538061",
"0.6538061",
"0.6538061",
"0.64445615",
"0.6443035",
"0.64324975",
"0.6414275",
"0.6392401",
"0.6383004",
"0.63642",
"0.63418466",
"0.6337228",
"0.63190335",
"0.63148654",
"0.6284827",
"0.628377",
"0.6181544",
"0.61752397",
"0.61419183",
"0.61291754",
"0.6116776",
"0.60983366",
"0.6077967",
"0.60729116"
] |
0.78056514
|
0
|
Validates that aliasing works properly when the query contains both tags_key and tags_value.
|
def test_aliasing() -> None:
processed = parse_and_process(
{
"aggregations": [],
"groupby": [],
"selected_columns": ["tags_value"],
"conditions": [["tags_key", "IN", ["t1", "t2"]]],
}
)
sql = format_query(processed).get_sql()
transactions_table_name = (
transactions_storage.get_table_writer().get_schema().get_table_name()
)
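    # both tags_value (in SELECT) and tags_key (in WHERE) should resolve to the
    # same arrayJoin alias, snuba_all_tags, instead of producing two array joins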
assert sql == (
"SELECT (tupleElement((arrayJoin(arrayMap((x, y -> tuple(x, y)), "
"tags.key, tags.value)) AS snuba_all_tags), 2) AS _snuba_tags_value) "
f"FROM {transactions_table_name} "
"WHERE in((tupleElement(snuba_all_tags, 1) AS _snuba_tags_key), tuple('t1', 't2'))"
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _make_generic_tag_match(key: str, value: str) -> Dict[str, Any]:\n query = {\n \"tags\": {\n \"$elemMatch\": {\n \"key\": key,\n \"$or\": [\n {\"vStr\": value},\n {\"vInt64\": value},\n {\"vBool\": value},\n {\"vFloat64\": value},\n {\"vBinary\": value},\n ],\n }\n }\n }\n return query",
"def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"filter\": {\"bad_tag\": \"value\"}}\n serializer = OCIQueryParamSerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def is_valid_query(query: Dict[str, Any]) -> bool:\n for name, value in query.items():\n if is_illegal_surrogate(name) or is_illegal_surrogate(value):\n return False\n return True",
"def validate(self):\n for search_tag_name in self.get_search_tag_names():\n search_tag_obj = Tag(search_tag_name)\n for search_tag_value in self.get_search_tag_values(search_tag_name):\n for new_tag_name in self.get_new_tag_names(search_tag_name, search_tag_value):\n new_tag_obj = Tag(new_tag_name)\n new_tag_value = self.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)\n if new_tag_obj.repeatable:\n if not isinstance(new_tag_value, list):\n raise KeyError('%s needs a list'%(new_tag_name))\n else:\n if isinstance(new_tag_value, list):\n raise KeyError('%s needs a scalar value'%(new_tag_name))",
"def test_bot_alias_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotTagArgs']]]]:\n return pulumi.get(self, \"test_bot_alias_tags\")",
"def test_tags_processor(\n query_body: MutableMapping[str, Any], expected_query: ClickhouseQuery\n) -> None:\n processed = parse_and_process(query_body)\n assert processed.get_selected_columns() == expected_query.get_selected_columns()\n assert processed.get_condition() == expected_query.get_condition()\n assert processed.get_having() == expected_query.get_having()",
"def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"bad_tag\": \"value\"}\n serializer = OCIFilterSerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"bad_tag\": \"*\"}\n serializer = OCIGroupBySerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def test_parse_restricted_tags():\n invalid_tags = {'*', '**', '***', 'a*', '*a', 'a*a*', '*a*a', '*aa*', 'a**a', '}'}\n combined_tags = valid_tags | invalid_tags\n\n # Function under test\n resultant_tags = searchtag.parse_restricted_tags(\" \".join(combined_tags))\n\n # Verify that we have the tags in the valid list\n assert resultant_tags == valid_tags",
"def alias_table_validate(self,verbose=0):\n\n no_alias = self.no_alias\n for k in self.alias_table.keys():\n if k in no_alias:\n del self.alias_table[k]\n if verbose:\n print (\"Deleting alias <%s>, it's a Python \"\n \"keyword or builtin.\" % k)",
"def is_ambiguous_align(tags, multi_align_tag):\n for t in tags:\n if t[0] == multi_align_tag:\n return True\n return False",
"def test_aliases(self):\n field = self.base_field\n self.assertFalse(field.get('aliases'))\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = []\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = ['alias1', 'Alias2']\n sch = SchemaField(field)\n self.assertEqual(field['aliases'], sch.aliases)\n # test some related method\n self.assertTrue(sch.has_alias('alias1'))\n self.assertTrue(sch.has_alias('Alias2'))\n self.assertFalse(sch.has_alias('alias2'))\n self.assertTrue(sch.has_alias('alias2', icase=True))\n self.assertFalse(sch.has_alias(field['name']))\n self.assertTrue(sch.has_name_or_alias(field['name'], 'aaaa'))\n self.assertFalse(sch.has_name_or_alias(field['name'].lower(), 'aaaa'))\n self.assertTrue(sch.has_name_or_alias(field['name'].lower(), 'aaaa', icase=True))\n self.assertFalse(sch.has_name_or_alias('aaaa', 'alias2'))\n self.assertTrue(sch.has_name_or_alias('aaaa', 'alias2', icase=True))",
"def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"bad_tag\": \"value\"}\n serializer = OCIExcludeSerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def _validate_tags(\n instance: typing.Dict[str, typing.Any],\n schema: typing.Dict[str, typing.Any], path: typing.List[str],\n strict: bool = False\n) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'tags'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'tags':\n raise ValidationError('expected _type \"tags\"', path)\n if not isinstance(instance['tags'], list):\n raise ValidationError('tags must be list', path)\n errors = []\n tags = []\n for index, item in enumerate(instance['tags']):\n if not isinstance(item, str):\n errors.append(ValidationError('invalid tag type: {}'.format(type(item)), path + ['tags', str(index)]))\n elif item in tags:\n errors.append(ValidationError('duplicate tag: {}'.format(item), path + ['tags', str(index)]))\n elif item.lower() != item:\n errors.append(ValidationError('tag not lowercase: {}'.format(item), path + ['tags', str(index)]))\n elif any(c not in 'abcdefghijklmnopqrstuvwxyz0123456789_-äöüß' for c in item):\n errors.append(ValidationError('tag contains invalid character: {}'.format(item), path + ['tags', str(index)]))\n elif strict and all(c in string.digits for c in item) and not flask.current_app.config['ENABLE_NUMERIC_TAGS']:\n errors.append(ValidationError('numeric tags are not supported', path + ['tags', str(index)]))\n else:\n tags.append(item)\n\n if len(errors) == 1:\n raise errors[0]\n elif len(errors) > 1:\n raise ValidationMultiError(errors)",
"def test_useless_alias():\n with pytest.raises(ValueError, match='duplicate'):\n alias('name', ('name',))",
"def test_raises_for_missing_alias():\n with pytest.raises(AttributeError):\n alias('new_alias', ('first', 'second'))(_HasAliasedProp)",
"def cypher_unknownTag_all(self, variable_tagUnknown=\"tag_unknown\"):\n query = f'({variable_tagUnknown}{self.label}'\n if self.keyword or self.synonyms is not None:\n query += \"{\"\n if self.keyword:\n query += f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\','\n if self.synonyms:\n query += f'{self.databaseInfoTag[\"properties\"][\"synonyms\"]}:' + '[\\'' + '\\',\\''.join(self.synonyms) + '\\'],'\n query = query[:-1] + \"}\"\n return query + \")\"",
"def test_schema_directives_store_tagged_values(self):\n\n class IDummy(model.Schema):\n \"\"\"Dummy schema class.\"\"\"\n\n searchable(\"foo\")\n foo = schema.TextLine(title=\"Foo\")\n\n self.assertEqual(\n [(Interface, \"foo\", \"true\")], mergedTaggedValueList(IDummy, SEARCHABLE_KEY)\n )",
"def test_execute_tags_queries_keys_only(self):\n test_cases = [\n {\"value\": \"-1\", \"unit\": \"month\", \"resolution\": \"monthly\"},\n {\"value\": \"-2\", \"unit\": \"month\", \"resolution\": \"monthly\"},\n {\"value\": \"-10\", \"unit\": \"day\", \"resolution\": \"daily\"},\n {\"value\": \"-30\", \"unit\": \"day\", \"resolution\": \"daily\"},\n ]\n\n for case in test_cases:\n url = reverse(\"azure-tags\")\n client = APIClient()\n params = {\n \"filter[resolution]\": case.get(\"resolution\"),\n \"filter[time_scope_value]\": case.get(\"value\"),\n \"filter[time_scope_units]\": case.get(\"unit\"),\n \"key_only\": True,\n }\n url = url + \"?\" + urlencode(params, quote_via=quote_plus)\n response = client.get(url, **self.headers)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.json().get(\"data\")\n\n self.assertTrue(data)\n self.assertTrue(isinstance(data, list))\n for tag in data:\n self.assertTrue(isinstance(tag, str))",
"def test_tag_keys_dynamic_field_validation_success(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"valid_tag\": \"*\"}\n serializer = OCIGroupBySerializer(data=query_params, tag_keys=tag_keys)\n self.assertTrue(serializer.is_valid())",
"def test_get_filtered_mapping_keys(\n query: ClickhouseQuery, expected_result: Sequence[str],\n) -> None:\n assert get_filtered_mapping_keys(query, \"tags\") == expected_result",
"def test_formatting() -> None:\n assert tupleElement(\n \"tags_key\",\n arrayJoin(\n \"snuba_all_tags\",\n zip_columns(\n Column(None, None, \"tags.key\"), Column(None, None, \"tags.value\"),\n ),\n ),\n Literal(None, 1),\n ).accept(ClickhouseExpressionFormatter()) == (\n \"(tupleElement((arrayJoin(arrayMap((x, y -> tuple(x, y)), \"\n \"tags.key, tags.value)) AS snuba_all_tags), 1) AS tags_key)\"\n )\n\n assert tupleElement(\n \"tags_key\",\n arrayJoin(\n \"snuba_all_tags\",\n filter_key_values(\n zip_columns(\n Column(None, None, \"tags.key\"), Column(None, None, \"tags.value\"),\n ),\n [Literal(None, \"t1\"), Literal(None, \"t2\")],\n ),\n ),\n Literal(None, 1),\n ).accept(ClickhouseExpressionFormatter()) == (\n \"(tupleElement((arrayJoin(arrayFilter((pair -> in(\"\n \"tupleElement(pair, 1), tuple('t1', 't2'))), \"\n \"arrayMap((x, y -> tuple(x, y)), tags.key, tags.value))) AS snuba_all_tags), 1) AS tags_key)\"\n )",
"def test_tags_filter(client, example_records, h, prefix):\n # Test query (q=)\n res = client.get(f'{prefix}?tags=recommended', headers=h)\n assert res.status_code == 200\n assert res.json[\"hits\"][\"total\"] == 1",
"def isalias(tokens, x, alias):\n\n prior = Token.get(tokens, x - 1)\n token = tokens[x]\n\n # True if prior token is not a separator, grouping token or distinct token and current token is either a column token or quoted token\n return (\n alias\n and x > 0\n and not Token.isseparator(prior)\n and not Token.isgroupstart(prior)\n and not Token.isdistinct(prior)\n and (Token.iscolumn(token) or Token.isquoted(token))\n )",
"def test_raises_for_existing_alias():\n with pytest.raises(AttributeError):\n alias('existing', ('first', 'second'))(_HasAliasedProp)",
"def check_alias(self):\n es = self.get_es()\n aliased_indexes = es[self.es_alias].get('_aliases')\n return aliased_indexes.keys()",
"def find_by_unique(df: pd.DataFrame, tagged_fields: TaggedFields) -> Result:\n unique_fields = tagged_fields.get(\"unique\", [])\n result = Result(\"Duplicates By **unique** Tag\")\n\n if not unique_fields:\n result.add_info(Outcome.SKIPPED)\n return result\n\n err_keys: Set = set()\n for field in unique_fields:\n result.items_count = df[field].count()\n duplicates = df[df.duplicated(field, keep=False)][[field]]\n errors = {}\n for _, d in duplicates.groupby([field]):\n keys = list(d.index)\n msg = f\"same '{d[field].iloc[0]}' `{field}`\"\n errors[msg] = keys\n err_keys = err_keys.union(keys)\n if not duplicates.empty:\n result.add_error(\n f\"{field} contains {len(duplicates[field].unique())} duplicated value(s)\",\n errors=errors,\n )\n\n return result",
"def validateTags(self, tags):\n\t\treturn tags.replace(', ',' ')",
"def similar_qs(self):\n pass",
"def test_tags(question):\n assert \"tags\" in question[\"instance\"]\n tags = set(question[\"instance\"][\"tags\"])\n # there should be at least one tag\n assert len(tags) >= 1\n # each tags should be in VALID_TAGS\n assert len(tags - VALID_TAGS) == 0\n # there should be exactly one category-defining tag\n assert len(tags.intersection(CATEGORY_TAGS)) == 1"
] |
[
"0.57817465",
"0.570398",
"0.5615116",
"0.5476921",
"0.5441292",
"0.5437843",
"0.54195595",
"0.5388927",
"0.53832835",
"0.53694046",
"0.5329078",
"0.5325659",
"0.5304145",
"0.52602834",
"0.52379453",
"0.516322",
"0.5129891",
"0.50972956",
"0.5071068",
"0.50556076",
"0.5045664",
"0.5032249",
"0.49894845",
"0.49687228",
"0.49492308",
"0.49107966",
"0.490682",
"0.48971632",
"0.48966274",
"0.48857683"
] |
0.6864035
|
0
|
Get the project_ids associated with this Community.
|
def get_project_ids(self, *criterion):
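        # deferred local import of the helper (e.g. to avoid a circular import at module load)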
from wkcdd.models.helpers import get_project_ids
return get_project_ids([self.id], *criterion)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)",
"def get_projects(self):\n return self.jira.projects()",
"def getProjects(self):\n\n return self.__projects",
"def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results",
"def get_projects(self):\n return conf.projects",
"def projects(self):\n campaigns = self.campaigns.all()\n return Project.published_objects.filter(campaigns__in=campaigns)",
"def get_projects(self):\n return self._gitlab.owned_projects(per_page=1000)",
"def projects(self):\n ret_val = []\n params = {\"fields\": Project.FIELDS}\n projects = self._request(\"get\", \"projects\", params=params)\n\n for project in projects:\n ret_val.append(Project(project))\n\n return ret_val",
"def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()",
"def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response",
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects",
"def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result",
"def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]",
"def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects",
"def list_projects(self):\n project_keys = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"projectKeys\", [])\n return [DSSProject(self.client, pkey) for pkey in project_keys]",
"def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()",
"def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]",
"def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()",
"def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()",
"def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList",
"def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)",
"def get_projects(self, refresh=False):\n if refresh:\n self._projects_lookup = self.get_project_lookup()\n\n return self._projects_lookup.keys()",
"def projects(self):\r\n return p.Projects(self)",
"def list_projects(self):\n data = self._run(\n url_path=\"projects/list\"\n )\n projects = data['result'].get('projects', [])\n return [self._project_formatter(item) for item in projects]",
"def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None",
"def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response",
"def get_communities(self):\n return self._communities.values()",
"def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]",
"def get_projects(self):\n projects = []\n for project in self.server.projects:\n projects.append({'id': utils.slugify(project),\n 'name': project})\n response.content_type = 'application/json'\n return json.dumps(projects)",
"def active_projects(self):\n return self.projects.filter(active=True)"
] |
[
"0.7047133",
"0.6907629",
"0.6902153",
"0.6842879",
"0.6808918",
"0.679463",
"0.67528903",
"0.65890634",
"0.6489477",
"0.6425217",
"0.64227444",
"0.639533",
"0.6394208",
"0.63902646",
"0.63456815",
"0.630165",
"0.62439257",
"0.62269944",
"0.62134",
"0.61947155",
"0.61920667",
"0.61503273",
"0.6129672",
"0.61159945",
"0.60849005",
"0.60799104",
"0.60279244",
"0.59650695",
"0.59544396",
"0.5953029"
] |
0.69406384
|
1
|
Checks whether ids have already been initialized.
|
def check_initial_ids(self):
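        # '_Base__nb_objects' is the name-mangled form of Base.__nb_objects;
        # its presence means the id counter has already been initialized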
if '_Base__nb_objects' in dir(Square):
type(self).initial_ids = Square.__dict__['_Base__nb_objects'] - 1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self):\n self.ids_seen = set()",
"def __init__(self):\n initialize_db()\n self.ids_seen = set()",
"def _is_initialized(self) -> bool:\n return len(self) > 0",
"def test_ids(self):\n state1 = State()\n state2 = State()\n state3 = State()\n self.assertFalse(state1.id == state2.id)\n self.assertFalse(state1.id == state3.id)\n self.assertFalse(state2.id == state3.id)",
"def __len__(self):\n return len(self.ids)",
"def _instantiated_ids(self):\n return self._identity_map.keys()",
"def initialized(self, identifier):",
"def check_id(self, id):",
"def __test_gen_report_id_check():\n # all fresh\n report = dp.Report(md_block, md_block, md_block)\n assert_report(report) # expected_id_count=5)\n # 2 fresh\n report = dp.Report(md_block, md_block_id, md_block)\n assert_report(report) # expected_id_count=4)\n # 0 fresh\n report = dp.Report(md_block_id, dp.Text(\"test\", name=\"test-2\"))\n assert_report(report) # expected_id_count=2)",
"def test_check_all_ids(self):\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # should return True as all are found\r\n\r\n self.assertEqual(sample_ids_not_found, True)\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3', 'sampleX']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # sampleX should not be found\r\n\r\n self.assertEqual(sample_ids_not_found, ['sampleX'])",
"def _build_ID_sets(self):\n raise NotImplementedError",
"def is_initialized(self) -> bool:\n return (\n self._amount_by_currency_id is not None\n and self._quantities_by_good_id is not None\n )",
"def _id_exists(self):\n return self.app_id in self.engine.app_list",
"def allocate(self):\n for id, value in enumerate(self.bool_array):\n if value == False: #The id has not been allocated\n self.bool_array[id] = True\n return id\n raise CannotAllocateException(\"No ids available\")",
"def test_random_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n assert random_id(ids, {}) in ids\r\n # just test we got something from the list, don't add stochastic test\r",
"def getIDs():",
"def testIdUnique(self):\n ids = {}\n # Vary parameters that affect the work or input data,\n # verify each id is unique\n for params in itertools.product(RequestNames, TaskNames, Inputs,\n Masks, Dbses, Acdcs):\n ele = WorkQueueElement(RequestName=params[0], TaskName=params[1],\n Inputs=params[2], Mask=params[3],\n Dbs=params[4], ACDC=params[5]\n )\n self.assertFalse(ele.id in ids)\n ids[ele.id] = None",
"def check_unique_ids(request: Request, policy: RequestPolicy, logger: Logger) -> None:\n seen: Dict[str, int] = {}\n for bundle in request.bundles:\n if bundle.id in seen:\n raise KSR_BUNDLE_UNIQUE_Violation(\n f\"More than one bundle with id {bundle.id}\"\n )\n seen[bundle.id] = 1\n\n _num_bundles = len(request.bundles)\n logger.info(f\"KSR-BUNDLE-UNIQUE: All {_num_bundles} bundles have unique ids\")\n return",
"def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")",
"def has_id(self):\n return not self.id is None",
"def test_amenity_ids(self):\n place = Place()\n self.assertTrue(hasattr(place, \"amenity_ids\"))\n self.assertEqual(type(place.amenity_ids), list)\n self.assertEqual(place.amenity_ids, [])",
"def __set_ids__(self, line):\r\n try:\r\n self.ids = [x for x in ' '.join(line.split()).split(\" \")]\r\n except ValueError:\r\n logging.error(\"Error while reading process id's: INVALID NUMBER.\")\r\n return False\r\n except Exception as cause:\r\n logging.error(\"Error while reading process id's: %s\", cause)\r\n return False\r\n if len(self.ids) != self.no_process:\r\n logging.error(\"Given number of ids and process count doesn't match\")\r\n return False\r\n logging.info([\"Process ids: \"] + self.ids)\r\n return True",
"def test_for_empty_list(self):\n emptylist = []\n self.assertEqual(self.place.amenity_ids, emptylist)",
"def test_ids_maker(self):\n firstins = BaseModel()\n secondins = BaseModel()\n self.assertNotEqual(firstins, secondins)",
"def is_id(self):\n found = False\n for p in self.ant:\n for prop in self.con:\n if p == prop:\n found = True\n return found",
"def _is_initiated(self, context):\n user_data = context.user_data\n has_attr = 'id' in user_data and 'email' in user_data\n has_values = self._id_valid(user_data['id'])\n return has_attr and has_values",
"def __len__(self) -> int:\n return len(self.ids)",
"def test_SampleIds(self):\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\n \"PC.634\", \"PC.635\", \"PC.636\"]\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.empty_map.SampleIds\n self.assertEqual(obs, [])",
"def test_init_id_equality(self):\n rect = Rectangle(1, 1)\n self.assertNotEqual(rect.id, Rectangle(1, 1).id)\n self.assertNotEqual(rect.id, Rectangle(1, 1, id=None).id)\n self.assertEqual(Rectangle(1, 1, 1, 1, 0).id, 0)\n self.assertEqual(Rectangle(1, 1, id=0.0).id, 0.0)\n self.assertEqual(Rectangle(1, 1, id=\"0\").id, \"0\")\n self.assertEqual(Rectangle(1, 1, id=[0]).id, [0])\n self.assertEqual(Rectangle(1, 1, id={0}).id, {0})\n self.assertEqual(Rectangle(1, 1, id=(0, 0)).id, (0, 0))\n self.assertEqual(Rectangle(1, 1, id={0: 0}).id, {0: 0})",
"def test_SampleIds(self):\r\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\r\n \"PC.634\", \"PC.635\", \"PC.636\"]\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.no_metadata.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.empty_map.SampleIds\r\n self.assertEqual(obs, [])"
] |
[
"0.6757793",
"0.6522094",
"0.6364243",
"0.6272424",
"0.6165194",
"0.60231423",
"0.60110307",
"0.6009121",
"0.59739226",
"0.5962889",
"0.590866",
"0.5889949",
"0.58549154",
"0.5829648",
"0.5811595",
"0.5807346",
"0.5774812",
"0.57503724",
"0.5718341",
"0.5718137",
"0.57145154",
"0.56953794",
"0.56795853",
"0.56672674",
"0.56644267",
"0.5646718",
"0.5646546",
"0.5639053",
"0.56262344",
"0.5616667"
] |
0.7212387
|
0
|
We use a different broker_url when running the workers than when running within the Flask app. Generate an appropriate URL with that in mind.
|
def broker_url(host):
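    # CONFIG_JOB_QUEUE is expected to provide broker_scheme, username, password and port;
    # host is passed per call so the workers and the Flask app can each point at their own broker host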
return '{broker_scheme}://{username}:{password}@{host}:{port}//'.format(host=host, **CONFIG_JOB_QUEUE)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def broker_url(settings):\n broker = 'amqp://'\n broker += settings.get('BROKER_USER') or 'guest'\n broker += ':' + (settings.get('BROKER_PASSWORD') or 'guest')\n broker += '@' + (settings.get('BROKER_HOST') or 'localhost')\n broker += ':' + (settings.get('BROKER_PORT') or '5672')\n\n return broker",
"def test_broker_url(self):\n rabbitmq = RabbitMQContainer()\n assert rabbitmq.broker_url() == 'amqp://{}:{}@{}/{}'.format(\n RabbitMQContainer.DEFAULT_USER,\n RabbitMQContainer.DEFAULT_PASSWORD,\n RabbitMQContainer.DEFAULT_NAME,\n RabbitMQContainer.DEFAULT_VHOST)\n\n rabbitmq = RabbitMQContainer(\n vhost='/', user='guest', password='guest', name='amqp')\n assert rabbitmq.broker_url() == 'amqp://guest:guest@amqp//'",
"def get_broker_jdbc_url():\n if not CONFIG.DATA_BROKER_DATABASE_URL:\n raise ValueError(\"DATA_BROKER_DATABASE_URL config val must provided\")\n\n return get_jdbc_url_from_pg_uri(CONFIG.DATA_BROKER_DATABASE_URL)",
"def __init__(self, worker_id=0, base_port=5005):",
"def main(args=None):\n if not args:\n args = sys.argv[1:]\n\n CONF.setup(args) # re-conf\n\n # upstream link configured in local broker\n consume(work, 'stableIDs', 'completed')",
"def base_url(self):\n return \"http://{0}:{1}/app\".format(self.host, self.port)",
"def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)",
"def __init__(self, ogc_config: OGCConfiguration, connection_file: str, catalog_name: str = CATALOG_FILENAME):\n\n super().__init__(ogc_config, connection_file, catalog_name)\n\n # Creating an MQTT Subscriber\n self._mqtt_subscriber = mqtt.Client(BROKER_HAMBURG_CLIENT_ID)\n self._mqtt_subscriber.on_connect = mqtt_util.on_connect\n self._mqtt_subscriber.on_disconnect = mqtt_util.automatic_reconnection\n self._mqtt_subscriber.on_message = self.on_message_received\n\n # Loading broker info from connection file\n if connection_file:\n connection_config_file = util.load_from_file(connection_file)\n try:\n self._sub_broker_address = connection_config_file[MQTT_KEY][MQTT_SUB_BROKER_KEY]\n except KeyError as ex:\n logging.critical('Missing parameter: \"'+str(ex)+'\" in configuration file.')\n sys.exit(ERROR_MISSING_PARAMETER)\n try:\n self._sub_broker_port = int(connection_config_file[MQTT_KEY][MQTT_SUB_BROKER_PORT_KEY])\n except KeyError as ex:\n logging.warning('No parameter \"'+str(ex)+'\" specified, default value used: '+str(BROKER_DEFAULT_PORT))\n self._sub_broker_port = BROKER_DEFAULT_PORT\n try:\n self._sub_broker_keepalive = int(connection_config_file[MQTT_KEY][MQTT_SUB_BROKER_KEEP_KEY])\n except KeyError:\n logging.warning(\n \"No subscribing broker keepalive specified, default one used: \"+str(DEFAULT_KEEPALIVE)+\" s\")\n self._sub_broker_keepalive = DEFAULT_KEEPALIVE\n\n # Loading broker info from environmental variables\n else:\n try:\n self._sub_broker_address = os.environ[MQTT_SUB_BROKER_KEY.upper()]\n except KeyError as ex:\n logging.critical('Missing environmental variable: \"'+str(ex)+'\"')\n sys.exit(ERROR_MISSING_ENV_VARIABLE)\n try:\n self._sub_broker_port = int(os.environ[MQTT_SUB_BROKER_PORT_KEY.upper()])\n except KeyError as ex:\n logging.warning('Missing Enviromental variable: \"' + str(ex) + '\" default value used: '\n + str(BROKER_DEFAULT_PORT))\n self._sub_broker_port = BROKER_DEFAULT_PORT\n\n try:\n self._sub_broker_keepalive = int(os.environ[MQTT_SUB_BROKER_KEEP_KEY.upper()])\n except KeyError:\n logging.warning(\"No subscribing broker keepalive specified, will be used the default one: \"\n + str(DEFAULT_KEEPALIVE) + \" s\")\n self._sub_broker_keepalive = DEFAULT_KEEPALIVE\n\n # broker connection test\n logging.info(\"Try to connect to broker: %s:%d for LISTENING...\"\n % (self._sub_broker_address, self._sub_broker_port))\n logging.debug(\"Client id is: '\" + BROKER_HAMBURG_CLIENT_ID + \"'\")\n self._mqtt_subscriber.connect(self._sub_broker_address, self._sub_broker_port, self._sub_broker_keepalive)",
"def _get_local_endpoint():\n return \"https://%s:8446\" % socket.getfqdn()",
"def url(self):\n return urls.Url(\n path=self.serving_path,\n host=self.pod.env.host,\n port=self.pod.env.port,\n scheme=self.pod.env.scheme)",
"def response_kafka_connection_url(self) -> str:\n return self._response_kafka_connection_url",
"def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()",
"def app(self):\n app = Flask('testapp')\n app.config.update({\n 'HADES_CELERY_APP_NAME': 'test',\n 'HADES_BROKER_URI': 'rpc://broker/',\n 'HADES_RESULT_BACKEND_URI': 'rpc://backend/',\n })\n return app",
"def __init__(self):\n # BASE_DIR:///artifice/scraper/\n self.BASE_DIR = os.path.dirname(loc)\n\n # prototypes\n self._eth0 = '0.0.0.0'\n self._exposed_port = 8080\n self._db_name = 'site.db'\n self._redis_pword = 'password'\n self._redis_host = 'localhost'\n self._redis_port = 6379\n self._celery_broker_uname = 'michael'\n self._celery_broker_pword = 'michael123'\n self._celery_broker_host = 'localhost'\n self._celery_broker_virtual_host = 'michael_vhost'\n\n # flask\n self.TESTING = False\n self.URL_PREFIX = ''\n self.FLASK_PORT = self._exposed_port\n self.FLASK_HOST = '0.0.0.0'\n self.FLASK_DEBUG = False\n self.FLASK_USE_RELOADER = False\n self.FLASK_THREADED = True\n\n # logging\n self.LOG_FILE = 'flask.log'\n self.LOG_LEVEL = 'INFO'\n self.CELERY_LOG_LEVEL = 'ERROR'\n self.CELERY_LOG_FILE = 'celery.log'\n self.STDOUT = True\n\n # database\n self.DROP_TABLES = True\n self.SQLALCHEMY_TRACK_MODIFICATIONS = False\n self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n os.path.join(self.BASE_DIR, self._db_name))\n\n # redis\n self.REDIS_URL = 'redis://{}:@{}:{}/0'.format(\n self._redis_pword,\n self._redis_host,\n self._redis_port)\n self.REDIS_HIT_COUNTER = 'HIT_COUNTER'\n\n # defaults\n self.ARGS_DEFAULT_LIMIT = 10\n self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE']\n\n self.SUPERVISOR_ENABLED = True\n self.SUPERVISOR_DEBUG = False\n self.SUPERVISOR_POLITE = 1\n\n # celery\n self.CELERY_WORKERS = 8\n self.CELERY_MODULE = 'background'\n self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format(\n self._celery_broker_uname,\n self._celery_broker_pword,\n self._celery_broker_host,\n self._celery_broker_virtual_host)\n self.CELERY_BACKEND = 'rpc://'\n self.CELERY_INCLUDE = ['artifice.scraper.background.tasks']\n\n # endpoints\n self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port)\n self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port)\n self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)",
"def __init__(self, config=None, broker=None):\n pass",
"def start(config, brokerTimeout = 60.0):\n \n manager = multiprocessing.Manager()\n serverUpEvent = manager.Event()\n broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))\n broker.daemon = True\n broker.name = 'STOMP-Broker'\n broker.start()\n\n serverUpEvent.wait(brokerTimeout)\n if not serverUpEvent.is_set():\n logger.fatal(\"Broker not available after %.1f seconds. Giving up\", brokerTimeout)\n return -1\n #host side logic\n host = config.get('Broker', 'host') \n port = int(config.get('Broker', 'port'))\n username = config.get('Broker', 'username')\n password = config.get('Broker', 'password')\n\n hostEngine = HostStompEngine(config)\n stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)\n \n HostXMLRPCService(config).makeEngineAccesible(hostEngine)\n\n\n reactor.connectTCP(host, port, stompProtocolFactory)\n reactor.run()",
"def get_server_url():\n try:\n url = os.environ['API_HOST']\n # print('[ OK ] Server url loaded: ', url)\n except KeyError:\n url = 'http://localhost:3300/'\n print('[ WARNING ] API_HOST environment variable was not found. default server url was set at: ', url)\n\n return url",
"def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"",
"def __init__(self, broker_address, handle_json_message_data=True):\n self.broker_address = broker_address\n self.producer = Producer({'bootstrap.servers': self.broker_address})\n self.handle_json_message_data = handle_json_message_data",
"def api_endpoint():\n return 'localhost'",
"def service_url(self):\n return \"http://127.0.0.1:%d/wd/hub\"%self.port",
"def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)",
"def get_manager_file_server_blueprints_root_url():\n return os.environ[MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY]",
"def run_worker():\n listen = ['default']\n conn = Redis(host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()",
"def create_url_from_appender(appender):\n next_url = str(CFG.base_server + appender)\n return(next_url)",
"def get(port, options):\n worker_model = options.worker_model\n if worker_model == 'old-inline':\n return InlineBroker(port, options)\n if worker_model == 'old-threads':\n return MultiThreadedBroker(port, options)\n raise ValueError('unsupported value for --worker-model: %s' % worker_model)",
"def bk_worker(sockets, port):\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n websocket_origins = [f\"{BOKEH_ADDR}:{port}\", f\"{FLASK_ADDR}:{FLASK_PORT}\"]\n\n # bokeh applications\n _bkapp_maps = Application(FunctionHandler(bkapp_maps))\n _bkapp_trends = Application(FunctionHandler(bkapp_trends))\n _bkapp_histograms = Application(FunctionHandler(bkapp_histograms))\n _bkapp_models = Application(FunctionHandler(bkapp_models))\n\n bokeh_tornado = BokehTornado({'/bkapp-maps': _bkapp_maps,\n '/bkapp-trends': _bkapp_trends,\n '/bkapp-histograms': _bkapp_histograms,\n '/bkapp-models': _bkapp_models},\n extra_websocket_origins=websocket_origins,\n **{'use_xheaders': True})\n\n bokeh_http = HTTPServer(bokeh_tornado, xheaders=True)\n bokeh_http.add_sockets(sockets)\n server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()",
"def get_sdk_worker():\n return os.path.join(util.get_flink_conf_dir(), \"sdk_worker.sh\")",
"def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)",
"def main():\n my_painting_mqtt_client = MyPaintingMQTTClient()\n my_painting_mqtt_client.run_app()"
] |
[
"0.68481046",
"0.63808596",
"0.6348577",
"0.5819164",
"0.574416",
"0.57123405",
"0.55717474",
"0.55477244",
"0.55355334",
"0.5522162",
"0.5520994",
"0.5511979",
"0.55012876",
"0.5496532",
"0.5467085",
"0.5446945",
"0.54385513",
"0.5428514",
"0.54229367",
"0.5410307",
"0.53947586",
"0.536469",
"0.5357858",
"0.5288478",
"0.528583",
"0.52786654",
"0.52752364",
"0.5270237",
"0.5265837",
"0.52653116"
] |
0.73196733
|
0
|
approxdp outputs eps as a function of delta based on rdp calculations
|
def approxdp(delta):
if delta < 0 or delta > 1:
print("Error! delta is a probability and must be between 0 and 1")
if delta == 0:
return rdp(np.inf)
else:
            def fun(x):  # the input is the RDP's \alpha
if x <= 1:
return np.inf
else:
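                    # BBGHS_conversion picks the tighter RDP-to-(eps, delta) bound
                    # (presumably Balle-Barthe-Gaboardi-Hsu-Sato); the else branch is the
                    # classical eps = rdp(x) + log(1/delta)/(x-1) conversion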
if BBGHS_conversion:
return np.maximum(rdp(x) + np.log((x-1)/x)
- (np.log(delta) + np.log(x))/(x-1), 0)
else:
return np.log(1 / delta) / (x - 1) + rdp(x)
results = minimize_scalar(fun, method='Brent', bracket=(1,2), bounds=[1, alpha_max])
if results.success:
return results.fun
else:
            # There are cases when a certain \delta is not feasible.
            # For example, let p and q be uniform: the privacy R.V. is either 0 or \infty, and unless all \infty
            # events are taken care of by \delta, \epsilon cannot be < \infty.
return np.inf
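
A minimal, self-contained usage sketch (an illustration, not the library's own API): the closure above reads `rdp`, `BBGHS_conversion`, and `alpha_max` from its enclosing scope, so the hypothetical factory `make_approxdp` below recreates that scope. The Gaussian-style RDP curve rdp(alpha) = alpha / (2 * sigma**2) is assumed purely for demonstration, and the `bounds` argument is dropped because Brent minimization uses only the bracket.

import numpy as np
from scipy.optimize import minimize_scalar

def make_approxdp(rdp, alpha_max=200, BBGHS_conversion=True):
    # Rebuild the closure above from an RDP curve rdp(alpha).
    def approxdp(delta):
        if delta == 0:
            return rdp(np.inf)

        def fun(x):  # x is the RDP order \alpha
            if x <= 1 or x > alpha_max:
                return np.inf
            if BBGHS_conversion:
                return np.maximum(rdp(x) + np.log((x - 1) / x)
                                  - (np.log(delta) + np.log(x)) / (x - 1), 0)
            return np.log(1 / delta) / (x - 1) + rdp(x)

        results = minimize_scalar(fun, method='Brent', bracket=(1, 2))
        return results.fun if results.success else np.inf
    return approxdp

sigma = 2.0
gaussian_rdp = lambda alpha: alpha / (2 * sigma ** 2)  # illustrative RDP curve
eps_of_delta = make_approxdp(gaussian_rdp)
print(eps_of_delta(1e-5))  # eps at delta = 1e-5 (roughly 2.2 for this toy curve)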
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def eps(newEps=None):\n\n global _eps\n if newEps is not None:\n _eps = newEps\n return _eps",
"def draw_p_to_eps(p):\n return ppf((p + 1.0) / 2)",
"def epsilon_delta(self):",
"def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def eps(self):\n return self._eps",
"def epsrel(self) -> float:\n return self._epsrel",
"def ADP (self):",
"def diff_1st_fwrdbwrd(fp, fm, eps):\n \n return (fp - fm)/eps",
"def rht_to_dp(temp, rh):\r\n # from https://en.wikipedia.org/wiki/Dew_point\r\n dp = temp - (0.36 * (100 - rh))\r\n # Check Calc\r\n # print(\"Temp: {} RH: {} DP: {}\".format(temp, rh, dp))\r\n return dp",
"def eps(self):\n return self.eps_mask*self.eps_scheduler.value",
"def optimizeEps(group, rep, fig=None):\n\tX = group[[\"ae1\", \"ae2\"]].to_numpy()\n\tneigh = NearestNeighbors(n_neighbors=2)\n\tnbrs = neigh.fit(X)\n\tdist, idx = nbrs.kneighbors(X)\n\t\n\tdist = np.sort(dist, axis=0)\n\td = dist[:,1]\n\tdist[:,0] = idx[:,0]\n\t#print(dist)\n\t#if fig is not None:\n\t#ax=fig.add_subplot(10,10,rep)\n\t#ax.plot(d)\n\t#plt.show()\n\t\n\trotor = Rotor()\n\trotor.fit_rotate(dist)\n\telbow_index = rotor.get_elbow_index()\n\t#ax.axhline(dist[elbow_index][1])\n\treturn(dist[elbow_index][1])",
"def frac_yea_dems(dems, reps):\n frac = np.sum(dems) / len(dems)\n return frac",
"def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def approxdp_func_to_fdp(func, delta_func=False):\n #\n # By default, logdelta_func is False, and func is eps as a function of delta\n # fpr = maximize_{delta} approxdp_to_fdp(eps(delta),delta)(fpr)\n # if delta_func is True, it means that 'func' is a delta as a function of eps, then\n # fpr = maximize_{delta} approxdp_to_fdp(eps,delta(eps))(fpr)\n if delta_func:\n def fdp(fpr):\n\n assert(0 <= fpr <= 1)\n if fpr == 1:\n return 0\n\n def fun(eps):\n fdp_eps = approxdp_to_fdp(eps, func(eps))\n fnr = fdp_eps(fpr)\n return -fnr\n\n results = minimize_scalar(fun, bounds=[0, +np.inf], options={'disp': False})\n if results.success:\n return -results.fun\n else:\n return 0\n else:\n def fdp(fpr):\n assert(0 <= fpr <= 1)\n if fpr == 1:\n return 0\n\n def fun(delta):\n fdp_delta = approxdp_to_fdp(func(delta), delta)\n fnr = fdp_delta(fpr)\n return -fnr\n\n results = minimize_scalar(fun, method='Bounded', bounds=[0, 1-fpr],\n options={'disp': False})\n if results.success:\n return -results.fun\n else:\n return 0\n return fdp",
"def do_dp(self, useDP):\n if useDP:\n ise.useDualPoint(int(useDP))\n\n print(\"\\tdual point: \" + str(ise.usingDualPoint()))",
"def dp(t, h):\n x = 1 - 0.01 * h;\n\n dew_point = (14.55 + 0.114 * t) * x\n dew_point += ((2.5 + 0.007 * t) * x) ** 3\n dew_point += (15.9 + 0.117 * t) * x ** 14\n dew_point = t - dew_point\n\n return dew_point",
"def pagd(x0,gradient,p,min_step=1e-15,max_iters=100,previous_x=0,\n previous_grad=0,verbose=0,**kwargs):\n x = x0; i = 0; step_size = np.Inf\n while step_size > min_step and i < max_iters:\n x0 = x\n grad = gradient(x0)\n dgrad = grad-previous_grad\n norm = np.linalg.norm(dgrad)\n if norm == 0.0:\n break\n learning_rate = abs(np.sum((x0-previous_x)*dgrad)/norm**2)\n previous_x = x0\n previous_grad = grad\n x = p(x0 - learning_rate*grad)\n step_size = np.linalg.norm(x-x0)\n i += 1\n \n specs = {\n 'iterations' : i,\n 'final_step' : step_size,\n 'final_gradient' : np.linalg.norm(grad),\n 'previous_x' : previous_x,\n 'previous_grad' : previous_grad,\n }\n if step_size<=min_step or norm == 0.0:\n specs['stop'] = True\n else:\n specs['stop'] = False\n return x, specs",
"def diff_2nd_xy(fpp, fpm, fmp, fmm, eps1, eps2):\n \n return (fpp - fpm - fmp + fmm)/(4.0*eps1*eps2)",
"def calc_eps(self, steps):\n eps_threshold = self.EPS_END + (self.EPS_START - self.EPS_END) * \\\n math.exp(-1. * steps / self.EPS_DECAY)\n return eps_threshold",
"def rdp(points, epsilon):\n dmax = 0.0\n index = 0\n for i in range(1, len(points) - 1):\n d = point_line_distance(points[i], points[0], points[-1])\n if d > dmax:\n index = i\n dmax = d\n if dmax >= epsilon:\n results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)\n else:\n results = [points[0], points[-1]]\n return results",
"def rdp(points, epsilon):\n dmax = 0.0\n index = 0\n for i in range(1, len(points) - 1):\n d = point_line_distance(points[i], points[0], points[-1])\n if d > dmax:\n index = i\n dmax = d\n if dmax >= epsilon:\n results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)\n else:\n results = [points[0], points[-1]]\n return results",
"def rdp(points, epsilon):\r\n dmax = 0.0\r\n index = 0\r\n for i in range(1, len(points) - 1):\r\n d = point_line_distance(points[i], points[0], points[-1])\r\n if d > dmax:\r\n index = i\r\n dmax = d\r\n if dmax >= epsilon:\r\n results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)\r\n else:\r\n results = [points[0], points[-1]]\r\n return results",
"def plot_prec_value2(self):\n self.query_dict={'code':code2.value,'exchange':exchange2.value,\\\n 'structure':struct2.value,'element':element2.value,'properties':prop2.value}\n print ('POSTING', self.query_dict)\n self.query_api(endpoint='evk')\n\n layout_doc.children[4].children[1] = self.plot_pade_figure()",
"def plot_prec_value1(self):\n# self.query_dict={'code':code.value,'exchange':exchange.value,\\\n# 'structure':struct.value,'element':element.value,'properties':prop.value}\n# print ('POSTING', self.query_dict)\n# self.query_api(endpoint='evk')\n\n #layout_doc.children[4].children[0] = self.plot_pade_figure()\n\n\n self.query_dict={'code':code.value,'exchange':exchange.value,\\\n 'structure':struct.value,'element':element.value,'properties':prop.value}\n print ('POSTING', self.query_dict)\n self.query_api(endpoint='evk')\n\n layout_doc.children[4].children[0] = self.plot_pade_figure()",
"def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)",
"def set_fd_step(eps):\n assert isinstance(eps, (float, complex))\n global EPS\n EPS = eps",
"def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon",
"def eps_percent_diff(self, eps_percent_diff):\n\n self._eps_percent_diff = eps_percent_diff",
"def test_rr_pz(results):\n truepz = np.asarray([4.43677606e-09, 7.89531535e-01, 7.89747372e-01,\n 8.04473756e-08, 2.22044605e-15])\n test_pz = results.params_pvalue()\n assert test_pz == pytest.approx(truepz)",
"def rel_error(deriv, orig):\n\treturn abs(orig - deriv) / abs(orig)"
] |
[
"0.6053316",
"0.6033463",
"0.59292716",
"0.5863483",
"0.57953846",
"0.5786605",
"0.56892174",
"0.56870705",
"0.5685585",
"0.56785506",
"0.5676544",
"0.5573095",
"0.5561564",
"0.5548653",
"0.5540436",
"0.5539918",
"0.54265237",
"0.5423283",
"0.54080003",
"0.53545034",
"0.53545034",
"0.532522",
"0.52542377",
"0.52509576",
"0.52420247",
"0.5232004",
"0.5219196",
"0.52133197",
"0.520626",
"0.5196884"
] |
0.7399501
|
0
|
convert an approxdp function to an fdp (trade-off) function
|
def approxdp_func_to_fdp(func, delta_func=False):
#
    # By default, delta_func is False, and func is eps as a function of delta
# fpr = maximize_{delta} approxdp_to_fdp(eps(delta),delta)(fpr)
# if delta_func is True, it means that 'func' is a delta as a function of eps, then
# fpr = maximize_{delta} approxdp_to_fdp(eps,delta(eps))(fpr)
if delta_func:
def fdp(fpr):
assert(0 <= fpr <= 1)
if fpr == 1:
return 0
def fun(eps):
fdp_eps = approxdp_to_fdp(eps, func(eps))
fnr = fdp_eps(fpr)
return -fnr
results = minimize_scalar(fun, bounds=[0, +np.inf], options={'disp': False})
if results.success:
return -results.fun
else:
return 0
else:
def fdp(fpr):
assert(0 <= fpr <= 1)
if fpr == 1:
return 0
def fun(delta):
fdp_delta = approxdp_to_fdp(func(delta), delta)
fnr = fdp_delta(fpr)
return -fnr
results = minimize_scalar(fun, method='Bounded', bounds=[0, 1-fpr],
options={'disp': False})
if results.success:
return -results.fun
else:
return 0
return fdp
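
A usage sketch, assuming the function above plus NumPy/SciPy are in scope in the same script. This excerpt does not show `approxdp_to_fdp`, so a stand-in implementing the standard (eps, delta)-DP trade-off curve is supplied here, and the eps(delta) curve is a toy example.

import numpy as np
from scipy.optimize import minimize_scalar  # used inside approxdp_func_to_fdp above

def approxdp_to_fdp(eps, delta):
    # Trade-off (f-DP) curve implied by an (eps, delta)-DP guarantee.
    def fdp(fpr):
        return np.maximum(0.0, np.maximum(1 - delta - np.exp(eps) * fpr,
                                          np.exp(-eps) * (1 - delta - fpr)))
    return fdp

eps_of_delta = lambda delta: np.log(1 / max(delta, 1e-12))  # toy eps(delta) curve
fdp = approxdp_func_to_fdp(eps_of_delta)
print(fdp(0.05))  # lower bound on the FNR at FPR = 0.05 (roughly 0.55 here)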
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def derivative_func(t, x, Approx_func):\n return x.dot(Approx_func)",
"def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)",
"def approxdp(delta):\n\n if delta < 0 or delta > 1:\n print(\"Error! delta is a probability and must be between 0 and 1\")\n if delta == 0:\n return rdp(np.inf)\n else:\n def fun(x): # the input the RDP's \\alpha\n if x <= 1:\n return np.inf\n else:\n if BBGHS_conversion:\n return np.maximum(rdp(x) + np.log((x-1)/x)\n - (np.log(delta) + np.log(x))/(x-1), 0)\n else:\n return np.log(1 / delta) / (x - 1) + rdp(x)\n\n results = minimize_scalar(fun, method='Brent', bracket=(1,2), bounds=[1, alpha_max])\n if results.success:\n return results.fun\n else:\n # There are cases when certain \\delta is not feasible.\n # For example, let p and q be uniform the privacy R.V. is either 0 or \\infty and unless all \\infty\n # events are taken cared of by \\delta, \\epsilon cannot be < \\infty\n return np.inf",
"def diff_1st_fwrdbwrd(fp, fm, eps):\n \n return (fp - fm)/eps",
"def InterpolateFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def test_fdr_correction(self):\r\n pvals = array([.1, .7, .5, .3, .9])\r\n exp = array([.5, .7 * 5 / 4., .5 * 5 / 3., .3 * 5 / 2., .9])\r\n obs = fdr_correction(pvals)\r\n self.assertFloatEqual(obs, exp)",
"def _approximate(self, f, num, lobatto, use_mp, dps):\n with self.context(use_mp, dps):\n pts = self.collocation_points(num, lobatto=lobatto, use_mp=use_mp, dps=dps)\n self.set_coefficients(\n [f(x) for x in pts],\n physical_space=True, lobatto=lobatto, use_mp=use_mp, dps=dps\n )",
"def get_fy(self):\n\n def fy(p):\n p0, p1 = p\n e = anp.exp(-(p0 + p1) * self.ts)\n x = (\n 1\n / (-p0 - p1)\n * anp.array(\n [\n [-p1 - p0 * e, -p1 + p1 * e],\n [-p0 + p0 * e, -p0 - p1 * e],\n ]\n )\n )\n y = anp.einsum(\"mnr,n->mr\", x, self.x0)\n return y\n\n return fy",
"def finite_difference(f, p, h):\n tweaker = FunctionTweak(f, h)\n tweaked_funcs = tweaker(np.atleast_2d(p))\n main_func = tweaked_funcs[0]\n\n def finite_func(t):\n list_of_diffs = [(tweaked_func(t) - main_func(t))/h\n for tweaked_func in tweaked_funcs[1:]]\n return np.column_stack(list_of_diffs)\n\n return finite_func",
"def fd_1deriv(f, eps, x, dx, scheme):\n if scheme == 'fd1' :\n f0 = f(x)\n fp = f(x+eps*dx)\n return (1.0/eps)*(fp-f0)\n elif scheme == 'bd1':\n f0 = f(x)\n fm = f(x-eps*dx)\n return (1.0/eps)*(f0-fm)\n elif scheme == 'cd2':\n fp = f(x+eps*dx)\n fm = f(x-eps*dx)\n return (0.5/eps)*(fp-fm)\n elif scheme == 'cd6':\n fp3 = f(x+3*eps*dx)\n fp2 = f(x+2*eps*dx)\n fp1 = f(x+eps*dx)\n fm1 = f(x-eps*dx)\n fm2 = f(x-2*eps*dx)\n fm3 = f(x-3*eps*dx)\n return (1.0/60.0/eps)*(45.0*(fp1-fm1)-9.0*(fp2-fm2)+(fp3-fm3))",
"def partial_derivative(f, x, i, epsilon = 1e-10):\n x_ = np.copy(x).astype(np.float64)\n x_[i] = x_[i] + epsilon\n value = (f(x_) - f(x)) / epsilon\n\n return value",
"def finite_difference(target, values, p, DELTA = 1.e-7):\n import copy\n functional = target(values)\n tempvals = copy.deepcopy(values)\n tempvals.reference[p] += DELTA\n\n dfunctional = target(tempvals)\n #calculate by finite_difference\n finite_g = (dfunctional-functional )/DELTA\n return finite_g",
"def fundemental_derivative(tab, spec, *XYf):\n if tab._backend != 'vdw':\n raise ValueError('This derived variable is only compatible with the vdw backend!')\n XYf_DT = XYf[:]\n rho = XYf_DT[0]\n units = EosUnits(tab.Pt_DT._requested_units, 'cgs')\n Pt = tab.get_table('P{s}_DT', spec)(*XYf_DT)*units.o2r('P')\n delta = tab.Pt_DT['delta']\n a = tab.Pt_DT['a']\n b = tab.Pt_DT['b']\n P_frac_1 = (Pt + a*rho**2)/(1./rho - b)**2\n P_frac_2 = rho*(Pt + a*rho**2)/(1./rho - b)\n num = (delta+1)*(delta+2) * P_frac_1 - 6*a*rho**4 \n denum = 2*(delta+1)*P_frac_2 - 4*a*rho**4\n return num/denum",
"def func(x, f, fp):\n\n return np.sqrt((1+fp(x)**2) / (2 * g * f(x)))",
"def InterpolationFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def ppf(self,x):\n ppfValue = self.invCDF(x)\n return ppfValue",
"def approx_gradient(f, x, epsilon):\n n = len(x)\n g = np.zeros(n)\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g[i] = (f(x + ei) - f(x - ei)) / epsilon\n ei[i] = 0\n return g",
"def approx_gradient(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n g = np.zeros((n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g[i, :] = (f((x.T + ei).T, *args) - f((x.T - ei).T, *args)) / epsilon\n ei[i] = 0\n return g.squeeze()",
"def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500",
"def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)",
"def multiInterp(x, xp, fp, extrap='bounded'):\r\n # Sanity\r\n x = np.asarray(x)\r\n xp = np.asarray(xp)\r\n assert fp.shape[1]==len(xp), 'Second dimension of fp should have the same length as xp'\r\n\r\n j = np.searchsorted(xp, x) - 1\r\n dd = np.zeros(len(x))\r\n bOK = np.logical_and(j>=0, j< len(xp)-1)\r\n bLower =j<0\r\n bUpper =j>=len(xp)-1\r\n jOK = j[bOK]\r\n dd[bOK] = (x[bOK] - xp[jOK]) / (xp[jOK + 1] - xp[jOK])\r\n jBef=j \r\n jAft=j+1\r\n # \r\n # Use first and last values for anything beyond xp\r\n jAft[bUpper] = len(xp)-1\r\n jBef[bUpper] = len(xp)-1\r\n jAft[bLower] = 0\r\n jBef[bLower] = 0\r\n if extrap=='bounded':\r\n pass\r\n # OK\r\n elif extrap=='nan':\r\n dd[~bOK] = np.nan\r\n else:\r\n raise NotImplementedError()\r\n\r\n return (1 - dd) * fp[:,jBef] + fp[:,jAft] * dd",
"def approximate_response_function(f, fstar):\n return (3 / 10) / (1 + 0.6 * (f / fstar)**2)",
"def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)",
"def eval_numerical_gradient(f, x, verbose = True, h = 0.00001):\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x) # iterate over all indexese in x\n it = np.nditer(x, flags = ['multi_index'], op_flags = ['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x+h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x-h)\n x[ix] = oldval # restore\n \n #compute the partial derivative with centered fromula.\n grad[ix] = (fxph - fxmh) / (2 * h)\n if verbose:\n print(ix, grad[ix])\n it.iternext()\n return grad",
"def diff_2nd_xx(fp, f0, fm, eps):\n \n return (fp - 2.0*f0 + fm)/eps**2",
"def diff_1st_central(fp, fm, eps):\n \n return (fp - fm)/(2.0*eps)",
"def _compute_f1(self, tp: torch.Tensor, fp: torch.Tensor,\n fn: torch.Tensor) -> float:\n precision = tp / (tp + fp).clamp(min=1e-8)\n recall = tp / (tp + fn).clamp(min=1e-8)\n f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)\n return float(f1.mean())",
"def _estimate_inverse_function(f, samples, f_domain=None, endpoints=None):\n if f_domain is None:\n f_domain = f.domain\n xs = np.linspace(*f_domain, samples)\n ys = [f(x) for x in xs]\n if endpoints is not None:\n fa, fb = endpoints\n if fa is not None:\n ys[0] = fa\n if fb is not None:\n ys[-1] = fb\n ys, xs = _filter_nonmonotone_data_points(ys, xs)\n if ys[0] > ys[-1]: # reversed order\n ys = list(reversed(ys))\n xs = list(reversed(xs))\n # PchipInterpolator guarantees monotonicity of interpolant\n interp = PchipInterpolator(ys, xs, extrapolate=True)\n return interp"
] |
[
"0.6328194",
"0.6294494",
"0.60166425",
"0.60097474",
"0.599097",
"0.5985712",
"0.59741604",
"0.5957853",
"0.5893827",
"0.5860794",
"0.5855705",
"0.5840527",
"0.5767828",
"0.5689522",
"0.5686636",
"0.5654464",
"0.56200004",
"0.56181914",
"0.5612902",
"0.5598372",
"0.55951947",
"0.5592147",
"0.5587857",
"0.55662227",
"0.5564661",
"0.556025",
"0.5554478",
"0.55136776",
"0.5504341",
"0.5492163"
] |
0.68981594
|
0
|
split by block and extract first-level data (timestamp and raw block data) into a dict
|
def _split_by_block(self, path=None,category='meminfo'):
with open(path, "r") as f:
text = f.read()
lst = re.split('zzz', text, flags=re.DOTALL) # to list based on time
lst = [x for x in lst if x] # remove empty strings
"""
Python 2.x
lst = map(lambda v: re.split('(\s\W{1,}\w{3}\s\w{3}\s\w{2,3}\s\d{2}:\d{2}:\d{2}\s\w{3}\s\d{4})', v), lst)
"""
lst = [re.split('(\s\W{1,}\w{3}\s\w{3}\s\w{2,3}\s\d{2}:\d{2}:\d{2}\s\w{3}\s\d{4})', v) for v in lst]
block_dict = []
for v in lst:
timestamp=v[1]
value=v[2]
_d = [{'timestamp':timestamp,
'category': category,
'sub_category': '',
'key': 'raw_block',
'value': value}]
block_dict.extend(_d)
return block_dict
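
A quick, illustrative check of the timestamp pattern used above; the sample text mimics an OSWatcher-style "zzz ***<date>" block header and is hypothetical.

import re

sample = "zzz ***Mon Jan 01 00:00:00 UTC 2024\nMemTotal: 16303428 kB\nMemFree: 1234567 kB\n"
parts = re.split(r'(\s\W{1,}\w{3}\s\w{3}\s\w{2,3}\s\d{2}:\d{2}:\d{2}\s\w{3}\s\d{4})', sample)
print(parts[1].strip())  # captured timestamp: ***Mon Jan 01 00:00:00 UTC 2024
print(parts[2].strip())  # raw block body (the meminfo key/value lines)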
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def extract_data_from_block(self,block,skip_lines=7):\n \n lines = block.split('\\n') #process block line by line\n chrom = {}\n chrom['title'] = lines[1] #block full title in 2nd line\n dump,chrom['short_title'] = lines[1].rsplit(' ',maxsplit=1) #taking last part of the line which is mz\n \n time = [] # time, minutes\n signal = [] #raw signal\n signal_rel =[] #relative signal\n \n for i in range(skip_lines,len(lines)):\n (t,s,sr) = lines[i].split()\n time.append(float(t))\n signal.append(float(s))\n signal_rel.append(float(sr))\n \n \n \n chrom['time']=time\n chrom['signal']=signal\n chrom['signal_rel']=signal_rel\n signal_zero = [s - min(signal) for s in signal] #baseline adjusted signal\n \n \n chrom['signal_zero'] = signal_zero\n \n return chrom",
"def blockParser(block):\n struct = []\n first = True\n record = False\n for line in block:\n if line.startswith('Structure #'):\n record = True\n if not first:\n yield struct\n struct = []\n first = False\n if record:\n struct.append(line)\n yield struct",
"async def get_sub_block_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:\n\n res = await self.db.execute(\"SELECT * from sub_block_records WHERE is_peak = 1\")\n row = await res.fetchone()\n await res.close()\n if row is None:\n return {}, {}\n\n peak: bytes32 = bytes.fromhex(row[0])\n cursor = await self.db.execute(\n \"SELECT header_hash,prev_hash,sub_height,sub_epoch_summary from sub_block_records\"\n )\n rows = await cursor.fetchall()\n await cursor.close()\n hash_to_prev_hash: Dict[bytes32, bytes32] = {}\n hash_to_height: Dict[bytes32, uint32] = {}\n hash_to_summary: Dict[bytes32, SubEpochSummary] = {}\n\n for row in rows:\n hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])\n hash_to_height[bytes.fromhex(row[0])] = row[2]\n if row[3] is not None:\n hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])\n\n sub_height_to_hash: Dict[uint32, bytes32] = {}\n sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}\n\n curr_header_hash = peak\n curr_sub_height = hash_to_height[curr_header_hash]\n while True:\n sub_height_to_hash[curr_sub_height] = curr_header_hash\n if curr_header_hash in hash_to_summary:\n sub_epoch_summaries[curr_sub_height] = hash_to_summary[curr_header_hash]\n if curr_sub_height == 0:\n break\n curr_header_hash = hash_to_prev_hash[curr_header_hash]\n curr_sub_height = hash_to_height[curr_header_hash]\n return sub_height_to_hash, sub_epoch_summaries",
"def to_dict(block):\n uncles = [hash_of(u) for u in block['uncles']]\n parentHash = hash_of(block['parentHash'])\n return {\n 'number': block['number'],\n 'timestamp': block['timestamp'],\n 'hash': hash_of(block),\n 'difficulty': block['difficulty'],\n 'totalDifficulty': block['totalDifficulty'],\n 'size': block['size'],\n 'gasUsed': block['gasUsed'],\n 'parentHash' : parentHash,\n 'gasLimit': block['gasLimit'],\n 'uncles': uncles,\n 'parents': [parentHash] + uncles,\n }",
"def _parseBlockette(self, blkt_type):\n blkt_dict = SimpleOrderedDict()\n # Check the blockette number.\n if blkt_type == 1000:\n unpack_values = unpack('%s3B' % self.endian,\n self.file.read(3))\n blkt_dict['Encoding Format'] = int(unpack_values[0])\n blkt_dict['Word Order'] = int(unpack_values[1])\n blkt_dict['Data Record Length'] = int(unpack_values[2])\n elif blkt_type == 1001:\n unpack_values = unpack('%sBBxB' % self.endian,\n self.file.read(4))\n blkt_dict['Timing quality'] = int(unpack_values[0])\n blkt_dict['mu_sec'] = int(unpack_values[1])\n blkt_dict['Frame count'] = int(unpack_values[2])\n return blkt_dict",
"def parse_data(block):\n rval = {\n 'Total' : None,\n 'Electronic' : None,\n 'Translational' : None,\n 'Rotational' : None,\n 'Vibrational' : None\n }\n for line in block.splitlines():\n if re.match(r'^\\s*Total', line):\n key = 'Total'\n elif re.match(r'^\\s*Electronic', line):\n key = 'Electronic'\n elif re.match(r'^\\s*Translational', line):\n key = 'Translational'\n elif re.match(r'^\\s*Rotational', line):\n key = 'Rotational'\n elif re.match(r'^\\s*Vibrational', line):\n key = 'Vibrational'\n else:\n key = None\n if key:\n words = line.strip().split()\n try:\n rval[key] = float(words[1])\n except ValueError:\n raise ValueError('Invalid thermodynamic format.')\n return rval",
"def transform_block(block):\n return {\n 'type': 'block',\n 'children': [transform_child(child) for child in block]\n }",
"def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']",
"def get_block_dict(self) -> dict:\n return self.blocks",
"def parse_file(filepath):\n with fitz.open(filepath) as doc:\n block_dict = {(idx + 1): page.getText(\"blocks\") for idx, page in enumerate(doc)}\n block_dict = {\n key: [block[4] for block in value] for key, value in block_dict.items()\n }\n return block_dict",
"def _getBlockettes(self):\n self.blockettes = SimpleOrderedDict()\n cur_blkt_offset = self.fixed_header['First blockette']\n # Loop until the beginning of the data is reached.\n while True:\n if cur_blkt_offset >= self.fixed_header['Beginning of data']:\n break\n # Seek to the offset.\n self.file.seek(cur_blkt_offset, 0)\n # Unpack the first two values. This is always the blockette type\n # and the beginning of the next blockette.\n blkt_type, next_blockette = unpack('%s2H' % self.endian,\n self.file.read(4))\n blkt_type = int(blkt_type)\n next_blockette = int(next_blockette)\n cur_blkt_offset = next_blockette\n self.blockettes[blkt_type] = self._parseBlockette(blkt_type)\n # Also break the loop if next_blockette is zero.\n if next_blockette == 0:\n break",
"def subparse(block):\n\n verses = []\n context = None\n for char in block:\n\n if char == \"[\":\n if verses: verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n verses.append({\"surah\": \"\", \"verse\": \"\", \"quran\": \"\"})\n context = \"surah\"\n elif char == \":\" and context == \"surah\":\n verses[-1][\"surah\"] = int(verses[-1][\"surah\"])\n context = \"verse\"\n elif char == \"]\":\n verses[-1][\"verse\"] = int(verses[-1][\"verse\"])\n context = \"quran\"\n else: verses[-1][context] += char\n\n verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n return verses",
"def readchunk(self):\n chunksize = self.readdword()\n chunktype = ChunkType(self.readword())\n chunkdata = self.readbytearr(chunksize - 6)\n return {\n \"type\": chunktype,\n \"data\": _ParseChunk(chunktype, chunkdata, self.PIXELSIZE),\n }",
"def parse_block(lines):\n term = {\"alt_id\": [], \"relationship\": []}\n splitkv = re.compile(r\"(^[a-zA-Z_]+): (.+)\")\n for line in lines:\n m = re.search(splitkv, line)\n # assert m, f\"unexpected line: {line}\"\n key = m.group(1)\n value = m.group(2)\n if key in [\"id\", \"name\", \"namespace\", \"is_obsolete\"]:\n term[key] = value\n elif key == \"alt_id\":\n term[\"alt_id\"].append(value)\n elif key == \"is_a\":\n goid = value.split(\"!\")[0].strip()\n term[\"relationship\"].append({\"type\": \"is_a\", \"id\": goid})\n elif key == \"relationship\":\n typedef, goid = value.split(\"!\")[0].strip().split(\" \")\n term[\"relationship\"].append({\"type\": typedef, \"id\": goid})\n return term",
"def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }",
"def test_block_split(self):\n block1 = self.geographies.find({ 'geoid': '150010210051016' }) \n self.assertEqual(block1.count(), 1)\n block1 = block1[0]\n\n split_block_pop = 448 \n block1_land_pct = float(184458) / 587158 # AREALAND_INT / AREALAND_2000\n block1_pop_2000 = int(block1_land_pct * split_block_pop)\n block1_pop_2010 = 22 \n block1_pop_delta = block1_pop_2010 - block1_pop_2000\n block1_pop_pct_change = float(block1_pop_delta) / block1_pop_2000\n\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['POPPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['HUPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['data']['2000']['P1']['P001001'], block1_pop_2000)\n self.assertAlmostEqual(float(block1['data']['2010']['P1']['P001001']), block1_pop_2010)\n self.assertAlmostEqual(float(block1['data']['delta']['P1']['P001001']), block1_pop_delta)\n self.assertAlmostEqual(float(block1['data']['pct_change']['P1']['P001001']), block1_pop_pct_change)",
"def parse_blocks(fblocks):\n print('Parse blocks: ', end='')\n result = []\n\n for line in fblocks:\n stripped = line.strip()\n if len(stripped) > 0 and stripped[0] != '#':\n match = re.match(r\"([0-9A-F]+)\\.{2}([0-9A-F]+);\\s+(.+)\", stripped)\n result.append({\n 'begin': int(match.group(1), 16),\n 'end': int(match.group(2), 16),\n 'name': match.group(3)\n })\n\n print('done')\n return result",
"def split_to_blocks(self,raw_data,token1='[MS Chromatogram]',token2 = '\\n\\n'):\n data = []\n \n pos = 0 # position in the raw data string\n flag = False # flag set if end of file reached \n while not flag:\n pos_A=raw_data.find(token1,pos) # searching for token1 - start of block\n if pos_A == -1: # if token1 not found - quit cycle\n flag = True\n else:\n pos_B=raw_data.find(token2,pos_A)\n \n if pos_B == -1: #if token2 (explicit block end) not found, take the rest of raw data string\n pos_B = len(raw_data)\n else:\n pos = pos_B\n \n block = raw_data[pos_A:pos_B]\n data.append(block)\n return data",
"def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main",
"def parse_header_block(lines):\n data = [line for line in lines[:MAX_HEADER_HEIGHT] if line.strip()]\n if not data or not INVITATION_RE.match(data[0]):\n return None\n out = {'number':None, 'type':None, 'date':None, 'time':None, 'place':None, 'datetime':None}\n for item in data:\n # typ a poradove cislo zastupitelstva\n m = TITLE_RE.match(item)\n if m:\n out['number'] = m.group(1).strip()\n out['type'] = m.group(2).strip()\n\n # den konani zastupitelstva\n m = TERM_DATE_RE.match(item)\n if m:\n try:\n out['date'] = date(int(m.group(3).strip()), int(m.group(2).strip()), int(m.group(1).strip()))\n except ValueError:\n pass\n\n # cas konani zastupitelstva\n m = TERM_TIME_RE.match(item)\n if m:\n try:\n out['time'] = time(int(m.group(1).strip()), int(m.group(2).strip()))\n except ValueError:\n pass\n\n # misto konani zastupitelstva\n m = PLACE_RE.match(item)\n if m:\n out['place'] = m.group(1).strip()\n\n # poskladani kompletniho datetime objektu\n out['datetime'] = out['date'] and out['time'] and \\\n datetime.combine(out['date'], out['time']) or None\n\n return out",
"def create_frames_from_data(self, data, blocks):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = MeasurementFrame(\n name=data['name'],\n pixel_pose_x=data['pixel_pose_x'],\n pixel_pose_y=data['pixel_pose_y'],\n pixel_pose_theta=data['pixel_pose_theta'],\n block=blocks.get(data['block']))\n item_dict[item_name].save()\n return item_dict",
"async def get_sub_block_records(\n self,\n ) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:\n cursor = await self.db.execute(\"SELECT * from sub_block_records\")\n rows = await cursor.fetchall()\n await cursor.close()\n ret: Dict[bytes32, SubBlockRecord] = {}\n peak: Optional[bytes32] = None\n for row in rows:\n header_hash = bytes.fromhex(row[0])\n ret[header_hash] = SubBlockRecord.from_bytes(row[3])\n if row[5]:\n assert peak is None # Sanity check, only one peak\n peak = header_hash\n return ret, peak",
"def _extract_traces_chunk(self, blocks):\n if 0 in blocks:\n self._save_genesis_block()\n blocks_traces = self._get_traces(blocks)\n self._set_trace_hashes(blocks_traces)\n self._set_parent_errors(blocks_traces)\n self._save_internal_transactions(blocks_traces)\n self._save_miner_transactions(blocks_traces)\n self._save_traces(blocks)",
"def parse_one_block_blockcypher(blockchain, block_number):\n results = {}\n response = requests.get('https://api.blockcypher.com/v1/%s/main/blocks/%s' % (blockchain, block_number))\n if response.status_code == 200:\n r = json.loads(response.content.decode('latin1'))\n results[columns[3]] = r['fees'] * 1E-8 # convert to non-satoshi\n results[columns[0]] = r['height']\n results[columns[1]] = r['n_tx']\n results['time'] = r['time']\n results['nonce'] = r['nonce']\n results['blockchain'] = r[\"chain\"]\n return results\n else:\n return -1",
"def get_source(block):\n raw_src = \"\"\n\n if (block.get(\"if\", None)):\n raw_src = block[\"if\"][\"condition\"][\"src\"]\n elif (block.get(\"return\", None)): \n raw_src = block[\"return\"][\"src\"]\n else:\n raise KeyError(\"Asking for the source of unknown contract block\")\n\n split_source = raw_src.split(\":\")\n \n return {\"offset\" : int(split_source[0]), \n \"length\" : int(split_source[1]) }",
"def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]",
"def _parse_ctrl_block(raw):\n blk = {}\n attrs = zip(CTRL_BLOCK_FIELDS, struct.unpack(CTRL_STRUCT_MASK, raw[:96]))\n # if there is a function to handle the raw data, call it, otherwise use raw data\n for k, v in attrs:\n if type(k) in (tuple, list):\n key, filter = k\n blk[key] = filter(v)\n else:\n blk[k] = v\n return blk",
"def _parse_line(self, line):\n msg_info = {'raw_message': line}\n line_split = line.split(None, 2)\n try:\n msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format)\n msg_info['message'] = line_split[2]\n except (ValueError, IndexError):\n pass\n return msg_info",
"def parse_blockcypher(blockchain, first_block=None, n_block=200):\n r = []\n if not first_block:\n first_block = get_first_block(blockchain)\n for block_number in range(first_block, first_block - n_block, -1):\n block = parse_one_block_blockcypher(blockchain, block_number)\n if block != -1:\n r.append(block)\n else:\n print('Error after block number %s (%s blocks done)' % (block_number, first_block - block_number))\n break\n df = pd.DataFrame(r)\n df[columns[4]] = pd.to_datetime(df['time'], format=\"%Y-%m-%dT%H:%M:%SZ\")\n df[columns[2]] = df[columns[3]] / df[columns[1]]\n return df",
"def _parse_metadata_fields(key_value_block: str) -> Dict[str, str]:\n key_value_block = key_value_block.lstrip()\n field_lines = re.split(r'\\n', key_value_block)\n field_name = 'unknown'\n fields_builder: Dict[str, str] = {}\n for field_line in field_lines:\n field_match = RE_FIELD_COMPONENTS.match(field_line)\n if field_match and field_match.group('field') in NAMED_FIELDS:\n field_name = field_match.group(\n 'field').lower().replace('-', '_')\n field_name = re.sub(r'_no$', '_num', field_name)\n fields_builder[field_name] = field_match.group(\n 'value').rstrip()\n elif field_name != 'unknown':\n # we have a line with leading spaces\n fields_builder[field_name] += re.sub(r'^\\s+', ' ', field_line)\n return fields_builder"
] |
[
"0.71398336",
"0.6421845",
"0.6310692",
"0.6232713",
"0.6159134",
"0.6008795",
"0.58964336",
"0.5853936",
"0.5750201",
"0.57200634",
"0.5679504",
"0.56746316",
"0.56639814",
"0.5650643",
"0.56322706",
"0.56211495",
"0.5621063",
"0.55872095",
"0.55845296",
"0.5560303",
"0.5558033",
"0.55416286",
"0.55367863",
"0.552458",
"0.54981714",
"0.5497638",
"0.5489235",
"0.5483555",
"0.54732573",
"0.54468364"
] |
0.73097277
|
0
|
split a line into its key/value pair elements
|
def _split_by_keypair(self, osw_dict={}):
lst = osw_dict
keypair_dict = []
for d in lst:
if d['key'] == 'raw_line':
keypair_lst = re.split(r',',d['value'])
                for pair in keypair_lst:
                    # each comma-separated token is assumed to be a 'key=value' pair
                    k, v = pair.split('=', 1)
_d = [{'timestamp':d['timestamp'] ,
'category': d['category'],
'sub_category': d['sub_category'],
'key': k,
'value': v}]
keypair_dict.extend(_d)
return keypair_dict
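
A standalone sketch of the same transformation on a single record; the comma-separated "key=value" layout of the sample value (and hence the '=' split) is an assumption for illustration.

import re

record = {'timestamp': 'Mon Jan 01 00:00:00 UTC 2024', 'category': 'meminfo',
          'sub_category': '', 'key': 'raw_line',
          'value': 'MemTotal=16303428,MemFree=1234567'}
pairs = []
for token in re.split(r',', record['value']):
    k, v = token.split('=', 1)
    pairs.append({'timestamp': record['timestamp'], 'category': record['category'],
                  'sub_category': record['sub_category'], 'key': k, 'value': v})
print(pairs)  # two records: MemTotal -> 16303428, MemFree -> 1234567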
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def splitkv(s):\n a=re.split('(\\w*)\\s*=\\s*\"([^=\"]*)\"\\s*', s)\n a=[ t for t in a if t!='']\n return a",
"def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)",
"def __kv_pair(line):\n\n splitline = line.split(\"=\")\n\n if len(splitline) <= 1:\n return None, None\n\n key = splitline[0].strip()\n\n val = \"=\".join(splitline[1:]).strip()\n\n return key, val",
"def _label_line_parser(record, splitter, strict=True):\n labels = []\n result = {}\n for line in record:\n try:\n key, val = splitter(line.rstrip())\n except:\n if strict:\n raise RecordError(\n \"Failed to extract key and value from line %s\" %\n line)\n else:\n continue # just skip the line if not strict\n\n if key in result:\n result[key].append(val)\n else:\n result[key] = [val]\n labels.append(key)\n return result, labels",
"def parse(line):\n return dict([pair.split(':') for pair in line.split()])",
"def extract_values(line):\n key, value = line.split(':')\n key, value = key.strip(), value.strip()\n key = key.replace(' ', '_')\n\n # values as lists\n if key.lower() in ('flags', 'bugs'):\n value = value.split()\n return key.lower(), value",
"def get_key_value_pairs(mm, sep='='):\n idxl = all_lines_with_tag(mm, sep)\n entry = {}\n for idx in idxl:\n mm.seek(idx)\n ibegin = mm.rfind(b'\\n', 0, idx)\n mm.seek(ibegin)\n line = mm.readline() # skip \\n\n line = mm.readline()\n tokens = line.split(sep.encode())\n name = tokens[0].strip() # strip whitespace\n val = tokens[1].strip()\n entry[name.decode()] = val.decode()\n return entry",
"def split(value, key):\n return str(value).split(key)",
"def parse_line(line, key):\n keys = key.split(', ')\n ParseLine = (line.get(id) for id in keys)\n\n return ParseLine",
"def splitline (self, line):\n\t\treturn line.split('\\t')",
"def tokenize(lines):\n\tfor line in lines:\n\t\tif line == '':\n\t\t\tcontinue\n\t\t\n\t\tif line.startswith('#'):\n\t\t\tcontinue\n\n\t\tif line.startswith('[') and line.endswith(']'):\n\t\t\tyield ('key', line[1:-1])\n\t\t\tcontinue\n\t\t\n\t\tvalues = tuple(map(float, line.split(' ')))\n\t\tyield ('values', values)\n\t\tcontinue",
"def parseLine(line):\n\n\teq = line.find('=')\n\tif eq == -1: raise Exception()\n\tkey = line[:eq].strip()\n\tvalue = line[eq+1:-1].strip()\n\treturn key, parseValue(value)",
"def parse_partitions(line):\n return [parse_partition(entry) for entry in line.split(\";\")]",
"def split_lines(lines):\n sep = '= ' # NOTE: the trailing space is important\n pairs = []\n extra_lines = []\n for line in lines:\n try:\n descr, value = map(str.strip, line.split(sep))\n pairs.append((descr, value))\n except:\n extra_lines.append(line)\n return pairs, extra_lines",
"def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }",
"def parse_pairs(pairs, extra_lines):\n onoff_pairs = pairs[12:-8]\n keyval_pairs = pairs[:12] + pairs[-8:]\n\n # \"Additional notes\" at the end of the file\n # We append that to the key-value pair list and parse it as any other\n notes = '\\n'.join(extra_lines[1:]).strip()\n keyval_pairs.append(('notes', notes))\n\n # Parsed key-value pairs as dictionary\n items = {}\n for pair, plan_step in zip(keyval_pairs, presto_inf_parsing_plan):\n descr, value = pair\n keyname, keytype = plan_step\n items[keyname] = keytype(value)\n return items",
"def string_to_keypair(self, data): \n return keypair_lst",
"def split(line, skipspace=0):\n a = re.sub('\\s+',' ', line.strip()) ##normalize white's -> 1 space\n a = re.split('(\\s)', a) ##split/keep space\n for aa in a: ## for each PPH, convert it to SPH\n if aa==' ':\n if skipspace == 0: yield aa\n else:\n for aaa in toSPH.pph2sph(aa): yield aaa",
"def _decode_sensor_line(self, line):\n key_value_sep = ':'\n line = line.decode('utf-8')\n line = line.split(key_value_sep)\n return line",
"def key_value_pair(line):\n key = None\n value = None\n try:\n key, value = line.split(\"=\", 1)\n except ValueError:\n print(\"line must be format: key=value, but now is:\", line)\n sys.exit(1)\n try:\n value = int(value)\n except ValueError:\n print(\"Error: you input value must be integer, but now is:\", value)\n sys.exit(1)\n return key, value",
"def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n result.append(current_group)\n return result",
"def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()",
"def extract_key_value_pairs(string, joiner='=', separator=','):\n return dict([x.strip() for x in s.split(joiner, 1)] for s in string.split(separator))",
"def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)",
"def parseFileLine(self, line):\n c = line.strip().split(\":\")\n return (c[0], c[1], c[2], c[3])",
"def parse_list_output(output):\n lines = output.splitlines()\n keys = filter(None, lines[1].split('|'))\n keys = [x.lower().strip() for x in keys]\n r = []\n for line in lines[3:-1]:\n if len(line.split()) <= 1:\n continue\n values = filter(None, line.split('|'))\n values = [x.strip() for x in values]\n assert len(keys) == len(values)\n record = dict(zip(keys, values))\n r.append(record)\n return r",
"def __split_line_in_molspec(self, line):\n frg = 0\n if line.find('=') != -1:\n myline = line.replace('(',' ').replace(')',' ').replace('=',' ')\n items = myline.split()\n atomname = items[0]\n frg = int(items[2])\n coord = [ float(items[3]), float(items[4]), float(items[5]) ]\n else:\n myline = line\n items = myline.split()\n if len(items) > 4:\n frg = int(items[4])\n atomname = items[0]\n coord = [ float(items[1]), float(items[2]), float(items[3]) ]\n rec = {'name': atomname, 'coord': coord, 'frg': frg}\n return rec",
"def tokenize_and_split(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n i = -1\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n else : \n i = i - 1\n w.append(dic[word])\n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n \n return dic,list1,list2",
"def parse_groups(data):\n for k, g in itertools.groupby((line.strip() for line in data), bool):\n if k:\n yield list(g)",
"def split_order_info(keydict):\n\n if not isinstance(keydict, dict):\n raise ValueError(\"Expected an input dictionary\")\n\n # has beam name fits token\n token = re.compile('^[a-zA-Z]*_(?:[+\\-]){0,1}[a-zA-Z0-9]{0,1}_*')\n rangekey = re.compile('^[a-zA-Z]*_[0-1]{1,1}$')\n rdict = dict() # return dictionary\n beams = list()\n\n # prefetch number of Beams, beam is the second string\n for key in keydict:\n if token.match(key):\n b = key.split(\"_\")[1].upper()\n if b not in beams:\n beams.append(b)\n for b in beams:\n rdict[b] = dict()\n\n # assumes that keys are sep with underscore and beam is in second section\n for key in keydict:\n if not token.match(key):\n rdict[key] = keydict[key] # not associated with a beam\n else:\n b = key.split(\"_\")[1].upper()\n newkey = key.replace(\"_{}\".format(b), \"\")\n rdict[b][newkey] = keydict[key]\n\n # look for range variables to make them into tuples\n for b, d in rdict.items():\n keys = d.keys()\n rkeys = []\n odict = {}\n for k in keys:\n if rangekey.match(k):\n rkeys.append(k)\n for k in rkeys:\n mlist = [m for m in rkeys if k.split(\"_\")[0] in m]\n root = mlist[0].split(\"_\")[0]\n if root not in odict:\n for mk in mlist:\n if eval(mk[-1]) == 0:\n zero = d[mk]\n elif eval(mk[-1]) == 1:\n one = d[mk]\n else:\n raise ValueError(\"Unexpected range variable {}\"\n .format(mk))\n odict[root] = (zero, one)\n # combine the dictionaries and remove the old keys\n d.update(odict)\n for k in rkeys:\n del d[k]\n\n return rdict"
] |
[
"0.6456821",
"0.63751614",
"0.6154483",
"0.6092737",
"0.60747635",
"0.59243345",
"0.5920596",
"0.59065396",
"0.5904815",
"0.58083063",
"0.5758918",
"0.5717138",
"0.5714947",
"0.5691351",
"0.5686674",
"0.56773335",
"0.55829465",
"0.5543476",
"0.54818356",
"0.54575175",
"0.54502136",
"0.54269344",
"0.5390861",
"0.5390809",
"0.5389787",
"0.5380582",
"0.53697795",
"0.53566206",
"0.5314843",
"0.53140694"
] |
0.72110385
|
0
|
analyze oswmem free memory: check whether free memory <= min memory
|
def oswmem_free_memory(self, min=0):
        # True only when every sampled free-memory value exceeds the minimum threshold
        result = (self.df['free mmemory'] > min).all()
return result
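
A toy check mirroring the method above, assuming `self.df` is a pandas DataFrame with the same 'free mmemory' column (column name kept as in the source data).

import pandas as pd

df = pd.DataFrame({'free mmemory': [512, 1024, 2048]})
min_mem = 256
print((df['free mmemory'] > min_mem).all())  # True: every sample exceeds the threshold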
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)",
"def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")",
"def MAXMEM(self):",
"def hasmem(state, mem):\n if mem <= state[HEAD][MEM]:\n return True\n else:\n state[HEAD][STATUS] = OOM\n return False",
"def ram_condition(min_gb=3):\n return get_free_gb() < min_gb",
"def enough_free_memory(memory_info, slc_name):\n if slc_name not in memory_info:\n return [], []\n crr_limit_mb = memory_info[slc_name]['memory.limit_in_bytes'] // 1024**2\n crr_usage_mb = memory_info[slc_name]['memory.usage_in_bytes'] // 1024**2\n margin = crr_usage_mb / 5 # 20%\n if crr_usage_mb + margin > crr_limit_mb:\n return [], ['memory:{0} has more than 80% usage'.format(slc_name)]\n return [], []",
"def get_memory():\n with open('/proc/meminfo', 'r') as mem:\n free_memory = 0\n for i in mem:\n sline = i.split()\n if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n free_memory += int(sline[1])\n print(\"____________________ \" + str(free_memory) + \"____________________\")\n return free_memory",
"def check_available_memory(self,unit='B'):\n free = psutil.virtual_memory().available\n\n if unit == 'MB':\n\n return free/10**6\n\n elif unit == 'GB':\n\n return free/10**9\n\n else:\n\n return free",
"def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')",
"def get_advanced_memory_check(memory_info):\n memory_check = get_memory_check(memory_info)\n checks = {}\n checks['sum_limits'] = CGCheck([memory_check], check_sum_limits, memory_info)\n checks['root_usage'] = CGCheck([memory_check], enough_free_memory, memory_info, '.')\n checks['user_usage'] = CGCheck([memory_check], enough_free_memory, memory_info, 'user.slice')\n checks['sys_usage'] = CGCheck([memory_check], enough_free_memory, memory_info, 'system.slice')\n checks['mac_usage'] = CGCheck([memory_check], enough_free_memory, memory_info, 'machine.slice')\n checks['sp_usage'] = CGCheck([memory_check], enough_free_memory, memory_info, 'storpool.slice')\n return CGCheck(checks.values(), lambda: ([], []))",
"def _checkMemLeak(self):\n\t\t### Memory leak code:\n\t\t#self.stats['memlist'].append(mem.mySize()/1024)\n\t\tself.stats['memlist'].append(mem.active())\n\t\tmemfree = mem.free()\n\t\tminavailmem = 64*1024; # 64 MB, size of one image\n\t\tif(memfree < minavailmem):\n\t\t\tapDisplay.printError(\"Memory is low (\"+str(int(memfree/1024))+\"MB): there is probably a memory leak\")\n\n\t\tif(self.stats['count'] > 15):\n\t\t\tmemlist = self.stats['memlist'][-15:]\n\t\t\tn = len(memlist)\n\t\t\t\n\t\t\tgain = (memlist[n-1] - memlist[0])/1024.0\n\t\t\tsumx = n*(n-1.0)/2.0\n\t\t\tsumxsq = n*(n-1.0)*(2.0*n-1.0)/6.0\n\t\t\tsumy = 0.0; sumxy = 0.0; sumysq = 0.0\n\t\t\tfor i in range(n):\n\t\t\t\tvalue = float(memlist[i])/1024.0\n\t\t\t\tsumxy += float(i)*value\n\t\t\t\tsumy += value\n\t\t\t\tsumysq += value**2\n\t\t\t###\n\t\t\tstdx = math.sqrt(n*sumxsq - sumx**2)\n\t\t\tstdy = math.sqrt(n*sumysq - sumy**2)\n\t\t\trho = float(n*sumxy - sumx*sumy)/float(stdx*stdy+1e-6)\n\t\t\tslope = float(n*sumxy - sumx*sumy)/float(n*sumxsq - sumx*sumx)\n\t\t\tmemleak = rho*slope\n\t\t\t###\n\t\t\tif(self.stats['memleak'] > 3 and slope > 20 and memleak > 512 and gain > 2048):\n\t\t\t\tapDisplay.printError(\"Memory leak of \"+str(round(memleak,2))+\"MB\")\n\t\t\telif(memleak > 32):\n\t\t\t\tself.stats['memleak'] += 1\n\t\t\t\tapDisplay.printWarning(\"substantial memory leak \"+str(round(memleak,2))+\"MB\")\n\t\t\t\tprint \"(\",str(n),round(slope,5),round(rho,5),round(gain,2),\")\"",
"def freemem(extra_alloc=0):\r\n gc.collect()\r\n gc.collect()\r\n gc.collect()\r\n n_mallocs = cuda.cuda_ndarray.cuda_ndarray.outstanding_mallocs()\r\n\r\n if hasattr(cuda.cuda_ndarray.cuda_ndarray, \"theano_allocated\"):\r\n theano_alloc = cuda.cuda_ndarray.cuda_ndarray.theano_allocated()\r\n return (\"(n malloc/theano mem allocated in KB)\",\r\n n_mallocs + extra_alloc,\r\n int(theano_alloc / 1024) + extra_size)\r\n\r\n return (\"n malloc on the gpu\", n_mallocs + extra_alloc)\r\n # I don't use the following by default as if there is other stuff running\r\n # on the GPU, this won't work.\r\n mem_info = cuda.cuda_ndarray.cuda_ndarray.mem_info()\r\n gpu_used = (mem_info[1] - mem_info[0]) / 1024 ** 2\r\n mem_info_msg = \"(n malloc/gpu mem used in MB)\"\r\n return (mem_info_msg, n_mallocs, int(gpu_used))",
"def getmemusage(self): # 3\n res,resargs = self.__obj.getmemusagetask()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _meminuse_return_value,_maxmemuse_return_value = resargs\n return _meminuse_return_value,_maxmemuse_return_value",
"def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent",
"def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Memory Usage Statistics\",\n \"/statistics/systems/memory.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)",
"def free_ram():\n return int(convert.bytetomb(psutil.virtual_memory().available))",
"def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0",
"def check_mem_usage():\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n return mem",
"def memUsedGpu(self):\n return None # amount not known",
"def check_memory(self, lambda_memory):\n if (lambda_memory < 128) or (lambda_memory > 1536):\n raise Exception('Incorrect memory size specified')\n else:\n res = lambda_memory % 64\n if (res == 0):\n return lambda_memory\n else:\n return lambda_memory - res + 64",
"def search_space_size(self):",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def getMemory():\n return tracemalloc.take_snapshot()",
"def test_instant_memory_statistics(self):\n from supvisors.statistics import instant_memory_statistics\n stats = instant_memory_statistics()\n # test bounds (percent)\n self.assertIs(float, type(stats))\n self.assertGreaterEqual(stats, 0)\n self.assertLessEqual(stats, 100)",
"def used_ram():\n return total_ram() - free_ram()"
] |
[
"0.67095083",
"0.6632159",
"0.6463974",
"0.64158916",
"0.6393297",
"0.63684064",
"0.6331907",
"0.6315069",
"0.6303074",
"0.62661755",
"0.62638867",
"0.6201548",
"0.6164832",
"0.6159131",
"0.61274666",
"0.60981977",
"0.6093252",
"0.6063132",
"0.6053817",
"0.60520935",
"0.6044838",
"0.6034463",
"0.6034463",
"0.6034463",
"0.6034463",
"0.6034463",
"0.6034463",
"0.6034463",
"0.60097843",
"0.6009679"
] |
0.7907078
|
0
|
Add a label on edge
|
def add_edge_label(self, edge, label, color):
# Sort vertices index min - max
p0, p1 = edge
p0, p1 = min(p0, p1), max(p0, p1)
self.edges_label[(p0, p1)].append((label, color))
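
A minimal stand-in for the surrounding class (assumed here): `edges_label` behaves like a defaultdict(list) keyed by the sorted vertex pair, which is what the method above implies.

from collections import defaultdict

class GraphLabels:
    def __init__(self):
        self.edges_label = defaultdict(list)

    def add_edge_label(self, edge, label, color):
        p0, p1 = edge
        p0, p1 = min(p0, p1), max(p0, p1)  # (5, 2) and (2, 5) share one key
        self.edges_label[(p0, p1)].append((label, color))

g = GraphLabels()
g.add_edge_label((5, 2), "bridge", "red")
print(g.edges_label[(2, 5)])  # [('bridge', 'red')]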
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def createLabels(edge):\n k = removeLabel(edge)\n return k + \"_L\", k + \"_R\"",
"def _edgeLabel(self, node, parent):\r\n return self.word[node.idx + parent.depth: node.idx + node.depth]",
"def add_edge(source, target, label, side_label):\n if (elements is not self and\n target not in elements):\n return\n if simple:\n result.add_edge([source, target])\n elif side == \"twosided\":\n result.add_edge([source, target, (label, side_label)])\n else:\n result.add_edge([source, target, label])",
"def add_edge(source, sink, label=\"\"):\n source.add_outgoing_edge(sink, label)\n sink.add_incoming_edge(source, label)",
"def addLabel(*args):",
"def add_point_label(self, point, label, color):\n self.vertices_label[point].append((label, color))",
"def label(self) -> str: # Enforcing every node defines a label\n pass",
"def _draw_label(label, label_x, label_y):\n pass",
"def removeLabel(edge):\n return edge[:-2]",
"def removeLabel(edge):\n return edge[:-2]",
"def __init__(self, label):\n self.label = label.replace('\\n', '')\n self.children = list()\n self.edge_labels = {}",
"def create_link_label(pos_edge_index, neg_edge_index):\n num_links = pos_edge_index.size(1) + neg_edge_index.size(1)\n link_labels = torch.zeros(num_links, dtype=torch.float,\n device=pos_edge_index.device)\n link_labels[:pos_edge_index.size(1)] = 1.\n return link_labels",
"def add_label(self, label):\n if not self.has_label(label):\n self.add_category(scheme=LABELS_SCHEME,\n term='%s#%s' % (LABELS_NS, label),\n label=label)",
"def draw_shape_label(self, label, xform, colour):\n #TODO deal with alignment, rotation\n pos = xform.chain(Point(label.x, label.y))\n self.canvas.text((pos.x, pos.y), label.text, fill=colour)",
"def AddLabel(self, label):\n if self.labels is None:\n self.labels = set()\n self.labels.add(label)",
"def label(self):\n return '->'",
"def _get_axis_label(\n self,\n label: float | str | Mobject,\n axis: Mobject,\n edge: Sequence[float],\n direction: Sequence[float],\n buff: float = SMALL_BUFF,\n ) -> Mobject:\n\n label = self.x_axis._create_label_tex(label)\n label.next_to(axis.get_edge_center(edge), direction=direction, buff=buff)\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return label",
"def addkid(self, node):\n self.children.append(node)\n # uniof of edge labels\n # self.edge_labels = {**self.edge_labels, **node.edge_labels}\n return self",
"def add_label(xml_str, node):\n if xml_str:\n s = u'<?xml version=\"1.0\" ?><label>' + xml_str + u\"</label>\"\n node.appendChild( parseString(s.encode(\"utf-8\")).documentElement )",
"def add_edge(self, v1, v2):\n pass # TODO",
"def write_label(self, label):\n self._write_line('label ' + label) # TODO generate unique labels?",
"def __add_new_label(self, name, value):\n self.__labels_dict[name] = value",
"def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)",
"def _plot_label(self, ax, x_coord):\n if self.label is None:\n return\n fontdict = updated_dict(\n {\"color\": \"white\", \"family\": \"sans-serif\", \"weight\": \"bold\", \"size\": 10},\n self.label_fontdict,\n )\n ax.text(\n x_coord,\n -self.migration_distance,\n self.label,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontdict=fontdict,\n transform=ax.transData,\n bbox=dict(boxstyle=\"round\", fc=self.band_color, lw=0),\n )",
"def create_label(self, org, name):\n pass",
"def dumpLabel(g):\n for v in g.nodes():\n g.node[v]['label'] = getLabel(g.node[v])\n if 'attribute' in g.node[v]:\n del g.node[v]['attribute']\n\n for e in g.edges():\n g.edge[e[0]][e[1]]['label'] = getLabel(g.edge[e[0]][e[1]])\n if 'attribute' in g.edge[e[0]][e[1]]:\n del g.edge[e[0]][e[1]]['attribute']",
"def add_edge(graph, tail_node, head_node, label=\"[0%]\", style=\"solid, bold\", color=\"black\", constraint=\"true\"):\n edge = Edge(tail_node, head_node, label=label, style=style, color=color, constraint=\"true\")\n graph.add_edge(edge)\n return",
"def label(self):\r\n raise NotImplementedError",
"def add_edge(self, head, child, label=None):\n self._heads[child] = head\n self._labels[child] = label\n\n # keep track of all dependents of a node\n self._deps[head].append(child)\n\n # keep track of the left/rights dependents\n if child < head:\n self._left_deps[head].append(child)\n else:\n self._right_deps[head].append(child)",
"def UpdateLabel(self) -> _n_6_t_0:"
] |
[
"0.72381914",
"0.7027375",
"0.6901796",
"0.6890718",
"0.6868756",
"0.6738556",
"0.66855615",
"0.66676563",
"0.66360575",
"0.66360575",
"0.6628755",
"0.65694034",
"0.64525896",
"0.63817084",
"0.63212174",
"0.628949",
"0.62592226",
"0.6235304",
"0.62335396",
"0.6216172",
"0.6216129",
"0.62088645",
"0.619283",
"0.61928",
"0.61812",
"0.6144017",
"0.61244154",
"0.6111744",
"0.6108399",
"0.60941833"
] |
0.8186049
|
0
|
Add a label on point
|
def add_point_label(self, point, label, color):
self.vertices_label[point].append((label, color))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _draw_label(label, label_x, label_y):\n pass",
"def draw_shape_label(self, label, xform, colour):\n #TODO deal with alignment, rotation\n pos = xform.chain(Point(label.x, label.y))\n self.canvas.text((pos.x, pos.y), label.text, fill=colour)",
"def draw_label(self, image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=0.8, thickness=1):\n # gets the size of the label\n size = cv2.getTextSize(label, font, font_scale, thickness)[0]\n # where the position is\n x, y = point\n # gets the rectangle size\n cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)\n cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)",
"def put_label(i):\n i = min(i, len(x)-2)\n dx = sx[i+1] - sx[i]\n dy = sy[i+1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i+1])/2. + offset[0], (y[i] + y[i+1])/2 + offset[1]]\n plt.text(pos[0], pos[1], label_text, size=9, rotation=rotation, color = line.get_color(),\n ha=\"center\", va=\"center\", bbox = dict(ec='1',fc='1',alpha=0.8))",
"def put_label(i):\n i = min(i, len(x) - 2)\n dx = sx[i + 1] - sx[i]\n dy = sy[i + 1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i + 1]) / 2. + offset[0],\n (y[i] + y[i + 1]) / 2 + offset[1]]\n plt.text(pos[0],\n pos[1],\n label_text,\n size=9,\n rotation=rotation,\n color=line.get_color(),\n ha=\"center\",\n va=\"center\",\n bbox=dict(ec='1', fc='1', alpha=0.8))",
"def addLabel(*args):",
"def _plot_label(self, ax, x_coord):\n if self.label is None:\n return\n fontdict = updated_dict(\n {\"color\": \"white\", \"family\": \"sans-serif\", \"weight\": \"bold\", \"size\": 10},\n self.label_fontdict,\n )\n ax.text(\n x_coord,\n -self.migration_distance,\n self.label,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontdict=fontdict,\n transform=ax.transData,\n bbox=dict(boxstyle=\"round\", fc=self.band_color, lw=0),\n )",
"def DrawPointLabel(self, dc, mDataDict):\n # ----------\n dc.SetPen(wx.Pen(wx.BLACK))\n dc.SetBrush(wx.Brush( wx.BLACK, wx.SOLID ) )\n \n sx, sy = mDataDict[\"scaledXY\"] #scaled x,y of closest point\n dc.DrawRectangle( sx-5,sy-5, 10, 10) #10by10 square centered on point\n px,py = mDataDict[\"pointXY\"]\n cNum = mDataDict[\"curveNum\"]\n pntIn = mDataDict[\"pIndex\"]\n legend = mDataDict[\"legend\"]\n #make a string to display\n s = \"Crv# %i, '%s', Pt. (%.2f,%.2f), PtInd %i\" %(cNum, legend, px, py, pntIn)\n dc.DrawText(s, sx , sy+1)\n # -----------",
"def pointLabels(self):\n global ptA, ptB, ptC, ptD, ptE\n ptA = always_redraw(\n lambda : MathTex(\"\\\\text{A}\").scale(0.75).next_to(dot_center, LEFT)\n )\n\n ptB = always_redraw(\n lambda : MathTex(\"\\\\text{B}\").scale(0.75).next_to(radius_ang_end_dot, UP)\n )\n\n ptC = always_redraw(\n lambda : MathTex(\"\\\\text{C}\").scale(0.75).next_to(small_tangent_end_dot, DOWN)\n )\n\n ptD = always_redraw(\n lambda : MathTex(\"\\\\text{D}\").scale(0.75).next_to(radius_horiz_end_dot, DOWN)\n )\n\n ptE = always_redraw(\n lambda : MathTex(\"\\\\text{E}\").scale(0.75).next_to(dropped_dot, DOWN)\n )\n\n self.play(Write(ptA),\n Write(ptB),\n Write(ptC),\n Write(ptD),\n Write(ptE))\n self.wait(0.5)",
"def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)",
"def draw_label(\n self,\n x: Union[Quantity, float],\n y: Union[Quantity, float],\n text: str,\n **kwargs: Any,\n ) -> None:\n\n if isinstance(x, Quantity) and isinstance(y, Quantity):\n _x = x.to_value(u.deg)\n _y = y.to_value(u.deg)\n relative = False\n else:\n _x = x\n _y = y\n relative = True\n self.plot.add_label(_x, _y, text, relative=relative, **kwargs)",
"def label_annotation(tt):\n t_akt = tt[1]\n label_idx = max(np.where(man_all.label_borders <= t_akt)[0])\n label = man_all.label_chain[label_idx]\n pl.annotate(label, xy=(0.07, .8), color = \"black\",\n xycoords = \"axes fraction\",\n xytext=None, arrowprops=None)",
"def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')",
"def _draw_x_label(self):\n overlay = self.image.getOverlay()\n TextRoi.setGlobalJustification(TextRoi.CENTER)\n offset = self.image.getHeight() - self.extend_label\n label_pos = self.image.getWidth() / 2\n text = TextRoi(label_pos, offset, 'Energy loss [eV]', self.font)\n text_width = text.getFloatWidth()\n text_y = text.getYBase()\n text.setLocation(label_pos - text_width / 2, text_y)\n text.setStrokeColor(Color(1.00, 1.00, 1.00))\n overlay.add(text)",
"def add_keypoints_label(self, label):\n self._ensure_has_keypoints_label(label)",
"def label(self, location, *args, **kwargs):\n\n if isinstance(location, fslimage.Image):\n return self.maskLabel(location, *args, **kwargs)\n else:\n return self.coordLabel(location, *args, **kwargs)",
"def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)",
"def draw_label(label_text, label_position, scene):\n\n # Custom settings for the label\n label_height = 10\n label_xoffset = 0\n label_yoffset = 50\n label_space = 20\n label_font = 'serif'\n label_text_colour = color.black\n label_line_color = color.black\n\n the_label = label(\n canvas=scene,\n pos=label_position,\n text=label_text,\n height=label_height,\n xoffset=label_xoffset,\n yoffset=label_yoffset,\n space=label_space,\n font=label_font,\n color=label_text_colour,\n linecolor=label_line_color\n )\n\n return the_label",
"def _format_label(self, lbl, plot_src):\n lbl.text = \"{0:}\".format(plot_src.name)",
"def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )",
"def draw_label(self, text, event_name, num_items = 1, item = 0):\n width = self.XCOLUMNSKIP//num_items\n self.guiElements[event_name] = Draw.Label(\n text,\n self.xPos + item*width, self.yPos, width, self.YLINESKIP)\n if item + 1 == num_items:\n self.yPos -= self.YLINESKIP",
"def DrawLabel(self, screen):\r\n screen.blit(self.label, self.pos)",
"def draw_position_label(self):\n x, y, z = self.player.pos\n x, y, z = math.floor(x), math.floor(y), math.floor(z)\n self.label.text = 'FPS: {}, X: {}, Y: {}, Z: {}'.format(round(pyglet.clock.get_fps()), x, y, z)\n self.label.draw()",
"def create_label(self, org, name):\n pass",
"def UpdateLabel(self) -> _n_6_t_0:",
"def __init__(self, date, text, label_point, color='black', bbox=None, event_point=None, put_circle_around_point=True, marker=None, relpos=(.5, .5)):\n super().__init__(label_point.x, label_point.y)\n self.date = date\n self.text = text\n self.color = color\n self.bbox = bbox\n self.event_point = event_point\n self.put_circle_around_point = put_circle_around_point\n self.marker = marker\n self.relpos = relpos",
"def draw_label(self):\n x, y, z, phi, theta, psi = self.airplane.eta\n u, v, w, p, q, r = self.airplane.nu\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot = self.airplane.nu_dot\n alpha = np.arctan(w/u)\n V_a = np.sqrt(u**2+v**2+w**2)\n beta = np.arcsin(v/V_a)\n\n self.labels[0].text = 'Roll [deg]: %.2f' % (phi*180/np.pi,)\n self.labels[0].draw()\n self.labels[1].text = 'Pitch [deg]: %.2f' % (theta*180/np.pi,)\n self.labels[1].draw()\n self.labels[3].text = 'Pos: (%.2f, %.2f, %.2f)' % (x, y, z)\n self.labels[3].draw()\n self.labels[4].text = 'Speed: %.2f (%.2f, %.2f, %.2f)' % (V_a, u, v, w)\n self.labels[4].draw()\n self.labels[5].text = 'Acceleration: (%.2f, %.2f, %.2f)' % (u_dot, v_dot, w_dot)\n self.labels[5].draw()\n self.labels[6].text = 'Angle of attack: %.2f' % (alpha,)\n self.labels[6].draw()\n self.labels[7].text = 'Sideslip angle: %.2f' % (beta,)\n self.labels[7].draw()\n\n self.labels[9].text = 'Drag: %.2f' % (self.airplane.f_drag,)\n self.labels[9].draw()\n self.labels[10].text = 'Lift: %.2f' % (self.airplane.f_lift,)\n self.labels[10].draw()\n self.labels[11].text = 'Thruster: %.2f' % (self.airplane.f_thruster,)\n self.labels[11].draw()\n self.labels[12].text = 'Elevators: %.2f' % (self.airplane.elevator,)\n self.labels[12].draw()\n self.labels[13].text = 'Ailerons: %.2f' % (self.airplane.aileron,)\n self.labels[13].draw()\n self.labels[14].text = 'Rudder angle: %.2f' % (self.airplane.rudder_angle,)\n self.labels[14].draw()\n self.labels[15].text = 'Flaps: %.2f' % (self.airplane.flaps,)\n self.labels[15].draw()\n\n if (alpha > CRITICAL_STALL_ANGLE):\n self.stall_warning.text = 'Stall!'\n self.stall_warning.draw()",
"def add_text_next_to_xlabel(fig, ax, text):\n\n xlbl = ax.xaxis.get_label()\n\n # draw figure using renderer because axis position only fixed after drawing\n fig.canvas.draw()\n\n transform = xlbl.get_transform()\n font_properties = xlbl.get_font_properties()\n position = xlbl.get_position()\n ha = xlbl.get_horizontalalignment()\n va = xlbl.get_verticalalignment()\n\n txt = ax.text(0., 0, text)\n\n txt.set_transform(transform)\n txt.set_position((position[0] * 1.7, position[1]))\n txt.set_font_properties(font_properties)\n txt.set_horizontalalignment(ha)\n txt.set_verticalalignment(va)",
"def setLabel(*args):",
"def setLabel(*args):"
] |
[
"0.7767723",
"0.7519163",
"0.7506295",
"0.7415118",
"0.7404069",
"0.7259624",
"0.70306754",
"0.70253646",
"0.68806297",
"0.6823486",
"0.68070644",
"0.67730355",
"0.67621034",
"0.6754007",
"0.67432547",
"0.6664425",
"0.66360676",
"0.65894914",
"0.65858585",
"0.6573907",
"0.6553606",
"0.6538524",
"0.64840776",
"0.6396831",
"0.6391177",
"0.6372448",
"0.6371153",
"0.6368006",
"0.6358759",
"0.6358759"
] |
0.8212442
|
0
|
Transform a 3D point from the mesh to a 3D point of the world by multiplying with the matrix world
|
def _world_point(self, point_3d):
return self.obj.matrix_world @ point_3d
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def world_to_object(self, point: Point) -> Point:\n if self.parent:\n point = self.parent.world_to_object(point)\n result = self.transform.inverse() * point\n return result",
"def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])",
"def translate3d(p, a=0, b=0, c=0):\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [a,b,c,1]\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p",
"def function_3d(point):\n return point[0]**2 + point[1]**2 + point[2]**2 - 1",
"def project_point(self, point: Point3D) -> Point3D:\n x, y, z = point\n cam_x, cam_y, cam_z = self._pos\n x -= cam_x\n y -= cam_y\n z -= cam_z\n dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z\n dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x)\n dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x)\n return self._scale * dx/dz, self._scale * dy/dz, dz",
"def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)",
"def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result",
"def transform( self, matrix3 ):\n self._coords = matrix3._matrix * self._coords\n return self",
"def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)",
"def TransformPoint(transform, x, y, z):\n result = np.matmul(transform, np.array([x, y, z, 1.]))\n return result[0], result[1], result[2]",
"def matrix_apply_to_3d_3x3(vol, matrix: np.matrix):\n from scipy import mgrid\n\n cx = vol.shape[0]/2\n cy = vol.shape[1]/2\n cz = vol.shape[2]/2\n\n # Calculate the new coordinates of every point\n grid = mgrid[-cx:vol.shape[0]-cx, -cy:vol.shape[1]-cy, -cz:vol.shape[2]-cz]\n temp = grid.reshape((3, grid.size / 3))\n # Add the fourth dimension (just 1s but needed for the computations)\n # Use the matrix to calculate the new positions of every point\n temp = np.dot(matrix, temp)\n # Delete the fourth dimension\n temp = np.array(temp)\n grid = np.reshape(temp, (3, vol.shape[0], vol.shape[1], vol.shape[2]))\n\n grid[0] += cx\n grid[1] += cy\n grid[2] += cz\n\n from scipy.ndimage.interpolation import map_coordinates\n d = map_coordinates(vol, grid, order=3)\n\n return d",
"def scale3d(p, a=1, b=1, c=1):\n translation_mat = np.matrix([\n [a,0,0,0],\n [0,b,0,0],\n [0,0,c,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p",
"def translate(self, x=0, y=0, z=0):\n\t\ttranslation = np.identity(4)\n\t\ttranslation[0, 3] += x\n\t\ttranslation[1, 3] += y\n\t\ttranslation[2, 3] += z\n\t\t\n\t\tself.matrix = np.matmul(self.matrix, translation)",
"def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y",
"def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def to_world(self, uv):\n return self._projective_transform(self.A, uv)",
"def project_on_trans(self, F):\r\n\r\n x0 = self.x1\r\n y0 = self.y1\r\n self.z1 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)\r\n\r\n x0 = self.x2\r\n y0 = self.y2\r\n self.z2 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)\r\n\r\n x0 = self.x3\r\n y0 = self.y3\r\n self.z3 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)",
"def immoment3D(X, Y, Z, p, q, r):\n assert len(X) == len(Y)\n assert len(Y) == len(Z)\n return (X ** p * Y ** q * Z ** r).sum()",
"def setLocalTransform(self):\n\n # Position\n posX = OpenMaya.MPlug(self.thisObj, self.localPositionX).asFloat()\n posY = OpenMaya.MPlug(self.thisObj, self.localPositionY).asFloat()\n posZ = OpenMaya.MPlug(self.thisObj, self.localPositionZ).asFloat()\n\n glFT.glTranslatef(posX, posY, posZ)\n\n # Scale\n scaleX = OpenMaya.MPlug(self.thisObj, self.localScaleX).asFloat()\n scaleY = OpenMaya.MPlug(self.thisObj, self.localScaleY).asFloat()\n scaleZ = OpenMaya.MPlug(self.thisObj, self.localScaleZ).asFloat()\n\n glFT.glScalef(scaleX, scaleY, scaleZ)",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_TransformVector(self, *args)",
"def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()",
"def matmul3d(X, W):\n Xr = tf.reshape(X, [-1, tf.shape(X)[2]])\n XWr = tf.matmul(Xr, W)\n newshape = [tf.shape(X)[0], tf.shape(X)[1], tf.shape(W)[1]]\n return tf.reshape(XWr, newshape)",
"def matmul3d(X, W):\n Xr = tf.reshape(X, [-1, tf.shape(X)[2]])\n XWr = tf.matmul(Xr, W)\n newshape = [tf.shape(X)[0], tf.shape(X)[1], tf.shape(W)[1]]\n return tf.reshape(XWr, newshape)",
"def apply(self, point):\n m = numpy.dot(self.matrix, numpy.array([point[0], point[1], 1.0]).transpose())\n return pygame.Vector2(m[0], m[1])",
"def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out",
"def translation(self, x, y, z) -> None:\n ...",
"def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))",
"def translate ( self, dx, dy, dz):\n self.x = self.x + dx\n self.y = self.y + dy\n self.z = self.z + dz\n self.xyz = np.array((self.x, self.y, self.z))"
] |
[
"0.6619691",
"0.643642",
"0.6416658",
"0.6408308",
"0.63418496",
"0.6291406",
"0.6243179",
"0.6203395",
"0.6108228",
"0.6091871",
"0.6077667",
"0.6073341",
"0.6024039",
"0.5998349",
"0.59783256",
"0.5972309",
"0.59351104",
"0.59069496",
"0.587442",
"0.5872318",
"0.586021",
"0.58180463",
"0.5815774",
"0.58071727",
"0.58071727",
"0.5787287",
"0.5784568",
"0.5778434",
"0.5763457",
"0.5760023"
] |
0.769006
|
0
|
Return the normal vector (pointing outside) to an object and a pair of vertices
|
def _normal_vector(o, p0_3d, p1_3d):
# The vector between middle point of v1-v2 and object center location
# is the normal vector I'm looking for
vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation
    # normalize so I can do length computation on it
vn.normalize()
return vn
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()",
"def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))",
"def twoDNormal(self):\n return vector((-1) * self.y, self.x, 0)",
"def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)",
"def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))",
"def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal",
"def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n",
"def make_inward_normal(tetrahedron):\n\n convert_to_np_array = lambda v: np.array([v.x, v.y, v.z])\n np_vertices = list(map(convert_to_np_array, [tetrahedron.get_vertex(i) for i in range(4)]))\n # This is the middle point\n # midpoint = np.mean(np_vertices, axis=0)\n\n midpoint = np_vertices[0]\n for i in range(1, 4):\n midpoint += np_vertices[i]\n midpoint = midpoint / 2.0\n\n for i in range(4):\n face = tetrahedron.get_face(i)\n d = distance(face, midpoint)\n if d < 0:\n face.nx *= -1.0\n face.ny *= -1.0\n face.nz *= -1.0\n face.d *= -1.0",
"def normal(self) -> Vector:\n return self._normal",
"def vector(self) -> Vector:\n return self._normal * self._distance_from_origin",
"def unit_normals(self):\n return np.stack(self.centers_cartesian(), axis=-1)",
"def normal(self, uv):\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if not res.IsNormalDefined():\n return (0, 0, 0)\n normal = geom_utils.gp_to_numpy(res.Normal())\n if self.reversed():\n normal = -normal\n return normal",
"def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length",
"def normals(t, v):\n n = numpy.zeros((len(t), 3))\n for i in range(0, len(t)):\n p = vertices(t[i], v)\n n[i] = triangle.normal(p)\n return n",
"def angle_normal(self):\n return atan2(-self.v.x, self.v.y)",
"def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return",
"def uv(vec):\n return vec / sqrt(dot(vec, vec))",
"def get_normal(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n u = np.array([1, 0])\n return np.dot(r, u)",
"def normals(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n normals_at_point = 0.5 * gs.cross(vertex_1 - vertex_0, vertex_2 - vertex_0)\n return normals_at_point",
"def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect",
"def normal(point_one, point_two):\n return numpy.array([point_one[1] - point_two[1], point_two[0] - point_one[0]])",
"def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. \n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz",
"def Normal(self):\n return Vector(self.normal)",
"def get_normals(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n us = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])\n nsyms = 4 if self.halfexts[0] == self.halfexts[1] else 2\n return [(np.dot(r, u), nsyms) for u in us]",
"def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))",
"def getNormalizedNormalVec(self):\n TriPos = self.position\n # calc normalized normal vecor for Tri\n # get vectors Vert1Vert2 & Vert2Vert3\n TriVectors = np.subtract(TriPos[1:],TriPos[:-1])\n # get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)\n TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0\n # get length of surface normal\n length = np.linalg.norm(TriNorm)\n # divide each component of surface normal by length (= normalized surface normal)\n NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise different values, equals not found\n # create string of tuple for segment dict \n #SegmDict = str(tuple(NormalizedNormalVec))\n return NormalizedNormalVec.tolist()",
"def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector",
"def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()",
"def vertex_normals(self) -> np.ndarray:\n\n if self._vertex_normals is None:\n self.compute_vertex_normals()\n assert self._vertex_normals is not None\n return self._vertex_normals",
"def GetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint2_GetNormal(self, *args)"
] |
[
"0.6874289",
"0.68552566",
"0.66967344",
"0.6693776",
"0.6652025",
"0.6616837",
"0.65679944",
"0.6470694",
"0.6464965",
"0.6430536",
"0.6415808",
"0.6406652",
"0.6401626",
"0.63933384",
"0.6393271",
"0.6362454",
"0.63607025",
"0.63591343",
"0.6353536",
"0.6314179",
"0.629924",
"0.62755483",
"0.6273679",
"0.6252028",
"0.62420833",
"0.6230524",
"0.619217",
"0.6174477",
"0.61614144",
"0.6136948"
] |
0.7952905
|
0
|
Load 10 products from dump.json.
|
def loadProducts():
dump = os.path.dirname(os.path.abspath(__file__)) + "/dump.json"
data = open(dump, 'r')
for deserialized_object in serializers.deserialize("json", data):
deserialized_object.save()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_products():\n\n print \"Loading Products\"\n\n for i, row in enumerate(open(\"data/mock_product_data.csv\")):\n row = row.rstrip()\n title, price, inventory = row.split(\",\")\n\n product = Product(title=title,\n price=price,\n available_inventory=inventory)\n\n db.session.add(product)\n\n db.session.commit()",
"def add_imported(products):\n \n for product in products:\n add_product(product[\"product_name\"], product[\"product_quantity\"], product[\"product_price\"], product[\"date_updated\"])",
"def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def get_data(self):\n products_list = []\n for category in CATEGORIES:\n json_data = self.url_to_json(category)\n pages_nb = self.retrieve_cat_pages_nb(json_data)\n for page in range(pages_nb):\n page_json_data = self.page_to_json(category, page+1)\n products = page_json_data[\"products\"]\n for p in products:\n params = {\n 'brands': \"\",\n 'product_name_fr': \"\",\n 'nutrition_grades': \"\",\n 'stores': \"\",\n 'url': \"\",\n 'categories': \"\"\n }\n for key in params:\n try:\n params[key] = p[key]\n except KeyError:\n continue\n if params['product_name_fr'] != \"\" and params['nutrition_grades'] != \"\" and params['url'] != \"\" and params['categories'] != \"\":\n product = Product(brand=params['brands'],\n name=params['product_name_fr'],\n nutrition_grade=params['nutrition_grades'],\n stores=params['stores'], url=params['url'],\n category=params['categories'])\n products_list.append(product)\n try:\n self.manager.save_all(self.clean_data(products_list))\n print(f\"\\n La base de données |{DB_NAME}| a été peuplée \\n\")\n except:\n print(\"\\n Une erreur s'est produite lors \"\n \"du peuplement de la base de données \\n\")",
"def load_data(self):\n if not os.path.isfile(\"{}/OFF_data.json\".format(settings.DIR_PATH)):\n self.request_constructor(settings.R_COLLECTION['category'], 'NULL', 'tags')\n self.crawl_data('category')\n i = 0\n for item in self.categories:\n i += 1\n cat = item.get(\"name\")\n self.request_constructor(settings.R_COLLECTION['product'], cat, 'products')\n self.crawl_data('product')\n\n self.data = {\"categories\": self.categories, \"products\": self.products}\n self.save_data('OFF_data.json')\n else:\n with open(\"{}/OFF_data.json\".format(settings.DIR_PATH), 'r') as f:\n self.data = json.load(f)\n self.categories = self.data[\"categories\"]\n self.products = self.data[\"products\"]\n return self.categories, self.products",
"def test_get_product_list(self):\n self._create_products(5)\n resp = self.app.get(\"/products\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)",
"def __init__(self, filepath):\n for product in load_data(filepath):\n if product['shop_id'] not in self:\n self[product['shop_id']] = []\n\n product['popularity'] = float(product['popularity'])\n product['quantity'] = int(product['quantity'])\n self[product['shop_id']].append(product)",
"def test_load_products(self):\n\n call_command('load_products')\n\n products_count = len(DjangoProductRepository().all())\n\n self.assertEqual(\n products_count + 1,\n self._get_num_lines_from_csv('products.csv'),\n )",
"def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()",
"def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})",
"def test_loads_all_example_data(self):\n for file_name in EXAMPLE_CARTS:\n with open(f\"examples/{file_name}\", \"r\") as f:\n cart = json.load(f)\n\n Item.Schema(many=True).load(cart)",
"def get_products(self, filename: str):\n try:\n file = open(filename, \"r\")\n products = json.load(file)\n except FileNotFoundError as err:\n logging.error(f\"[ERROR] File {filename} not found. Traceback: {err}\")\n return False\n else:\n return products",
"def get_products(self) -> dict:\n\t\tproducts = dict()\n\n\t\twith open(self._file_path) as json_data:\n\t\t\tdata = json.load(json_data)\n\t\t\t\n\t\t\t# ::TODO:: check data validity\n\n\t\t\tfor product in data[\"data\"][\"products\"]:\n\t\t\t\tif product[\"code\"] not in products:\n\n\t\t\t\t\t# ::TODO:: Here I assumed that all the data was correctly parsed in the JSON file\n\n\t\t\t\t\tproducts[product[\"code\"]] = Product(product[\"code\"], product[\"price\"], product[\"lastupdate\"], product[\"currency\"])\n\n\t\treturn products",
"def get_products(self, adi):\r\n obj = None\r\n if self.from_copy:\r\n with open(self.products_copy.format(adi), encoding='utf-8') as f:\r\n obj = json.load(f)\r\n return obj\r\n else:\r\n return self.rf.get_products(self.urls[adi])",
"def _create_products(self, count):\n products = []\n for _ in range(count):\n test_product = ProductFactory()\n resp = self.app.post(\n \"/products\", json=test_product.serialize(), content_type=\"application/json\"\n )\n self.assertEqual(\n resp.status_code, status.HTTP_201_CREATED, \"Could not create test product\"\n )\n new_product = resp.get_json()\n test_product.id = new_product[\"id\"]\n products.append(test_product)\n return products",
"def test_loads_all_example_data(self):\n with open(EXAMPLE_BASE_PRICES_PATH, \"r\") as f:\n base_prices = json.load(f)\n\n BasePrice.Schema(many=True).load(base_prices)",
"def get_info_from_json(self, json):\n k = 0\n list_names_products = []\n while k < constant.LIMIT_PRODUCTS:\n try:\n if json['products'][k]['product_name'].lower().strip() in list_names_products:\n k += 1\n else:\n self.info_products.append(\n (json['products'][k]['product_name'],\n json['products'][k]['ingredients_text_fr'],\n json['products'][k]['nutrition_grade_fr'],\n json['products'][k]['purchase_places'],\n json['products'][k]['url'],\n json['products'][k]['image_url'],\n json['products'][k]['nutriments']['fat_100g'],\n json['products'][k]['nutriments']['saturated-fat_100g'],\n json['products'][k]['nutriments']['salt_100g'],\n json['products'][k]['nutriments']['sugars_100g']))\n list_names_products.append(self.info_products[-1][0].lower().strip())\n k += 1\n except KeyError as error:\n print(error)\n k += 1\n continue",
"def load_products_data():\r\n data_path = join(current_path(), '..', 'data')\r\n product_files = glob(join(data_path, 'products', '*.xml'))\r\n products_list = []\r\n for product_filename in product_files:\r\n tree = etree.parse(product_filename)\r\n product_dict = dict_from_element(tree.getroot())\r\n if not re.match('^[0-9a-z.]+$', product_dict['index_name']):\r\n raise Exception('Invalid index name %s at %s, must match [a-z.]+' %\r\n (product_dict['index_name'] , basename(product_filename)))\r\n products_list.append(product_dict)\r\n return products_list",
"def get_product_data_off(self):\n list_products_name = []\n for x in self.list_categories: \n \"\"\"get products' data from openfoodfacts api with string as paramaters\"\"\"\n parameters = {\n 'action': 'process',\n 'json': 1,\n 'countries': 'France',\n 'page_size': 100,\n 'page': 1,\n 'tagtype_0': 'categories',\n 'tag_contains_0': 'contains',\n 'tag_0': x\n }\n r = requests.get('https://fr.openfoodfacts.org/cgi/search.pl',\n params=parameters) # passing parameters in URL\n print(r.url)\n data = r.json() # r. from requests module decodes json file\n products = data['products'] #access dictionnary items by referring to its key name, products ordered by id\n list_products_name.append(products) \n self.list_products = list_products_name # list_categories_name is passed in the instance property",
"def loadproducts(lid):\r\n db = get_db()\r\n\r\n b_id = session.get(\"user_id\")\r\n product_list = {}\r\n\r\n if lid == \"Products\":\r\n query = \"SELECT product_id, product_name FROM product WHERE for_business = ? AND quantity > 0\"\r\n warehouses = db.execute(query, (b_id,)).fetchall()\r\n for products in warehouses:\r\n product_list[products[0]] = products[1]\r\n else:\r\n query = \"SELECT prod_id FROM warehouse where loc_id = ? AND b_id = ?\"\r\n warehouses = db.execute(query, (lid, b_id,)).fetchall()\r\n for products in warehouses:\r\n product_name = db.execute(\r\n \"SELECT product_name FROM product WHERE product_id = ? AND for_business = ?\",\r\n (products[\"prod_id\"], b_id,),\r\n ).fetchone()\r\n product_list[products[\"prod_id\"]] = product_name[\"product_name\"]\r\n\r\n return jsonify(product_list)",
"def fill_products(self):\n cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n categories = dict()\n for page in range(1, 2):\n result = requests.get(\n 'https://fr.openfoodfacts.org/cgi/search.pl?page_size=1000&page={}&action=process&json=1'.format(\n page)).json()\n for element in result['products']:\n try:\n cursor.execute(\n \"INSERT INTO product (name, store, nutrition_grade, url) VALUES (%s, %s, %s, %s) RETURNING \"\n \"id, name\",\n (element[\"product_name\"], element[\"stores\"], element[\"nutrition_grade_fr\"], element[\"url\"]))\n # un except pour éviter les erreurs de clés\n query_result = cursor.fetchone()\n for category in element[\"categories_tags\"]:\n try:\n cursor.execute(\"INSERT INTO product_category(product_id, category_id) VALUES (%s, %s)\",\n (query_result[0], self.categories[category]))\n except KeyError:\n print(\"Categorie insertion failed\")\n\n print(element[\"product_name\"])\n except KeyError:\n print(f'product insertion failed:')\n\n self.conn.commit()\n cursor.close()",
"def product_list(request):\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n limit, error = get_limit(request, error)\n\n serializer = FreshSerializer()\n queryset = Product.objects.all()[:limit]\n\n if not queryset:\n error = {\n \"status\": True,\n \"name\": \"No Products\",\n \"text\": \"No Products found\",\n \"level\": \"Information\",\n \"debug\": \"\"\n }\n\n data = {\n \"products\": json.loads(serializer.serialize(queryset)),\n \"error\": error\n }\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")",
"def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)",
"def test_wiki_products(self):\n\n prod_vals = (\n (ProductFactory(slug='b2g'), 0),\n (ProductFactory(slug='mobile'), 1),\n (ProductFactory(slug='desktop'), 2),\n )\n\n for prod, total in prod_vals:\n for i in range(total):\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.products.add(prod)\n RevisionFactory(document=doc, is_approved=True)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n\n for prod, total in prod_vals:\n qs.update({'product': prod.slug})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def prod_parser(self, response):\n products = []\n pk = Product.objects.last()\n if pk:\n i = pk.id\n else:\n i = 0\n for product in response:\n # crawling product for name, desc, API_url, image_url, nutriscore, nutient_100g\n if 'ingredients_text_fr' not in product:\n desc = \"\"\n else:\n desc = product['ingredients_text_fr']\n nutrigrade = \"\".join(product[\"nutrition_grades_tags\"])\n if nutrigrade in (\"a\", \"b\", \"c\", \"d\", \"e\") \\\n and 'fat_100g' in product['nutriments'] \\\n and 'image_url' in product \\\n and 'product_name_fr' in product:\n i += 1\n product[i] = {\n \"id\": i,\n \"name\": product['product_name_fr'],\n \"desc\": desc,\n \"categories\": product[\"categories\"].split(\", \"),\n \"API_link\": product['url'],\n \"photo\": product['image_url'],\n \"nutriscore\": nutrigrade,\n \"nutrient_100g\":\n \"saturated_fat_100g:{}:{}, \".format(\n product['nutriments']['saturated-fat_100g'],\n product['nutrient_levels']['saturated-fat']) +\n \"fat_100g:{}:{}, \".format(\n product['nutriments']['fat_100g'], product['nutrient_levels']['fat']) +\n \"salt_100g:{}:{}, \".format(\n product['nutriments']['salt_100g'], product['nutrient_levels']['salt']) +\n \"sugars_100g:{}:{} \".format(\n product['nutriments']['sugars_100g'], product['nutrient_levels']['sugars'])\n }\n products.append(product[i])\n else:\n pass\n return products",
"def load_products():\n\n for i, row in enumerate(open(\"seed_data/category.product\")):\n row = row.rstrip()\n name = row.split(\"|\")\n product_category = ProductCategoryModel(name=name)\n db.session.add(product_category)\n\n for i, row in enumerate(open(\"seed_data/product.product\")):\n row = row.rstrip()\n name, short_description, long_description, product_category_id, img_path_xs, img_path_sm, img_path_md, img_path_lg = row.split(\"|\")\n product = ProductModel(name=name,\n short_description=short_description,\n long_description=long_description,\n product_category_id=product_category_id,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg)\n db.session.add(product)\n\n for i, row in enumerate(open(\"seed_data/location.product\")):\n row = row.rstrip()\n name, description, address1, address2, city, state, zip_code, country, latitude, longitude, direction_url = row.split(\"|\")\n location = LocationModel(name=name,\n description=description,\n address1=address1,\n address2=address2,\n city=city,\n state=state,\n zip_code=zip_code,\n country=country,\n latitude=latitude,\n longitude=longitude,\n direction_url=direction_url)\n db.session.add(location)\n\n for i, row in enumerate(open(\"seed_data/location_product.product\")):\n row = row.rstrip()\n location_id, product_id, price, num_available = row.split(\"|\")\n location_product = LocationProductModel(location_id=location_id,\n product_id=product_id,\n price=price,\n num_available=num_available)\n db.session.add(location_product)\n\n db.session.commit()",
"def get_products(categories_info):\n\n for name, urlnames in categories_info.items():\n for i in range(1, (NB_PAGES+1)):\n url = 'https://world.openfoodfacts.org/cgi/search.pl?\\\n search_tag=categories&search_terms={}&\\\n purchase_places=France&page_size={}&page={}&json=1'.format(\n urlnames, str(PRODUCTS_PER_PAGE), str(i))\n data = requests.get(url).json()\n name_file = 'data/Products_' + name + str(i) + '.json'\n with open(name_file, 'w') as file:\n file.write(json.dumps(data, indent=4))",
"def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)",
"def get_products(self, data, category):\r\n for product_information in data['products']:\r\n name = product_information.get('product_name', None)\r\n # in order to remove linebreak from product name\r\n # print(\"WITH LINEBREAK : \", repr(name))\r\n if name:\r\n name = name.replace('\\n', '')\r\n # print(\"WITHOUT LINEBREAK : \", repr(name))\r\n category = Categories.objects.get(name=category)\r\n nutriscore = product_information.get('nutrition_grades', None)\r\n link = product_information.get('url', None)\r\n image = product_information.get('image_url', None)\r\n nutrition_image = product_information.get\\\r\n ('image_nutrition_url', None)\r\n if category is None \\\r\n or name is None \\\r\n or len(name) > 75 \\\r\n or nutriscore is None \\\r\n or link is None \\\r\n or image is None \\\r\n or nutrition_image is None:\r\n continue\r\n else:\r\n try:\r\n product, created = Products.objects.get_or_create(\r\n name=str(name),\r\n category=category,\r\n nutriscore=nutriscore,\r\n link=link,\r\n image=image,\r\n nutrition_image=nutrition_image,\r\n )\r\n if created:\r\n product.save()\r\n print(product.name)\r\n\r\n except Products.DoesNotExist:\r\n raise CommandError(\"Products %s could not been reached\"\r\n % name)\r\n except IntegrityError:\r\n continue",
"def products():\n username = session['username']\n api_key = session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)"
] |
[
"0.65700144",
"0.6372123",
"0.63135356",
"0.6217432",
"0.6184496",
"0.6161715",
"0.6153932",
"0.61406696",
"0.60312456",
"0.60286355",
"0.6022135",
"0.6015756",
"0.60124123",
"0.60045147",
"0.59682816",
"0.5967611",
"0.596441",
"0.59624016",
"0.5950973",
"0.5938882",
"0.59184307",
"0.59048086",
"0.59042263",
"0.58838004",
"0.58766514",
"0.5822973",
"0.5811894",
"0.58060557",
"0.5748261",
"0.5724048"
] |
0.8113893
|
0
|
Test that core.learncurve.learning_curve raises NotADirectoryError
|
def test_learncurve_raises_not_a_directory(dir_option_to_change,
specific_config,
tmp_path, device):
options_to_change = [
{"section": "LEARNCURVE", "option": "device", "value": device},
dir_option_to_change
]
toml_path = specific_config(
config_type="learncurve",
model="teenytweetynet",
audio_format="cbin",
annot_format="notmat",
options_to_change=options_to_change,
)
cfg = vak.config.parse.from_toml_path(toml_path)
model_config = vak.config.model.config_from_toml_path(toml_path, cfg.learncurve.model)
# mock behavior of cli.learncurve, building `results_path` from config option `root_results_dir`
results_path = cfg.learncurve.root_results_dir / 'results-dir-timestamp'
with pytest.raises(NotADirectoryError):
vak.learncurve.learning_curve(
model_name=cfg.learncurve.model,
model_config=model_config,
dataset_path=cfg.learncurve.dataset_path,
window_size=cfg.dataloader.window_size,
batch_size=cfg.learncurve.batch_size,
num_epochs=cfg.learncurve.num_epochs,
num_workers=cfg.learncurve.num_workers,
root_results_dir=None,
results_path=results_path,
spect_key=cfg.spect_params.spect_key,
timebins_key=cfg.spect_params.timebins_key,
normalize_spectrograms=cfg.learncurve.normalize_spectrograms,
shuffle=cfg.learncurve.shuffle,
val_step=cfg.learncurve.val_step,
ckpt_step=cfg.learncurve.ckpt_step,
patience=cfg.learncurve.patience,
device=cfg.learncurve.device,
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_validate_nagl_model_path_failed():\n with pytest.raises(FileNotFoundError):\n validate_nagl_model_path(\"does-not-exist.pt\")",
"def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")",
"def test_invalid_dir(self):\n self.assertRaises(OSError, awstats_reader.AwstatsReader, '/tmp/XYZ', 'example.com')",
"def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")",
"def test_bad_dirrectory_path(self):\n\n portfolio = PortfolioPerformanceData(self.data_path+'NonExisting')\n for func in (portfolio.calculate_asset_performance,\n portfolio.calculate_currency_performance,\n portfolio.calculate_total_performance):\n self.assertIsNone(func(*self.boarder))",
"def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError",
"def test_error(file_path):\n assert check_file(file_path), \"Training file is not generated\"",
"def test_valid_dir_raises():\n with pytest.raises(ValueError):\n assert cli._valid_dir(__file__)",
"def path_check(dataset: LAMLDataset):\n roles = dataset.roles\n features = dataset.features\n for f in features:\n assert roles[f].name == \"Path\", \"Only path accepted in this transformer\"",
"def test_education():\n test_path = tempfile.mkdtemp()\n x_train, metadata = education(test_path)\n try:\n assert x_train.shape == (50, 6)\n except:\n shutil.rmtree(test_path)\n raise()",
"def test_validate_nagl_model_path(model_name):\n model_path = validate_nagl_model_path(model_name)\n assert os.path.exists(model_path)",
"def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"",
"def test_BenchmarkSuite_invalid_path_access(benchmark_suite: typing.Callable):\n bs = benchmark_suite()\n with test.Raises(TypeError):\n _ = bs.path",
"def _validate_path(self, data_dir):\n if (os.path.exists(data_dir) \n and os.path.isdir(data_dir)\n and os.listdir(data_dir)):\n\n self.tr_img_dir = data_dir + self.TRAIN_IMAGES\n self.tr_lbl_dir = data_dir + self.TRAIN_LABELS\n self.te_img_dir = data_dir + self.TEST_IMAGES\n self.te_lbl_dir = data_dir + self.TEST_LABELS\n\n if (self._check_images_and_labels(self.tr_img_dir, self.tr_lbl_dir) \n and self._check_images_and_labels(self.te_img_dir, self.te_lbl_dir)):\n \n return True\n \n return False",
"def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False",
"def test_ensure_dir_exists(self):\n pass",
"def test_get_dataset_path_unknown_domain(self) -> None:\n framework = \"tensorflow\"\n domain = \"domain\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'",
"def test_dfi_raises_if_folder_missing(self):\n with TemporaryDirectory() as tmpdirname:\n # start with empty project (no data/coefficients subdirectory)\n with raises(SmifDataNotFoundError):\n CSVDataStore(tmpdirname)",
"def test_get_denoiser_data_dir(self):\r\n\r\n obs = get_denoiser_data_dir()\r\n\r\n self.assertTrue(exists(obs))\r\n self.assertTrue(exists(obs + 'FLX_error_profile.dat'))",
"def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()",
"def test_get_dataset_path_unknown_framework(self) -> None:\n framework = \"unknown\"\n domain = \"image_recognition\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass",
"def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])",
"def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)",
"def test_failure():\n with pytest.raises(ModuleNotFoundError):\n import torch # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import tensorflow # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import horovod # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n from ray import tune # noqa: F401",
"def test_invalid_estimator(estimator: Any) -> None:\n mapie = MapieClassifier(estimator=estimator)\n with pytest.raises(ValueError, match=r\".*Invalid estimator.*\"):\n mapie.fit(X_toy, y_toy)",
"def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)",
"def test_test_directory_no_workspace(self):\n self.logger.info(\"STEP: Enter a test directory without a workspace.\")\n self.workspace = Workspace(Mock())\n self.logger.info(\"STEP: Verify that an exception was raised.\")\n with self.assertRaises(Exception):\n with self.workspace.test_directory(\"dir1\"):\n pass",
"def test_text_classifier_create_from_path(self):\n pass"
] |
[
"0.6309109",
"0.6294871",
"0.61584276",
"0.59203106",
"0.59100646",
"0.5750234",
"0.5733092",
"0.5653635",
"0.56445676",
"0.5610109",
"0.55922353",
"0.55654114",
"0.5561165",
"0.5520227",
"0.5439886",
"0.5432369",
"0.5410431",
"0.54093164",
"0.5398113",
"0.53962386",
"0.5392251",
"0.5387092",
"0.5381794",
"0.5301192",
"0.52937937",
"0.5293073",
"0.52897674",
"0.5285915",
"0.5285476",
"0.52801454"
] |
0.7250528
|
0
|
Downloads and extracts a zip file from S3.
|
def download_zip_file(s3_client, bucket, key):
with tempfile.NamedTemporaryFile() as temp_file:
s3_client.download_file(bucket, key, temp_file.name)
with zipfile.ZipFile(temp_file.name, "r") as zip_file:
yield zip_file
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass",
"def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)",
"def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)",
"def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise",
"def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)",
"def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file",
"def download_from_s3(s3_resource, photo):\n try:\n bucket, key = photo.replace(\"s3://\", \"\").split(\"/\", 1)\n local_file = os.path.basename(photo)\n except ValueError as err:\n logger.exception(\"Couldn't get S3 info for %s: %s\", photo)\n raise\n\n try:\n logger.info(\"Downloading %s\", photo)\n s3_resource.Bucket(bucket).download_file(key, local_file)\n except ClientError:\n logger.exception(\"Couldn't download %s from S3.\", photo)\n raise\n\n return local_file",
"def download_reference_file_from_s3(self, s3_file, local_file, unzip=False):\n if unzip:\n rc = subprocess.call(\"{s3cmd} get {s3_file} - | gunzip > {local_file}\"\n .format(\n s3cmd=self.cmds[\"s3cmd\"],\n s3_file=s3_file,\n local_file=local_file, \n ), shell=True)\n else:\n rc = subprocess.call(\"{s3cmd} get {s3_file} {local_file}\"\n .format(\n s3cmd=self.cmds[\"s3cmd\"],\n s3_file=s3_file,\n local_file=local_file,\n ), shell=True)\n return rc",
"def download_file_from_s3_public_bucket(bucket, object, output_file):\n botocore_config = Config(signature_version=UNSIGNED)\n s3_client = boto3.client(\"s3\", config=botocore_config)\n s3_client.download_file(bucket, object, output_file)",
"def get_zipped_artifact(s3, artifact):\n tmp_dir = tempfile.mkdtemp()\n bucket = artifact['location']['s3Location']['bucketName']\n key = artifact['location']['s3Location']['objectKey']\n with tempfile.NamedTemporaryFile() as tmp_file:\n s3.download_file(bucket, key, tmp_file.name)\n with zipfile.ZipFile(tmp_file.name, 'r') as zip:\n zip.extractall(tmp_dir)\n return tmp_dir",
"def download_file(bucket,file_name):\n with open(file_name, 'wb') as f:\n s3.download_fileobj(bucket, file_name,f)\n print(file_name, \": is downloaded\")",
"def download_from_s3(self, filename: str, filename_output: Optional[str] = None) -> str:\n if self.aws_access_key_id is None:\n raise Exception(\n 'To use `download_from_s3` you need to pass '\n '`aws_access_key_id` and '\n '`aws_secret_access_key`'\n )\n\n s3 = boto3.client('s3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key)\n\n # Progress bar\n meta_data = s3.head_object(Bucket=self.bucket_name, Key=filename)\n size = int(meta_data.get('ContentLength', 0))\n progress_bar = self._progress(size)\n\n # Downloading file\n s3.download_file(self.bucket_name, filename,\n filename if filename_output is None else filename_output,\n Callback=progress_bar)",
"def download(self, cloud_path):\n zip_file = os.path.join(self.root, ZIPFILE)\n unzip_dir = os.path.join(self.root, UNZIP_NAME)\n\n if os.path.isfile(zip_file):\n logger.debug(f\"File {zip_file} exists. Skip download.\")\n else:\n client = GCSClient()\n object_key = os.path.join(NYU_GCS_PATH, ZIPFILE)\n\n logger.debug(\n f\"Downloading file {zip_file} from gs://{const.GCS_BUCKET}/\"\n f\"{object_key}\"\n )\n client.download(const.GCS_BUCKET, object_key, zip_file)\n\n if os.path.isdir(unzip_dir):\n logger.debug(f\"File {unzip_dir} exists. Skip unzip.\")\n else:\n # unzip the file\n with ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(self.root)\n logger.debug(f\"Unzip file from {zip_file}\")",
"def unpack(uri):\n conn = boto.connect_s3(anon=True, host='s3.amazonaws.com')\n bucket = conn.get_bucket('commoncrawl')\n key_ = Key(bucket, uri)\n file_ = warc.WARCFile(fileobj=GzipStreamFile(key_))\n return file_",
"def download_folder_from_s3(s3_uri, folder_path, connection=None):\n if connection:\n run_out = connection.run(f\"aws s3 cp --recursive {s3_uri}/ {folder_path}/\")\n else:\n run_out = run(f\"aws s3 cp --recursive {s3_uri}/ {folder_path}/\")\n\n return run_out.return_code",
"def downloaded_from_s3(filepath, output_path):\n # Declare S3 as the destination\n s3 = boto3.client('s3',\n aws_access_key_id=key_id,\n aws_secret_access_key=secret_key)\n\n filename = filepath.split(\"/\")[-1]\n if output_path:\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n s3.download_file('newsphi', filepath, output_path+filename)",
"def download_s3_file(key, bucket, output_filename):\n s3_client = boto3.client('s3')\n s3_client.download_file(bucket, key, output_filename)\n return True",
"def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))",
"def download_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n try:\n lwr_AIES.download_file(Key=s3_path, Filename=local_path)\n print(\"Download successful\")\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise",
"def download_file(s3_path, local_path):\n s3.meta.client.download_file(bucket_name, s3_path, local_path)",
"def _download_s3_folder(s3, bucket_name, s3_store_path, local_dir):\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=s3_store_path):\n target = os.path.join(local_dir, os.path.relpath(obj.key, s3_store_path))\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target))\n if obj.key[-1] == '/':\n continue\n bucket.download_file(obj.key, target)\n logger.info(\"{} Downloaded.\".format(obj.key)) # log progress",
"def _download_file(bucket: str, key: str) -> str:\n tmp_file_name = f\"/tmp/logs\"\n\n try:\n with open(tmp_file_name, \"wb\") as data:\n s3cl.download_fileobj(bucket, key, data)\n except Exception as e:\n print(type(e).__name__, e)\n f = open(tmp_file_name, \"w\")\n f.write(\"\")\n f.close()\n try:\n with gzip.open(tmp_file_name, mode=\"rt\") as f:\n x = f.read()\n return x\n except Exception as e:\n print(type(e).__name__, e, key)\n return \"\"",
"def download(self, bucket_name=None,\n object_key=None,\n dest=None):\n\n if bucket_name == None or \\\n object_key == None or \\\n dest == None:\n u_print(\" Error - argument is missing\")\n\n u_print_d('S3.download() - bucket=[{}] key=[{}] dest=[{}]'.format(bucket_name,\n object_key,\n dest))\n return self.s3.Object(bucket_name, object_key).download_file(dest)",
"def pull_zip_file(bucket, client, source, file_name, version):\n\n version_details = {\"version_id\": None, \"last_modified\": None}\n\n try:\n if version is None:\n\n response = client.list_object_versions(\n Bucket=bucket, Prefix=f\"{source}/{file_name}\"\n )\n version_details[\"version_id\"] = [\n x[\"VersionId\"] for x in response[\"Versions\"] if x[\"IsLatest\"]\n ][0]\n last_modified = [\n x[\"LastModified\"] for x in response[\"Versions\"] if x[\"IsLatest\"]\n ][0]\n version_details[\"last_modified\"] = datetime.strftime(\n last_modified, \"%Y-%m-%d %H:%M:%S\"\n )\n\n client.download_file(bucket, f\"{source}/{file_name}\", file_name)\n\n else:\n head = client.head_object(Bucket=bucket, Key=f\"{source}/{file_name}\")\n version_details[\"version_id\"] = version\n version_details[\"last_modified\"] = datetime.strftime(\n head[\"LastModified\"], \"%Y-%m-%d %H:%M:%s\"\n )\n client.download_file(\n bucket,\n f\"{source}/{file_name}\",\n file_name,\n ExtraArgs={\"VersionId\": version},\n )\n\n except ClientError as e:\n logging.error(e)\n\n return (False, version_details)\n print(\n f\"Downloaded {file_name.split('/')[-1]} version {version_details['version_id']} last modified {version_details['last_modified']}\"\n )\n\n return (True, version_details)",
"def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)",
"def download_from_s3(s3_path, local_path):\n # Connect to s3 using aws access key\n try:\n s3 = boto3.resource('s3',\n aws_access_key_id=os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.environ.get(\"AWS_SECRET_ACCESS_KEY\"))\n logger.info(\"AWS S3 Connected.\")\n except botocore.exceptions.PartialCredentialsError:\n logger.error(\"AWS Credentials Invalid.\")\n\n bucket_name, s3_store_path = _parse_s3(s3_path)\n _download_s3_folder(s3, bucket_name, s3_store_path, local_path)\n logger.info(\"All Image Downloaded from S3.\")",
"def download(self):\n if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:\n return\n print('Downloading %s' % self.path)\n if dry_run:\n return\n ldir = os.path.dirname(self.lpath)\n if not os.path.isdir(ldir):\n os.makedirs(ldir, 0o755)\n self.arts.s3_bucket.download_file(self.path, self.lpath)",
"def _download_s3_file(\n local_path: Path, s3_path: str,\n download_mode: DownloadMode = DownloadMode.SIZE_AND_TIMESTAMP\n) -> None:\n if (download_mode == DownloadMode.FILE_DOES_NOT_EXIST and local_path.exists()): # noqa: E501\n return\n\n s3 = boto3.resource(\"s3\")\n parsed = urlparse(s3_path)\n bucket = s3.Bucket(parsed.netloc)\n\n # make sure local parent dir is created\n key = parsed.path.strip('/')\n\n s3_obj = bucket.Object(key=key)\n\n if (local_path.exists() and download_mode in (DownloadMode.SIZE_ONLY, DownloadMode.SIZE_AND_TIMESTAMP)): # noqa: E501\n stat = local_path.stat()\n\n if s3_obj.content_length == stat.st_size:\n if download_mode == DownloadMode.SIZE_ONLY:\n return\n\n if s3_obj.last_modified.timestamp() == stat.st_mtime:\n return\n\n local_path.parent.mkdir(parents=True, exist_ok=True)\n\n try:\n s3_obj.download_file(str(local_path))\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if error_code == '404':\n raise InvalidS3Path(\n s3_path=s3_path, reason=InvalidS3Path.Reason.NO_OBJECT_FOUND\n ) from e\n raise e",
"def _read_s3_url(cls, s3_url):\n\n parsed_url = urllib.parse.urlparse(s3_url)\n return cls.s3.get_object(Bucket=parsed_url.netloc,\n Key=parsed_url.path.lstrip(\"/\"))[\"Body\"].read()",
"def download(self, file_url):\n url = self.base_url + \"/storage-service/cloud-storage/s3/file/download?url={0}\".format(file_url)\n\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response"
] |
[
"0.722156",
"0.7164689",
"0.7164689",
"0.6928136",
"0.687183",
"0.68641627",
"0.67871815",
"0.67756826",
"0.6773488",
"0.6746224",
"0.67218864",
"0.6695276",
"0.66722906",
"0.66696244",
"0.6659661",
"0.66388583",
"0.66098696",
"0.6505979",
"0.6503254",
"0.64721113",
"0.64717543",
"0.64709246",
"0.6469283",
"0.6456141",
"0.6442348",
"0.6425615",
"0.6424021",
"0.64160216",
"0.64158964",
"0.6362425"
] |
0.72148407
|
1
|
Fix END (missing END, replace End with END, END position should be the same as FOR, etc.).
|
def fix_end(self, node):
if node.header.tokens[0].type == Token.SEPARATOR:
indent = node.header.tokens[0]
else:
indent = Token(Token.SEPARATOR, self.formatting_config.separator)
node.end = End([indent, Token(Token.END, "END"), Token(Token.EOL)])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def end():\n return EndBlock()",
"def RespEnd(builder):\n return End(builder)",
"def GroundExcelEnd(builder):\n return End(builder)",
"def end(self):\n self.set_initial_offset(1e6)",
"def GachaCraftNodeExcelEnd(builder):\n return End(builder)",
"def endComment():\r\n\tglobal sEType, sEVar, sEData, iIndent\r\n\tsEType = BRIEF\r\n\tsEVar = None\r\n\tsEData = \"\"\r\n\tiIndent = -1",
"def VoiceCommonExcelEnd(builder):\n return End(builder)",
"def _trim_end(self, tokens: list[Token]) -> Block:\n i = last_token = self.end - 1\n while tokens[i].name in NON_CODING_TOKENS | {'DEDENT', 'NEWLINE'}:\n # if we find an indented comment inside our block, keep it\n if (\n tokens[i].name in {'NL', 'NEWLINE'} and\n tokens[i + 1].name == UNIMPORTANT_WS and\n len(tokens[i + 1].src) > self._initial_indent(tokens)\n ):\n break\n # otherwise we've found another line to remove\n elif tokens[i].name in {'NL', 'NEWLINE'}:\n last_token = i\n i -= 1\n return self._replace(end=last_token + 1)",
"def EquipmentStatExcelEnd(builder):\n return End(builder)",
"def AttendanceRewardExcelEnd(builder):\n return End(builder)",
"def end(self, end: pos.Pos) -> None:\n self.__end = end",
"def dend():\n #\n # this is the omega\n inlist = list(\"end\" + \"\\n\") # WTF?\n #\n # change data into a list element\n outlist[1247:1250] = inlist # place data in the list in the correct place\n outstr = \"\".join(outlist)\n # print(outstr)\n # print(len(outstr))\n # of = open(\"workfile\", \"w\")\n # of.write(outstr)",
"def remove_bottom_end_tags(resume_output):\n # print(\"template before removing end tag:\", resume_output)\n\n # sets line count to 0\n line_count = 0\n\n # iterates through all lines in resume output file\n for line in resume_output:\n # counts the line\n line_count += 1\n # finds body end tag and stores its line number\n if \"</body>\" in line:\n body_end_tag_line_index = line_count - 1\n\n # deletes html code starting from body end tag to the end of the file\n del resume_output[body_end_tag_line_index:]\n\n # print(\"template after removing end tags:\", resume_output)\n # print(resume_output)\n \n # returns output code\n return resume_output",
"def end(self):\n ...",
"def _finish_element(self):\n assert self.currentelem.indexend is True\n self.currentelem.indexend = self._parser.CurrentByteIndex + self.baseposition\n self.currentelem = self.currentelem.parent",
"def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1",
"def FrameEnd(builder):\n return End(builder)",
"def end_table(self):\n pass",
"def end(self) -> None:",
"def seekEnd(self):\n self.oFile.seek(0, 2)",
"def end_table(self):\n raise NotImplementedError",
"def PricingEnd(builder):\n return End(builder)",
"def _update_end_lineno():\n if origin:\n record.origin.line_end = lineno",
"def EventContentMissionExcelEnd(builder):\n return End(builder)",
"def end(self):\n pass",
"def end(self):\n pass",
"def end(self):\n pass",
"def position_at_end(self, block):\n self._curblock = block\n self._lastop = block.tail or 'tail'",
"def li_recurse_end(self, lin):\n\n raise NotImplementedError(\"li_recurse_end() not implemented \"\n \"in superclass\")",
"def InvocationEnd(builder):\n return End(builder)"
] |
[
"0.6314465",
"0.62066823",
"0.6137427",
"0.60304046",
"0.59303176",
"0.58672017",
"0.5803466",
"0.57732284",
"0.57505035",
"0.5628803",
"0.5566523",
"0.5530937",
"0.55015606",
"0.54930145",
"0.54926085",
"0.54899275",
"0.5474391",
"0.5457475",
"0.545294",
"0.54398143",
"0.54327327",
"0.5397803",
"0.5387508",
"0.53657115",
"0.53643996",
"0.53643996",
"0.53643996",
"0.53618073",
"0.5359611",
"0.5350799"
] |
0.721261
|
0
|
Split statements from a node into those that belong to it and those that belong to outside nodes.
|
def collect_inside_statements(self, node):
new_body = [[], []]
is_outside = False
starting_col = self.get_column(node)
for child in node.body:
if not isinstance(child, EmptyLine) and self.get_column(child) <= starting_col:
is_outside = True
new_body[is_outside].append(child)
while new_body[0] and isinstance(new_body[0][-1], EmptyLine):
new_body[1].insert(0, new_body[0].pop())
return new_body
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def split_sub_statement(stream, node_types):\n \n if isinstance(stream, Node):\n stream = stream.get_inner_body()\n \n current_node = None\n \n try:\n while True:\n \n token = next(stream)\n #print('current token ', token)\n \n matched = False\n \n for node_type in node_types:\n match = Node.match_begin(node_type, token, stream)\n if match:\n \n matched = True\n previous_node = current_node\n \n # build current node\n current_node = node_type()\n current_node.children += match\n \n if previous_node:\n yield previous_node\n \n # stop looking for a match \n break\n \n # non matching token \n if not matched:\n \n if current_node:\n current_node.children.append(token)\n else:\n yield token\n except:\n pass\n\n if current_node: \n yield current_node",
"def _split_node(self, node):\n if isinstance(node, Expression):\n if node.field in self._annotations:\n # If the node is querying an annotation it has to be applied to\n # the annotated query\n return (None, node)\n else:\n # Otherwise, it can be applied to the underlying query\n return (node, None)\n elif isinstance(node, AndNode):\n # This takes advantage of the fact that, because we are combining with\n # AND, the ordering doesn't matter\n left, right = [], []\n for n in node.children:\n _0, _1 = self._split_node(n)\n if _0:\n left.append(_0)\n if _1:\n right.append(_1)\n if len(left) >= 2:\n left = AndNode(*left)\n else:\n left = left[0] if left else None\n if len(right) >= 2:\n right = AndNode(*right)\n else:\n right = right[0] if right else None\n return (left, right)\n elif isinstance(node, OrNode):\n # Once ORs are involved, we can't split the query - either the whole\n # node is applied to the underlying query or the whole node is applied\n # to this query directly\n for n in node.children:\n left, right = self._split_node(n)\n if right:\n return (None, node)\n return (node, None)\n elif isinstance(node, NotNode):\n # We can't split a NOT query either\n left, right = self._split_node(node.child)\n if right:\n return (None, node)\n return (node, None)\n else:\n raise ValueError(\"Unknown node type '{}'\".format(repr(node)))",
"def statements(self):\n node = self.annotated_ast_node\n nodes_subtexts = list(_split_code_lines(node.body, self.text))\n if nodes_subtexts == [(self.ast_node.body, self.text)]:\n # This block is either all comments/blanks or a single statement\n # with no surrounding whitespace/comment lines. Return self.\n return (PythonStatement._construct_from_block(self),)\n cls = type(self)\n statement_blocks = [\n cls.__construct_from_annotated_ast(subnodes, subtext, self.flags)\n for subnodes, subtext in nodes_subtexts]\n # Convert to statements.\n statements = []\n for b in statement_blocks:\n statement = PythonStatement._construct_from_block(b)\n statements.append(statement)\n # Optimization: set the new sub-block's ``statements`` attribute\n # since we already know it contains exactly one statement, itself.\n assert 'statements' not in b.__dict__\n b.statements = (statement,)\n return tuple(statements)",
"def BCT_operators(self):\n leaves_original = [node for node, degree in self.tree.out_degree().items() if degree == 0]\n\n def split(node):\n # ##\n # Return bin-tuple of the orphaned tree\n #\n # Parameter:\n # node: string\n #\n # Return:\n # (split_node_parent, node)\n # ##\n\n # Trivialize the orphaned subtree and re-allocate the position to\n # the child of the split_node_parent does not be cut.\n split_node_parent = M.tree.predecessors(node)[0]\n\n if M.tree.predecessors(split_node_parent) != []:\n split_node_parent_parent = M.tree.predecessors(split_node_parent)[0]\n else:\n split_node_parent_parent = None\n M.tree.node[node]['position'] = 'trivial'\n the_other_child = [n for n in M.tree.successors(split_node_parent) if n != node][0]\n\n # Shift the child to the position of its parent\n M.tree.node[the_other_child]['position'] = M.tree.node[split_node_parent]['position']\n M.tree.node[split_node_parent]['position'] = 'trivial'\n\n # Cutting the edge which is related to the parent of the split node\n try:\n cutting_edges_list = [(n1, n2) for n1, n2 in M.tree.edges() if\n (n1 == split_node_parent or n2 == split_node_parent) and (n2 != node)]\n\n # Remove edeges\n for i, j in cutting_edges_list:\n M.tree.remove_edge(i, j)\n\n # Establish the edge on the new movement\n if split_node_parent_parent:\n i, j = [n1 for n1, n2 in cutting_edges_list if n1 == split_node_parent_parent][0], \\\n [n2 for n1, n2 in cutting_edges_list if n2 == the_other_child][0]\n\n M.tree.add_edge(i, j)\n\n except Exception:\n raise Exception(\"in the part of split() suc-function of BCT_operator() function\")\n\n return split_node_parent, node\n\n\n def insertion(M, insert_node, split_node_parent, split_node, operator, split_node_position):\n # ##\n # Return a new binary construction tree\n #\n # Parameter:\n # split_node_parent: string, the old operator\n # split_node: string, the cut node, is used to reinsert to the binary construction tree\n # operator: string, the new operator, typically two options (\"series\" and \"parallel\")\n # split_node_position: string, the new position allocated to the cut node.\n #\n # Return:\n # M: BinaryConstructionTree\n # ##\n\n try:\n # Relabel the arributes of relevant nodes.\n mapping = {split_node_parent: operator}\n M.tree = nx.relabel_nodes(M.tree, mapping)\n M.tree.node[operator]['position'] = M.tree.node[insert_node]['position']\n insert_node_parent = M.tree.predecessors(insert_node)[0]\n\n # Re-allocate the position to the relevant nodes.\n if split_node_position == 'right' and split_node_position == M.tree.node[insert_node]['position']:\n M.tree.node[insert_node]['position'] = 'left'\n\n elif split_node_position == 'left' and split_node_position == M.tree.node[insert_node]['position']:\n M.tree.node[insert_node]['position'] = 'right'\n M.tree.node[split_node]['position'] = split_node_position\n\n # print M.tree.edges()\n # Remove the related edges of the current binary tree\n M.tree.remove_edge(insert_node_parent, insert_node)\n\n # Add the new edges to binary tree\n M.tree.add_edge(insert_node_parent, operator)\n M.tree.add_edge(operator, insert_node)\n\n # Eliminate the redundant edge.\n # for i, j in M.tree.edges():\n\n except Exception:\n raise Exception('in tha part of insertion() sub-function of BCT_operator() function')\n\n return M\n\n # To preserve meta structure.\n M_self = copy.deepcopy(self)\n\n # To traverse all possible isomorphic binary tree\n for node in leaves_original:\n\n M = copy.deepcopy(M_self)\n\n # Do split operation\n split_node_parent, 
split_node = split(node)\n\n # The children that are used to be inserted\n leaves_insertion = leaves_original[:]\n leaves_insertion.remove(split_node)\n\n # print leaves_insertion\n # Do insertion operation\n for insertnode in leaves_insertion:\n\n # Choose the operator (\"series\" and \"parallel\")\n for operator in ['series', 'parallel']:\n operator_id = len([n for n in M.tree.nodes() if n.__contains__(operator)])\n operator_entity = operator + str(operator_id)\n for position in ['left', 'right']:\n M_split = copy.deepcopy(M)\n M_split = insertion(M_split, insertnode, split_node_parent, split_node, operator_entity,\n position)\n\n is_heteromorphism = 0\n\n identity_series_order = M_split.identity_isomorphic_order()\n\n for m in self.heteromorphism:\n m_identity_series_order = m.identity_isomorphic_order()\n # print \"m_identity:{}\".format(m_identity_series_order)\n # print \"M_split:{}\".format(identity_series_order)\n if identity_series_order != m_identity_series_order:\n is_heteromorphism += 1\n\n if is_heteromorphism == len(self.heteromorphism):\n self.heteromorphism.append(M_split)\n # print M_split.series_partial_order_position_representation()\n # print M_split.tree.nodes(data = True)\n # print\n\n return",
"def _analyse_statements(\n self, statements: List[ast.stmt], *, next: CFNode\n ) -> CFNode:\n for statement in reversed(statements):\n analyse = getattr(self, \"_analyse_stmt_\" + type(statement).__name__)\n next = analyse(statement, next=next)\n return next",
"def _split_code_lines(ast_nodes, text):\n if not ast_nodes:\n yield ([], text)\n return\n assert text.startpos <= ast_nodes[0].startpos\n assert ast_nodes[-1].startpos < text.endpos\n if text.startpos != ast_nodes[0].startpos:\n # Starting noncode lines.\n yield ([], text[text.startpos:ast_nodes[0].startpos])\n end_sentinel = _DummyAst_Node()\n end_sentinel.startpos = text.endpos\n for node, next_node in zip(ast_nodes, ast_nodes[1:] + [end_sentinel]):\n startpos = node.startpos\n next_startpos = next_node.startpos\n assert startpos < next_startpos\n # We have the start position of this node. Figure out the end\n # position, excluding noncode lines (standalone comments and blank\n # lines).\n if hasattr(node, 'endpos'):\n # We have an endpos for the node because this was a multi-line\n # string. Start with the node endpos.\n endpos = node.endpos\n assert startpos < endpos <= next_startpos\n # enpos points to the character *after* the ending quote, so we\n # know that this is never at the beginning of the line.\n assert endpos.colno != 1\n # Advance past whitespace an inline comment, if any. Do NOT\n # advance past other code that could be on the same line, nor past\n # blank lines and comments on subsequent lines.\n line = text[endpos : min(text.endpos, FilePos(endpos.lineno+1,1))]\n if _is_comment_or_blank(line):\n endpos = FilePos(endpos.lineno+1, 1)\n else:\n endpos = next_startpos\n assert endpos <= text.endpos\n # We don't have an endpos yet; what we do have is the next node's\n # startpos (or the position at the end of the text). Start there\n # and work backward.\n if endpos.colno != 1:\n if endpos == text.endpos:\n # There could be a comment on the last line and no\n # trailing newline.\n # TODO: do this in a more principled way.\n if _is_comment_or_blank(text[endpos.lineno]):\n assert startpos.lineno < endpos.lineno\n if not text[endpos.lineno-1].endswith(\"\\\\\"):\n endpos = FilePos(endpos.lineno,1)\n else:\n # We're not at end of file, yet the next node starts in\n # the middle of the line. This should only happen with if\n # we're not looking at a comment. [The first character in\n # the line could still be \"#\" if we're inside a multiline\n # string that's the last child of the parent node.\n # Therefore we don't assert 'not\n # _is_comment_or_blank(...)'.]\n pass\n if endpos.colno == 1:\n while (endpos.lineno-1 > startpos.lineno and\n _is_comment_or_blank(text[endpos.lineno-1]) and\n (not text[endpos.lineno-2].endswith(\"\\\\\") or\n _is_comment_or_blank(text[endpos.lineno-2]))):\n endpos = FilePos(endpos.lineno-1, 1)\n assert startpos < endpos <= next_startpos\n yield ([node], text[startpos:endpos])\n if endpos != next_startpos:\n yield ([], text[endpos:next_startpos])",
"def visit_Compound(self, node):\n for statement in node.statements:\n self.visit(statement)",
"def split_node(node: saldag.OpNode):\n\n # Only dealing with single child case for now\n assert (len(node.children) <= 1)\n clone = copy.deepcopy(node)\n clone.out_rel.rename(node.out_rel.name + \"_obl\")\n clone.parents = set()\n clone.children = set()\n clone.is_mpc = True\n child = next(iter(node.children), None)\n saldag.insert_between(node, child, clone)",
"def _split_sql(self):\n return [stmt.strip() for stmt in RE_SQL_STATEMENT.split(self.sql) if stmt.strip()]",
"def split_merge_comments(statements):\n\n def is_genuine_ending(cur):\n return ((cur.strip()[:2] == '(*' and cur.strip()[-2:] == '*)') # cur is just a comment\n or cur.strip()[-1:] in '.}'\n or cur.strip() in BRACES)\n\n cur = None\n comment_level = 0\n for stmt in statements:\n str_count = 0\n for i in re.split(r'(?<=\\*\\)) ', stmt.replace('*)', '*) ')):\n if str_count % 2 == 0:\n i2 = re.sub('\"[^\"]*\"|\"[^\"]*$', '', i)\n else:\n i2 = re.sub('^[^\"]*\"|\"[^\"]*\"', '', i)\n str_count += i.count('\"')\n #print((repr(i), comment_level, repr(i2)))\n if '(*' not in i2 and '*)' not in i2:\n if comment_level < 0:\n if cur is not None:\n for ret in split_leading_braces(cur + i):\n yield ret\n cur = None\n else:\n raw_input('UNEXPECTED COMMENT: %s' % i)\n yield i\n comment_level = 0\n elif comment_level == 0:\n if cur is None:\n for ret in split_leading_braces(i):\n yield ret\n else:\n cur += i\n elif cur is None:\n cur = i\n else:\n cur += i\n else:\n comment_level += i2.count('(*') - i2.count('*)')\n if cur is None:\n cur = i\n else:\n cur += i\n if cur is not None:\n curs = list(split_leading_braces(cur))\n while curs and curs[0].strip() in BRACES:\n yield curs.pop(0)\n cur = ''.join(curs) if curs else None\n if cur is not None and is_genuine_ending(cur) and comment_level == 0: # clear out cur, if we're done with it\n yield cur\n cur = None\n if cur is not None:\n print('Unterminated comment')\n yield cur",
"def __traverse_node(self, node):\n lines = []\n for path, node_elem in node.filter(javalang.tree.BinaryOperation):\n if node_elem.operator == 'instanceof' and node_elem.operandl.position is not None:\n code_line = node_elem.operandl.position.line or node_elem.operandr.position.line\n lines.append(code_line)\n for path, node_elem in node.filter(javalang.tree.MethodInvocation):\n if node_elem.member == 'isInstance':\n lines.append(node_elem.position.line)\n\n return lines",
"def getVisitableNodesNamed(self):\n\n return ((\"statements\", self.subnode_statements),)",
"def visit_async_stmt(self, node: Node) -> Iterator[Line]:\n yield from self.line()\n\n children = iter(node.children)\n for child in children:\n yield from self.visit(child)\n\n if child.type == token.ASYNC or child.type == STANDALONE_COMMENT:\n # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async\n # line.\n break\n\n internal_stmt = next(children)\n if Preview.improved_async_statements_handling in self.mode:\n yield from self.visit(internal_stmt)\n else:\n for child in internal_stmt.children:\n yield from self.visit(child)",
"def Statements(self):\n states = list()\n while self.currtok[1].name in {\"SEMI\", \"LCURLY\", \"IDENT\", \"if\", \"print\", \"while\", \"return\"}:\n state = self.Statement()\n states.append(state)\n return StatementsStmt(states)",
"def visit_simple_stmt(self, node: Node) -> Iterator[Line]:\n prev_type: Optional[int] = None\n for child in node.children:\n if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):\n wrap_in_parentheses(node, child, visible=False)\n prev_type = child.type\n\n is_suite_like = node.parent and node.parent.type in STATEMENT\n if is_suite_like:\n if (\n self.mode.is_pyi or Preview.dummy_implementations in self.mode\n ) and is_stub_body(node):\n yield from self.visit_default(node)\n else:\n yield from self.line(+1)\n yield from self.visit_default(node)\n yield from self.line(-1)\n\n else:\n if (\n not (self.mode.is_pyi or Preview.dummy_implementations in self.mode)\n or not node.parent\n or not is_stub_suite(node.parent)\n ):\n yield from self.line()\n yield from self.visit_default(node)",
"def bypass_conds(self):\n for block in self.get_basic_blocks_followed_by_branches():\n constants = collect_constant_assigns(block.statements)\n branch = block.outgoing_edge[0]\n cond = deepcopy(branch.cond)\n cond = specialize_constants(cond, constants)\n try:\n if eval(astor.to_source(cond), silica.operators):\n # FIXME: Interface violation, need a remove method from blocks\n branch.true_edge.incoming_edges.add((block, \"\"))\n block.outgoing_edges = {(branch.true_edge, \"\")}\n else:\n branch.false_edge.incoming_edges.add((block, \"\"))\n block.outgoing_edges = {(branch.false_edge, \"\")}\n branch.incoming_edges.remove((block, \"\"))\n except NameError as e:\n # print(e)\n pass",
"def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')",
"def visit_stmt(\n self, node: Node, keywords: Set[str], parens: Set[str]\n ) -> Iterator[Line]:\n normalize_invisible_parens(\n node, parens_after=parens, mode=self.mode, features=self.features\n )\n for child in node.children:\n if is_name_token(child) and child.value in keywords:\n yield from self.line()\n\n yield from self.visit(child)",
"def body(self, statements):\n for stmt in statements:\n self.current_context = None\n self.visit(stmt)\n if self.current_label is not None:\n # Create a noop statement to hold the last label:\n self.create_stmt(dast.NoopStmt, statements[-1], nopush=True)",
"def gen_compound_statement(self, statement) -> None:\n for inner_statement in statement.statements:\n self.gen_stmt(inner_statement)",
"def _break_query(queryStr):\n logicStatements = []\n opList = []\n\n #TODO: Add check for balanced parenthesis\n\n\n if('(' in queryStr and ')' in queryStr):\n\n currentPairLevel = 0 #indicates the current nest level of parens\n pairSearchLevel = 0 #level of open paren that match is being found for\n openPairIndex = 0 #the index of the open parenthesis in queryStr\n closePairIndex = 0 #index of close parenthesis in queryStr\n outerOpenFound = False\n indexPairs = []\n for index, char in enumerate(queryStr):\n\n if(char=='('):\n currentPairLevel += 1\n #if first open parenthesis\n if(not outerOpenFound):\n openPairIndex = index\n pairSearchLevel = currentPairLevel\n outerOpenFound = True\n elif(char==')'):\n #if the parenthesis is at the same nest level as the open\n if(currentPairLevel == pairSearchLevel):\n closePairIndex = index\n indexPairs.append([openPairIndex,closePairIndex])\n outerOpenFound = False\n currentPairLevel -= 1\n\n #used the positions of the parenthesis to pull sliced from the query\n for index, pair in enumerate(indexPairs):\n logicStatements.append(queryStr[(pair[0]+1):pair[1]])\n\n #if not the last parenthesis pair then get operator after it\n if not(index == len(indexPairs)-1):\n opList.append(queryStr[pair[1]+1])\n \n return logicStatements, opList",
"def visit_Compound(self, n):\n self._create_new_node = True\n for stmt in n.block_items:\n self.visit(stmt)",
"def lify_split_buffers(lines):\n code_len = len(lines)\n for pos in range(code_len):\n line = lines[pos]\n if line.find('variable=buf_data_split') != -1:\n # Search for the variable declaration section\n decl_pos = -1\n prev_pos = pos - 1\n while prev_pos >= 0:\n prev_line = lines[prev_pos]\n if prev_line.find('Variable Declaration') != -1:\n decl_pos = prev_pos\n break\n prev_pos -= 1\n # Move the two code lines at [pos - 1] and [pos] to [decl_pos] and [decl_pos + 1]\n indent = lines[decl_pos].find('/*')\n line1 = ' ' * indent + lines[pos - 1].lstrip()\n line2 = ' ' * indent + lines[pos].lstrip()\n del lines[pos - 1]\n del lines[pos - 1]\n lines.insert(decl_pos, line1)\n lines.insert(decl_pos + 1, line2)\n\n return lines",
"def _process_stmt(self, node: ast.stmt) -> None:\n if isinstance(node, (ast.ClassDef, ast.FunctionDef)):\n self._process_def(node)\n elif isinstance(node, ast.Assign):\n self._process_assign(node)\n elif isinstance(node, ast.Expr):\n self._process_expr(node)\n else:\n self.visit(node)",
"def __ParseBlock(self, ast):\n for node in ast:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'statement':\n self.__ParseStatement(node_value)\n else:\n logging.info('Unknown AST node in message block: %s' % (node_name))",
"def getVisitableNodes(self):\n\n return self.subnode_statements",
"def normalize_and_split(self, statement):\n\n # normalize target in operand\n match = self.Statement.match(statement)\n if match:\n before, target, after = match.groups()\n\n # target is an immediate number\n if target.startswith('#'):\n number = self._address_parser.number(target[1:])\n if (number < 0x00) or (number > self.byteMask):\n raise OverflowError\n statement = before + '#$' + self.byteFmt % number\n\n # target is the accumulator\n elif target in ('a', 'A'): \n pass\n\n # target is an address or label\n else:\n address = self._address_parser.number(target)\n statement = before + '$' + self.addrFmt % address + after\n \n # strip unnecessary whitespace\n opcode = statement[:3].upper()\n operand = ''.join(statement[3:].split()).upper().strip()\n return (opcode, operand)",
"def _partition_literate(code, spans, opener):\n code = StringView(code, 0, len(code))\n code_acc = code[0:0]\n for span in spans:\n if isinstance(span, Comment) and span.v.match(opener):\n if code_acc:\n yield Code(code_acc)\n code_acc = code[span.v.end:span.v.end]\n yield span\n else:\n code_acc += span.v\n if code_acc:\n yield Code(code_acc)",
"def visit_match_case(self, node: Node) -> Iterator[Line]:\n normalize_invisible_parens(\n node, parens_after=set(), mode=self.mode, features=self.features\n )\n\n yield from self.line()\n for child in node.children:\n yield from self.visit(child)",
"def parse_statements(script):\n # pylint: disable=too-many-branches\n stmt = ''\n quote = None\n for char in script:\n if quote != '--':\n stmt += char\n if quote is None:\n if char == ';':\n yield stmt.strip()\n stmt = ''\n elif char == \"'\":\n quote = \"'\"\n elif char == '\"':\n quote = '\"'\n elif char == '$':\n quote = '$'\n elif char == '-':\n quote = '-'\n elif quote in ('\"', \"'\"):\n if quote == char:\n quote = None\n elif quote == '-':\n if char == '-':\n quote = '--'\n stmt = stmt[:-2]\n else:\n quote = None\n elif quote == '--':\n if char == '\\n':\n quote = None\n elif quote.startswith('$'):\n if quote != '$' and quote.endswith('$'):\n if stmt.endswith(quote):\n quote = None\n else:\n quote += char\n stmt = stmt.strip()\n if stmt:\n yield stmt"
] |
[
"0.6498111",
"0.5984435",
"0.59213006",
"0.57918197",
"0.57530564",
"0.56118774",
"0.56068283",
"0.5518854",
"0.54685843",
"0.54046243",
"0.5394992",
"0.5346872",
"0.53465676",
"0.5308791",
"0.52554667",
"0.5254312",
"0.5253405",
"0.5246265",
"0.51586336",
"0.51581365",
"0.5158118",
"0.51508373",
"0.5148516",
"0.51270175",
"0.51038295",
"0.50948673",
"0.50909096",
"0.50440574",
"0.50233465",
"0.50045437"
] |
0.73233235
|
0
|
Render action. This action returns a wiki page with an optional message, or redirects to a new page.
|
def render(self):
_ = self.request.getText
form = self.request.form
if form.has_key('cancel'):
# User canceled
return self.page.send_page(self.request)
try:
if not self.allowed():
raise ActionError(_('You are not allowed to edit this page.'))
elif not self.page.exists():
raise ActionError(_('This page is already deleted or was never created!'))
self.package()
except ActionError, e:
return self.page.send_page(self.request, msg=e.args[0])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def render_result():\n\n action = request.form['action']\n session['visibility'] = request.form['vis'] == 'pub'\n if(action == 'list'):\n resp = utils.get_posts(g.graph,session['visibility'],session['page']['id'])\n return render_template('display_posts.html', data = resp, next = resp['next'])\n elif (action == 'add'): \n return render_template('input_post.html')\n else:\n return redirect(url_for('error')) #TODO: add this page",
"def pagemainred():\n return render_template('do_action.html')",
"def new(self):\n flash_message(_(\"success message\"), \"success\")\n flash_message(_(\"warning message\"), \"warning\")\n flash_message(_(\"error message\"), \"error\")\n flash_message(_(\"notice message\"), \"notice\")\n return render('/derived/rock/new.mako')",
"def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'",
"def render_wpage():\n print('Render wiki page')\n try:\n data = json.load(open('result.json'))\n except:\n print(\"FAIL: Could not read result.json file\")\n return None\n\n delta = int(data['delta'])\n if delta > 3600:\n delta = \"%s hours\" % (delta / 3600)\n elif delta > 60:\n delta = \"%s minutes\" % (delta / 60)\n start_time = time.strftime(\"%Y-%m-%d %H:%M:%S UTC\", time.gmtime(data['start_time']))\n\n cdir = os.path.dirname(os.path.abspath(__file__))\n j2_loader = jinja2.FileSystemLoader(cdir)\n j2_env = jinja2.Environment(loader=j2_loader, trim_blocks=True)\n template = j2_env.get_template(\"wikitemplate.j2\")\n template_vars = {'updated': datetime.datetime.utcnow(), 'results': data[\"results\"],\n 'delta': delta, 'start_time': start_time}\n return template.render(template_vars)",
"def get(self, request, *args, **kwargs):\n return render(request, self.template_name)",
"def get(self, request, *args, **kwargs):\n return render(request, self.template_name)",
"def get(self, request, *args, **kwargs):\n return render(request, self.template_name)",
"def get(self, request, *args, **kwargs):\n return render(request, self.template_name)",
"def get(self, request, *args, **kwargs):\n return render(request, self.template_name)",
"def process_request(self, req):\r\n add_stylesheet(req, 'common/css/wiki.css')\r\n name = req.args.get('page', '')\r\n node = self._get_node(req)\r\n if ('create' in req.args):\r\n parent=req.args.get('parent', '')\r\n if not WikiPage(self.env, parent).exists:\r\n parent = ''\r\n req.redirect(req.href.wiki(name, action='edit', parent=parent, title=req.args.get('title', '')))\r\n if (req.args.get('action', '') == 'new'):\r\n data = dict({'node' : node, '_' : _ })\r\n return 'wiki_create.html', data, None",
"def post(self, request, *args, **kwargs):\n return self.render_to_response(self.get_context_data())",
"def renderPage():\n return render_template(\"index.html\")",
"def default(self, *args, **kwargs):\n Page = startHTML + \"I don't know where you're trying to go, so have a 404 Error.\"\n cherrypy.response.status = 404\n return Page",
"def new(): \n pages_object = Pages()\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Creation new page\n if request.method == 'POST':\n if pages_object.new():\n return redirect(url_for('pages.overview'))\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/new.html'.format(MODULE_DIR), **locals())",
"def render_newpage(self, user, subject=\"\", post_content=\"\", error=\"\"):\n self.render(\"mod_post_base.html\",\n subject=subject,\n post_content=post_content,\n error=error,\n user=user,\n main_heading=self.MAIN_HEADING)",
"def get(self):\n self.render('view.html')",
"def presentation():\n return redirect(url_for('render.render', presentation='true', **request.args))",
"def view_html_page():\n\n return render_template(\"moby.html\")",
"def __display_form(self, template, message=None):\r\n # page = memcache.get(str(language_code) + key, namespace='Pages')\r\n # if page is None:\r\n template_values = {\r\n 'user_email': self.user_email,\r\n constants.VAR_NAME_ERRORMESSAGE: message\r\n }\r\n template = self.jinja2_env.get_template(template)\r\n page = template.render(template_values)\r\n # memcache.add(str(language_code) + key, page, namespace='Pages')\r\n self.response.out.write(page)",
"def regularpage(pagename=None): \n return \"You've arrived at \" + pagename\n #if pagename==None: \n # raise Exception, 'page_not_found' \n #return render_template(pagename) ",
"def serve(self, request, *args, **kwargs):\n\n template = self.get_template(request)\n\n if request.method == 'POST':\n\n form = self.get_form(request.POST, page=self, user=request.user)\n\n if form.is_valid():\n self.process_form_submission(form)\n return HttpResponseRedirect(self.url + '?thank=you')\n\n else:\n\n thanks = request.GET.get('thank', False)\n if thanks:\n form = None\n template = self.get_landing_page_template(request)\n if self.thanks_page_title:\n self.title = self.thanks_page_title\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context['form'] = form\n if form:\n context['conditional_rules'] = json.dumps(form.conditional_rules)\n\n return render(\n request,\n template,\n context\n )",
"def feedback():\n return render_template(\"feedback.html\")",
"def story(request):\n return render(request, 'coreapp/story.html')",
"def message_page(request,page_name):\n return HttpResponse(\"This will be the {0} page.\".format(page_name))",
"def show_story():\n print(story)\n ans = request.args\n mad_story = story.generate(ans)\n return render_template(\"story.html\", story=mad_story)",
"def about() -> Any:\n return render_template(\"about.html\")",
"def post(self, request, *args, **kwargs):\n return render(request, self.template_name, self.get_context_data(**kwargs))",
"def default(self, *args, **kwargs):\n Page = \"I don't know where you're trying to go, so have a 404 Error.\"\n cherrypy.response.status = 404\n return Page",
"def render():\n\n path = request.args.get('markdown', '')\n raw = request.args.get('raw', False)\n\n username, user_id = g.user.username, g.user.id\n\n tmpl = 'markdown-rendered.html'\n if raw:\n tmpl = 'markdown-raw.html'\n elif request.args.get('presentation'):\n # TODO(dan?) fix presentation post\n # presentation_post = {}\n # presentation_post['authors_string'] = post.author_string\n # presentation_post['tldr'] = post.tldr\n # presentation_post['html'] = html\n # html = create_presentation_text(presentation_post)\n tmpl = \"markdown-presentation.html\"\n\n if not current_app.config.get('REPOSITORY_INDEXING_ENABLED', True):\n return _render_preview(path=path, tmpl=tmpl)\n\n post = (db_session.query(Post)\n .filter(Post.path == path)\n .first())\n if not post:\n knowledge_aliases = current_repo.config.aliases\n if path in knowledge_aliases:\n # TODO: reframe as redirect\n post = (db_session.query(Post)\n .filter(Post.path == knowledge_aliases[path])\n .first())\n if not post:\n raise Exception(\"unable to find post at {}\".format(path))\n\n if post.contains_excluded_tag:\n # It's possible that someone gets a direct link to a post that has an excluded tag\n return render_template(\"error.html\")\n\n if post.private:\n groups = post.groups\n users = set()\n for group in groups:\n user_ids = [user.id for user in group.users]\n users.update(user_ids)\n if user_id not in users and username not in current_repo.config.editors:\n return render_template(\"permission_ask.html\", authors=post.authors_string)\n\n html = render_post(post)\n raw_post = render_post_raw(post) if raw else None\n\n comments = post.comments\n for comment in comments:\n comment.author = db_session.query(User).filter(User.id == comment.user_id).first().username\n if not raw:\n comment.text = render_comment(comment)\n\n user_obj = (db_session.query(User)\n .filter(User.id == user_id)\n .first())\n\n tags_list = [str(t.name) for t in post.tags]\n user_subscriptions = [str(s) for s in user_obj.get_subscriptions]\n\n is_author = user_id in [author.id for author in post.authors]\n\n rendered = render_template(tmpl,\n html=html,\n post_id=post.id,\n post_path=path,\n raw_post=raw_post,\n comments=comments,\n username=username,\n post_author=post.authors_string,\n title=post.title,\n page_views=post.view_count,\n unique_views=post.view_user_count,\n likes=post.vote_counted_for_user(user_id=user_id),\n total_likes=post.vote_count,\n tags_list=tags_list,\n user_subscriptions=user_subscriptions,\n webeditor_buttons=False,\n web_uri=post.kp.web_uri,\n table_id=None,\n is_private=(post.private == 1),\n is_author=is_author)\n return rendered"
] |
[
"0.6232014",
"0.62105066",
"0.62075084",
"0.6181259",
"0.6165267",
"0.6127661",
"0.6127661",
"0.6127661",
"0.6127661",
"0.6127661",
"0.61201984",
"0.6097872",
"0.60584813",
"0.5958934",
"0.5923101",
"0.5914462",
"0.59116375",
"0.5910482",
"0.5901794",
"0.58796734",
"0.586973",
"0.5845904",
"0.5844962",
"0.5834426",
"0.5828117",
"0.5808656",
"0.5807172",
"0.5805462",
"0.5798711",
"0.5794361"
] |
0.6526012
|
0
|
Create a discrete control set that is shaped like a cosine function.
|
def gen_controls_cos(complex_controls, control_count, control_eval_count,
evolution_time, max_control_norms, periods=10.):
period = np.divide(control_eval_count, periods)
b = np.divide(2 * np.pi, period)
controls = np.zeros((control_eval_count, control_count))
# Create a wave for each control over all time
# and add it to the controls.
for i in range(control_count):
# Generate a cosine wave about y=0 with amplitude
# half of the max.
max_norm = max_control_norms[i]
_controls = (np.divide(max_norm, 2)
* np.cos(b * np.arange(control_eval_count)))
# Replace all controls that have zero value
# with small values.
small_norm = max_norm * 1e-1
_controls = np.where(_controls, _controls, small_norm)
controls[:, i] = _controls
#ENDFOR
# Mimic the cosine fit for the imaginary parts and normalize.
if complex_controls:
controls = (controls - 1j * controls) / np.sqrt(2)
return controls
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def C(i, x):\n if i == 1:\n return np.array([[1., 0., 0.],\n [0., cos(x), sin(x)],\n [0., -sin(x), cos(x)]])\n elif i == 2:\n return np.array([[cos(x), 0., -sin(x)],\n [0., 1., 0.],\n [sin(x), 0., cos(x)]])\n elif i == 3:\n return np.array([[cos(x), sin(x), 0.],\n [-sin(x), cos(x), 0.],\n [0., 0., 1.]])",
"def test_basis_categorical():\n cat_data = ['sand'] * 20 + [np.nan] * 5 + ['cement'] * 10 + [np.nan] * 5\n curve_cat = Curve(cat_data, index=range(0, 40))\n curve_new = curve_cat.to_basis(start=5, stop=30, step=1)\n assert len(curve_new) == 26",
"def discrete_cosine_tranform(self, matrix):\n\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n #Fourier Transform matrix:\n dct = np.zeros([x,y])\n\n for u in range(0, x):\n for v in range(0, y):\n sum_ft = 0\n for i in range(0, x):\n for j in range(0, y):\n\n sum_ft = sum_ft + matrix[i, j] * (np.cos(((2*np.pi)/N)*(u*i + v*j)))\n\n dct[u, v] = sum_ft\n\n return dct",
"def cepstrum(input, nceps):\n # apply the Discrete Cosine Transform\n output = dct(input, norm='ortho')[:, 0:nceps]\n\n # myplot(output, 'Before lifter')\n\n # apply liftering\n # output = lifter(output)\n\n # myplot(output, 'After lifter')\n\n return output",
"def cosineFunction(self):\n \n w = np.zeros((self.N))\n l = self.l\n for i in range(self.r.shape[0]):\n r = np.abs(self.r[i])\n if r <= l:\n tf = lambda r,l : 0.5 * (np.cos(np.pi * r/l) + 1)\n w[i] = tf(r,l)\n else:\n w[i] = 0\n self.w = w",
"def direct_dctii(x):\n n = x.size\n a = np.empty((n, n), dtype = x.dtype)\n for i in xrange(n):\n for j in xrange(n):\n a[i, j] = x[j] * np.cos(np.pi * (0.5 + j) * i / n)\n\n a[0] *= np.sqrt(1. / n)\n a[1:] *= np.sqrt(2. / n)\n\n return a.sum(axis = 1)",
"def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)",
"def cos_series(x, n):\n return sum((cos_term(x, i) for i in xrange(0, n)))",
"def dctmtx(n):\n x,y = np.meshgrid(range(n), range(n))\n D = np.sqrt(2.0/n) * np.cos(np.pi * (2*x+1) * y / (2*n))\n D[0] /= np.sqrt(2)\n return D",
"def cos_term(x, i):\n n = 2*i\n return alternate(i, exp_term(x, n))",
"def cosines(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.cos(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def make_curve_data(control_points):\n spline = CSpline(control_points)\n t_extrapolation = 1 # how far to draw the straight lines past the\n t_min = 0 - t_extrapolation\n t_max = len(control_points) // 3 - 1 + t_extrapolation\n return np.array(\n [\n spline.get_pos(t)\n for t in np.linspace(t_min, t_max, num=int(50 * (t_max - t_min) + 1))\n ]\n )",
"def cos_vecs(x, y):\r\n _t = np.sum((x * y), axis=1)\r\n norm_x = np.linalg.norm(x, axis=1, keepdims=True)\r\n norm_y = np.linalg.norm(y, axis=1, keepdims=True)\r\n _t = np.reshape(_t, (-1, 1))\r\n ret = _t / (norm_x * norm_y + 1e-10)\r\n return ret",
"def initiatecos(width, x0):\n y = np.cos((x-x0)*np.pi/width)\n for i in range(xsize):\n if x[i]<-width/2. or x[i]>width/2.:\n y[i] = 0\n return y",
"def C(t):\n out = [0.]*D #: For each t we return a list of D coordinates\n for d in range(D): #: Iterate over 0-indexed dimension indices\n out[d] = S(t,d)\n return out",
"def trans(x):\r\n return np.arccos(1-2*x)",
"def define_cx_scaled(self, n_col: int, n_shooting: int, initial_node) -> list[MX | SX, ...]:\n _cx = [self.nlp.cx() for _ in range(n_shooting + 1)]\n for node_index in range(n_shooting + 1):\n _cx[node_index] = [self.nlp.cx() for _ in range(n_col)]\n for idx in self.nlp.variable_mappings[self.name].to_first.map_idx:\n for node_index in range(n_shooting + 1):\n for j in range(n_col):\n sign = \"-\" if np.sign(idx) < 0 else \"\"\n _cx[node_index][j] = vertcat(\n _cx[node_index][j],\n self.nlp.cx.sym(\n f\"{sign}{self.name}_{self.name_elements[abs(idx)]}_phase{self.nlp.phase_idx}_node{node_index + initial_node}.{j}\",\n 1,\n 1,\n ),\n )\n return _cx",
"def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))",
"def get_cosin_sim(question, contexts):\r\n cos_sim_for_question = []\r\n for context in contexts :\r\n cv = CountVectorizer(stop_words=MY_STOPWORDS, lowercase=False)\r\n matrix = cv.fit_transform(pd.DataFrame([question, context])[0]).toarray()\r\n cos_sim = dot(matrix[0], matrix[1])/(norm(matrix[0])*norm(matrix[1]))\r\n cos_sim_for_question.append(cos_sim)\r\n return pd.Series(cos_sim_for_question)",
"def _get_dscp_cos(self):\n return self.__dscp_cos",
"def dct(n_filters, n_input):\n\n basis = np.empty((n_filters, n_input))\n basis[0, :] = 1.0 / np.sqrt(n_input)\n\n samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)\n\n for i in range(1, n_filters):\n basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)\n\n return basis",
"def discreteComplexTransform(self,s):\n N=len(s)\n return sum([s[n]*cmath.exp(2j*cmath.pi*k*n/N) for n in range(N)])/N",
"def cosmo(self):\n return self.cls(*self.cls_args, **self.cls_kwargs)",
"def C_factory(P, n=2, V_type=\"clamped\"):\n\n # TODO: check that p_len is ok with the degree and > 0\n m = len(P) # the number of points in P\n D = len(P[0]) # the dimension of a point (2D, 3D)\n\n # Create the knot vector\n V = make_knot_vector(n, m, V_type)\n # TODO: check the validity of the input knot vector.\n # TODO: create an initial Vector Point.\n\n #############################################################################\n # The following line will be detailed later. #\n # We create the highest degree basis spline function, aka. our entry point. #\n # Using the recursive formulation of b-splines, this b_n will call #\n # lower degree basis_functions. b_n is a function. #\n #############################################################################\n b_n = basis_factory(n)\n\n @memoize\n def S(t, d):\n \"\"\" The b-spline funtion, as defined in eq. 3. \"\"\"\n out = 0.\n for i in range(m): #: Iterate over 0-indexed point indices\n out += P[i][d]*b_n(t, i, V)\n return out\n\n def C(t):\n \"\"\" The b-spline curve, as defined in eq. 4. \"\"\"\n out = [0.]*D #: For each t we return a list of D coordinates\n for d in range(D): #: Iterate over 0-indexed dimension indices\n out[d] = S(t,d)\n return out\n\n C.P = P #: The control polygone\n C.V = V #: The knot vector used by the function\n C.spline = S #: The spline function.\n C.basis = b_n #: The highest degree basis function. Useful to do some plotting.\n C.min = V[0] #: The domain of definition of the function, lower bound for t\n C.max = V[-1] #: The domain of definition of the function, upper bound for t\n C.endpoint = C.max!=V[-1] #: Is the upper bound included in the domain.\n return C",
"def coset_enumeration_c(fp_grp, Y):\n # Initialize a coset table C for < X|R >\n C = CosetTable(fp_grp, Y)\n X = fp_grp.generators\n R = fp_grp.relators()\n A = C.A\n # replace all the elements by cyclic reductions\n R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]\n R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \\\n for rel in R_cyc_red))\n R_set = set()\n for conjugate in R_c:\n R_set = R_set.union(conjugate)\n # a list of subsets of R_c whose words start with \"x\".\n R_c_list = []\n for x in C.A:\n r = set([word for word in R_set if word[0] == x])\n R_c_list.append(r)\n R_set.difference_update(r)\n for w in Y:\n C.scan_and_fill_f(0, w)\n for x in A:\n C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])\n i = 0\n while i < len(C.omega):\n alpha = C.omega[i]\n i += 1\n for x in C.A:\n if C.table[alpha][C.A_dict[x]] is None:\n C.define_f(alpha, x)\n C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])\n return C",
"def cosine(xs: Tensor, ys: Tensor, epsilon: float = 1e-8) -> Tensor:\n mat = xs @ ys.t()\n x_norm = xs.norm(2, dim=1) + epsilon\n y_norm = ys.norm(2, dim=1) + epsilon\n x_diag = (1 / x_norm).diag()\n y_diag = (1 / y_norm).diag()\n return x_diag @ mat @ y_diag",
"def cofactors(self,x,y):\r\n return self.factorset(x) & self.factorset(y)",
"def phasesin14xymult_cfix(param, xyord,crossord,t, x, y):\n # 2010-04-27 11:49 IJC: Created\n # 2010-05-28 15:42 IJC: Added x*y cross-terms\n # 2010-07-21 13:02 IJC: switched to a mostly-additive model\n\n param = array(param,copy=True)\n x = array(x,copy=True)\n y = array(y,copy=True)\n t = array(t,copy=True)\n\n xparam = zeros((0,14),float)\n yparam = zeros((0,14),float)\n crossparam = zeros((0,14),float)\n\n cparam = param[3:17].copy()\n # Ensure that prod(1.+cparam) equals zero\n cparam[0] = 1./(1.+cparam[1::]).prod() - 1.\n if xyord>=1:\n for ii in range(xyord):\n xparam = vstack((xparam,param[17+ii*28:31+ii*28]))\n yparam = vstack((yparam,param[31+ii*28:45+ii*28]))\n\n lastxyparamind = 45+(xyord-1)*28\n if crossord>=1:\n for ii in [0]: #range(crossparam):\n crossparam = vstack((crossparam,param[lastxyparamind:lastxyparamind+(ii+1)*14]))\n\n #cparam -= mean(cparam)\n param[2] = param[2] % (2*pi)\n \n if len(t.shape)==1:\n was1d = True\n t = t.reshape(14, len(t)/14.)\n x = x.reshape(14, len(x)/14.)\n y = y.reshape(14, len(y)/14.)\n else:\n was1d = False\n\n # Subtract the mean from the X and Y data\n x -= x.mean(1).reshape(14,1)\n y -= y.mean(1).reshape(14,1)\n\n # Zeroth-order model:\n ret = param[0] - abs(param[1]) *cos(2*pi*t +param[2])\n\n # Apply constant and X/Y offsets:\n #ret *= (1. + tile(cparam, (t.shape[1],1)).transpose())\n offset_term = (1. + tile(cparam, (t.shape[1],1)).transpose())\n if xyord>=1:\n for ii in range(xyord):\n offset_term += tile(xparam[ii], (t.shape[1],1)).transpose()*x**(ii+1)\n offset_term += tile(yparam[ii], (t.shape[1],1)).transpose()*y**(ii+1)\n\n if crossord>=1:\n for ii in [0]: \n offset_term += tile(crossparam[ii], (t.shape[1],1)).transpose()*x*y\n\n # Apply the (1+c+dx+ey) term:\n ret *= offset_term\n\n if was1d:\n ret = ret.ravel()\n\n return ret",
"def algebra_generators(self):\n return self.basis().keys().semigroup_generators().map(self.monomial)",
"def stokes_horizontal():\n return np.array([1, 1, 0, 0])"
] |
[
"0.62159336",
"0.5716321",
"0.5659428",
"0.543885",
"0.54238886",
"0.5407454",
"0.5336914",
"0.5282942",
"0.5246328",
"0.5203804",
"0.5200005",
"0.5176265",
"0.5172793",
"0.5138024",
"0.50859714",
"0.50710136",
"0.50569665",
"0.5047774",
"0.5034469",
"0.50343806",
"0.5020108",
"0.501968",
"0.5009305",
"0.49824393",
"0.49561447",
"0.49560082",
"0.49450758",
"0.49385783",
"0.49369442",
"0.4922354"
] |
0.5927754
|
1
|
Create a discrete control set that is shaped like a flat line with small amplitude.
|
def gen_controls_flat(complex_controls, control_count, control_eval_count,
evolution_time, max_control_norms, periods=10.):
controls = np.zeros((control_eval_count, control_count))
# Make each control a flat line for all time.
for i in range(control_count):
max_norm = max_control_norms[i]
small_norm = max_norm * 1e-1
control = np.repeat(small_norm, control_eval_count)
controls[:, i] = control
#ENDFOR
# Mimic the flat line for the imaginary parts, and normalize.
if complex_controls:
controls = (controls - 1j * controls) / np.sqrt(2)
return controls
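A minimal usage sketch for gen_controls_flat (illustrative sizes only; assumes numpy is imported as np and the function above is in scope):

import numpy as np

# Hypothetical problem sizes chosen only to show the output shape and scale.
complex_controls = True
control_count = 2
control_eval_count = 5
evolution_time = 10.0
max_control_norms = np.array([1.0, 0.5])

controls = gen_controls_flat(complex_controls, control_count, control_eval_count,
                             evolution_time, max_control_norms)
print(controls.shape)        # (5, 2)
print(np.abs(controls[0]))   # [0.1  0.05] -- each control sits at 10% of its max norm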
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_basis_categorical():\n cat_data = ['sand'] * 20 + [np.nan] * 5 + ['cement'] * 10 + [np.nan] * 5\n curve_cat = Curve(cat_data, index=range(0, 40))\n curve_new = curve_cat.to_basis(start=5, stop=30, step=1)\n assert len(curve_new) == 26",
"def make_curve_data(control_points):\n spline = CSpline(control_points)\n t_extrapolation = 1 # how far to draw the straight lines past the\n t_min = 0 - t_extrapolation\n t_max = len(control_points) // 3 - 1 + t_extrapolation\n return np.array(\n [\n spline.get_pos(t)\n for t in np.linspace(t_min, t_max, num=int(50 * (t_max - t_min) + 1))\n ]\n )",
"def ipset_linear():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(2, 8, 51))",
"def discretized_line(x_start, y_start, x_end, y_end, n_elements):\n n_pts = n_elements + 1\n x = np.linspace(x_start, x_end, n_pts)\n y = np.linspace(y_start, y_end, n_pts)\n x1 = x[:-1]\n y1 = y[:-1]\n x2 = x[1:]\n y2 = y[1:]\n return x1, y1, x2, y2",
"def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)",
"def __init__(self, borderFunc, step=1, max_iter=150, v=1):\n self._u = None # contorno C => levelset\n self.data = borderFunc # funcao da borda\n self.step_size = step\n self.max_iter = max_iter\n self.name = \"Traditional Levelset\"\n self.v = v",
"def exercise_one():\n\n interval = (0,1)\n\n control_net = np.matrix([\n [-0.2, 2],\n [-0.3, 6.2],\n [-1.2, 4.8],\n [-2.8, 8.8],\n [-0.7, 14],\n [1.4, 14.7],\n [3.6, 10.2],\n [3.2, 5.1],\n [1.5, 6.2],\n [1.4, 2],\n ])\n\n# First we plot a curve where internal knots have maximum multiplicities\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 3),\n 'control_net':control_net,\n 'multiplicities':[2,2,2]\n }\n\n curve = draw(**arguments)\n\n\n# After we plot a curve where each internal knot have multiplicity 1.\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])\n\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 6),\n 'control_net':control_net,\n }\n\n curve = draw(**arguments)\n\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])",
"def set_control_points(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n \r\n # t: consists of N points\r\n # p: is a finer grid made from t (default is 10 times N points)\r\n cls.t = np.fromfunction(lambda j : (1.0/2.0 + j)*cls.DT, (cls.N, ))\r\n cls.p = np.linspace(cls.t[0], cls.t[-1], num=10*cls.N)",
"def ipset_y_0d():\n return IPSet(x=np.linspace(0, 10, 1), y=np.array(1), x_new=np.linspace(2, 5, 7))",
"def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset",
"def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)",
"def create_curve(self):\n self._define_amplitude()\n self._define_width()\n self._define_horizontal()\n self._cache_values()\n print(self)",
"def __init__(self, controlPoints=None):\n super(CatmullRomSpline, self).__init__(controlPoints)",
"def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax",
"def _extended_discrete_xaxis(x_axis, n_points=100, eps=0.10):\n min_value = np.min(x_axis)\n max_value = np.max(x_axis)\n distance = max_value - min_value\n return np.linspace(min_value - eps * distance, max_value + eps * distance,\n num=n_points)",
"def discrete_layer(width: float, steps: int) -> list:\n\n min_x = 0.001\n steps = steps/2\n\n def sum_function(stretch_factor):\n return width - min_x * ((1 - stretch_factor**steps)/(1 - stretch_factor))\n\n stretch = float(fsolve(sum_function, 1.3)[0])\n\n return sub_division(width, min_x, stretch)",
"def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))",
"def sawtooth_factory(motor, start, stop, step_size):\n if stop < start:\n start, stop = stop, start\n\n num_pos = int((stop - start) // step_size)\n j = itertools.count()\n last_group = None\n\n def x_motion_per_step(dets, stream_name):\n nonlocal last_group\n if last_group is not None:\n yield from bps.wait(last_group)\n yield from bps.trigger_and_read(dets, stream_name)\n last_group = short_uid()\n target = start + step_size * (next(j) % num_pos)\n yield from bps.abs_set(motor, target, group=last_group)\n\n return x_motion_per_step",
"def gen_controls_white(complex_controls, control_count, control_eval_count,\n evolution_time, max_control_norms, periods=10.):\n controls = np.zeros((control_eval_count, control_count))\n\n # Make each control a random distribution of white noise.\n for i in range(control_count):\n max_norm = max_control_norms[i]\n stddev = max_norm/5.0\n control = np.random.normal(0, stddev, control_eval_count)\n controls[:, i] = control\n #ENDFOR\n\n # Mimic the white noise for the imaginary parts, and normalize.\n if complex_controls:\n controls = (controls - 1j * controls) / np.sqrt(2)\n\n return controls",
"def timinggrid(self):\n\n gelem = Element(\"g\") # create a group\n for i in range(int(self.cycles)):\n\n lelem = Element(\"line\")\n lelem.attrib['x1'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y1'] = str(0);\n lelem.attrib['x2'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y2'] = str(self.signalcnt*(self.height + self.signalspacing) + self.signalspacing)\n lelem.attrib['stroke'] = \"grey\"\n lelem.attrib['stroke-width'] = \"0.5\"\n gelem.append(lelem)\n\n \n self.svgelem.append(gelem)\n self.svgelem.append(self.signalselem)",
"def raster(self, event_times_list, color='k'):\n ax = plt.gca()\n for ith, trial in enumerate(event_times_list):\n plt.vlines(trial, ith + .5, ith + 1.5, color=color)\n plt.ylim(.5, len(event_times_list) + .5)\n return ax",
"def make_lattice_points(a=1.0, lattice_type=\"sc\",\r\n ind_min=-10, ind_max=10, CAR=None):\r\n\r\n # generate basis\r\n if lattice_type == \"sc\":\r\n a1 = np.array([1., 0., 0.])\r\n a2 = np.array([0., 1., 0.])\r\n a3 = np.array([0., 0., 1.])\r\n elif lattice_type == \"fcc\":\r\n a1 = np.array([0.5, 0.5, 0.])\r\n a2 = np.array([0., 0.5, 0.5])\r\n a3 = np.array([0.5, 0., 0.5])\r\n elif lattice_type == \"bcc\":\r\n a1 = np.array([0.5, 0.5, -0.5])\r\n a2 = np.array([-0.5, 0.5, 0.5])\r\n a3 = np.array([0.5, -0.5, 0.5])\r\n elif lattice_type == \"hcp\":\r\n if CAR is None:\r\n # default c-to-a ratio = 2.0 * np.sqrt(2.0 / 3.0)\r\n CAR = 2.0 * np.sqrt(2.0 / 3.0)\r\n a1 = np.array([1.0, 0., 0.])\r\n a2 = np.array([-0.5, 0.5*np.sqrt(3.0), 0.])\r\n a3 = np.array([0.0, 0.0, CAR])\r\n else:\r\n raise ValueError(\"Invalid lattice_type: \", lattice_type)\r\n\r\n # generate indices\r\n ind_list = np.arange(ind_min, ind_max + 1, 1)\r\n h, k, l = np.meshgrid(ind_list, ind_list, ind_list)\r\n\r\n h = np.reshape(h, (h.size, 1))\r\n k = np.reshape(k, (k.size, 1))\r\n l = np.reshape(l, (l.size, 1))\r\n\r\n hkl = np.hstack((np.hstack((h, k)), l))\r\n\r\n # calculate coordinates\r\n if lattice_type != \"hcp\":\r\n A = np.vstack((np.vstack((a1, a2)), a3)) # lattice\r\n return a * np.dot(hkl, A)\r\n else:\r\n A = np.vstack((np.vstack((a1, a2)), a3)) # lattice\r\n A_coor = a * np.dot(hkl, A)\r\n B_coor = np.zeros((2*len(A_coor), 3))\r\n B_coor[::2] = A_coor\r\n B = 2./3.*a1 + 1./3.*a2 + 0.5*a3 # the other atom in the basis\r\n B_coor[1::2] = A_coor + a * np.tile(B[None, :], (len(A_coor), 1))\r\n return B_coor.copy()",
"def set_control_points(self, control_points):\r\n for curve_index, points in enumerate(control_points):\r\n self.curves[curve_index].control_points = points",
"def __init__(\n self,\n line=None,\n *,\n knl=None,\n filled=True,\n resolution=1000,\n line_length=None,\n **kwargs,\n ):\n\n if knl is None:\n if line is None:\n raise ValueError(\"Either line or knl parameter must not be None\")\n knl = range(max([e.order for e in line.elements if hasattr(e, \"order\")]) + 1)\n if isinstance(knl, int):\n knl = range(knl + 1)\n if not isinstance(knl, str):\n knl = [[[f\"k{n}l\" for n in knl]]]\n if line is None and line_length is None:\n raise ValueError(\"Either line or line_length parameter must not be None\")\n self.S = np.linspace(0, line_length or line.get_length(), resolution)\n self.filled = filled\n\n super().__init__(\n on_x=\"s\",\n on_y=knl,\n **kwargs,\n )\n\n # create plot elements\n def create_artists(i, j, k, a, p):\n kwargs = dict(\n color=f\"C{order(p)}\",\n alpha=0.5,\n label=self.label_for(p, unit=True),\n )\n if self.filled:\n return a.fill_between(self.S, np.zeros_like(self.S), zorder=3, lw=0, **kwargs)\n else:\n return a.plot([], [], **kwargs)[0]\n\n self._create_artists(create_artists)\n\n for a in self.axflat:\n a.plot(self.S, np.zeros_like(self.S), \"k-\", lw=1, zorder=4)\n self.legend(show=\"auto\", ncol=5)\n\n # set data\n if line is not None:\n self.update(line, autoscale=True)",
"def ipset():\n return IPSet(x=np.linspace(0, 10, 11), y=np.random.randn(11), x_new=np.linspace(3, 9, 20))",
"def advanced_line():\n\n # Make dataset specifying arguments\n dataset_a = DataSet(sine,line_style='-',line_width=1.5,marker_style='o',marker_size='4')\n\n # Make dataset changing options using setters\n dataset_b = DataSet(cosine)\n dataset_b.set_line(style='--',width=1.5)\n dataset_b.set_colour(colour='royalblue')\n\n # Make plot object and adjust properties using setters\n plot = Plot()\n plot.set_text(latex=True,label=12)\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n plot.set_axes(xlim=(0,8),ylim=(-1.1,1.1),xlabel=r'$x$',ylabel=r'$f\\left(x\\right)$',xticks=(1.0,0.2),yticks=(0.2,0.05))\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_advanced_line',fmt='png')\n plot.display()",
"def baseline_splines(traces, n_control_points, std_cutoff=2.25):\n\n # assuming time by traces x trials\n if traces.ndim is 1:\n traces = np.atleast_2d(traces).T\n if traces.ndim is 2:\n traces = np.atleast_3d(traces)\n \n num_points = traces.shape[0]\n num_traces = traces.shape[1]\n num_trials = traces.shape[2]\n\n fit_baselines = np.zeros_like(traces)\n \n for trial in range(num_trials):\n masked_traces = mask_deviations(traces[:,:,trial], std_cutoff=std_cutoff)\n\n for trace in range(num_traces):\n num_segments = n_control_points - 2\n edge_size = int(np.ceil(masked_traces.shape[0] * 0.1))\n if num_segments>0:\n trace_in_parts = np.array_split(masked_traces[edge_size:-edge_size,trace], n_control_points-2)\n\n means = [x.mean() for x in trace_in_parts] # could also consider the median point\n\n segment_length = len(trace_in_parts[0])\n center_of_first = segment_length / 2 \n xs = [center_of_first+segment_length*i for i in range(num_segments)]\n else: # only using endpoints\n means = []\n xs = []\n\n # add the average of the first ten and last ten points to the spline\n means.insert(0, masked_traces[0:edge_size,trace].mean(axis=0))\n xs.insert(0,0)\n means.append(masked_traces[-edge_size:,trace].mean(axis=0))\n xs.append(num_points)\n\n # replace all nans with the average of the rest of the control point locations.\n means = np.array(means)\n means[np.isnan(means)] = means[np.logical_not(np.isnan(means))].mean()\n\n # fit spline and generate a baseline\n if n_control_points<=3:\n k=1\n else:\n k=3\n tck = splrep(xs,means,k=k)#, w=weights)#,s=20)\n xnew = np.arange(0,num_points)\n\n fit_baselines[:,trace,trial] = splev(xnew,tck)\n\n return np.squeeze(fit_baselines)",
"def ipset_one_x_new():\n x = np.linspace(-3, 3, 11)\n return IPSet(x=x, y=x ** 2, x_new=1)",
"def make_control_character():\n # Add one character made up of one codepoint each from\n # (High Surrogates + High Private Use Surrogates) and Low Surrogates.\n # We expect each such pair to behave as a single high-codepoint\n # character.\n controls = ('0000', '001F')\n return [unicode_char(char)\n for char in range(int(controls[0], 16), int(controls[1], 16)+1)]",
"def Draw(Uk): \n vecx = np.zeros([n,1])\n for i in range(n):\n vecx[i][0] =(float(2*i-n+1)/(n-1))*L\n plt.plot(vecx, Uk, linewidth=1.0)\n plt.show()"
] |
[
"0.5583016",
"0.5450101",
"0.5243448",
"0.52267694",
"0.51810956",
"0.5128539",
"0.5126478",
"0.5073892",
"0.50508",
"0.5050274",
"0.4984794",
"0.49822906",
"0.497473",
"0.49142426",
"0.49085063",
"0.48147082",
"0.4802282",
"0.47470176",
"0.47278032",
"0.47173092",
"0.4714509",
"0.4713603",
"0.4712668",
"0.4709511",
"0.4701805",
"0.46996695",
"0.46751356",
"0.4666466",
"0.46625525",
"0.46617013"
] |
0.5548447
|
1
|
Sanitize `initial_controls` with `max_control_norms`. Generate both if either was not specified.
|
def initialize_controls(complex_controls,
control_count,
control_eval_count, evolution_time,
initial_controls, max_control_norms):
if max_control_norms is None:
max_control_norms = np.ones(control_count)
if initial_controls is None:
controls = gen_controls_flat(complex_controls, control_count, control_eval_count,
evolution_time, max_control_norms)
else:
# Check that the user-specified controls match the specified data type.
if complex_controls:
if not np.iscomplexobj(initial_controls):
raise ValueError("The program expected that the initial_controls specified by "
"the user conformed to complex_controls, but "
"the program found that the initial_controls were not complex "
"and complex_controls was set to True.")
else:
if np.iscomplexobj(initial_controls):
raise ValueError("The program expected that the initial_controls specified by "
"the user conformed to complex_controls, but "
"the program found that the initial_controls were complex "
"and complex_controls was set to False.")
# Check that the user-specified controls conform to max_control_norms.
for control_step, step_controls in enumerate(initial_controls):
if not (np.less_equal(np.abs(step_controls), max_control_norms + _NORM_TOLERANCE).all()):
raise ValueError("The program expected that the initial_controls specified by "
"the user conformed to max_control_norms, but the program "
"found a conflict at initial_controls[{}]={} and "
"max_control_norms={}."
"".format(control_step, step_controls, max_control_norms))
#ENDFOR
controls = initial_controls
return controls, max_control_norms
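A hedged sketch of both branches of initialize_controls (the values are illustrative; assumes gen_controls_flat shown above is defined in the same module):

import numpy as np

# Branch 1: neither initial_controls nor max_control_norms given -> defaults are generated.
controls, norms = initialize_controls(complex_controls=True,
                                      control_count=1,
                                      control_eval_count=4,
                                      evolution_time=5.0,
                                      initial_controls=None,
                                      max_control_norms=None)
print(norms)           # [1.]
print(controls.shape)  # (4, 1)

# Branch 2: a real-valued user array with complex_controls=True fails the type check.
try:
    initialize_controls(True, 1, 4, 5.0, np.zeros((4, 1)), None)
except ValueError as err:
    print('rejected:', err)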
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_controls_white(complex_controls, control_count, control_eval_count,\n evolution_time, max_control_norms, periods=10.):\n controls = np.zeros((control_eval_count, control_count))\n\n # Make each control a random distribution of white noise.\n for i in range(control_count):\n max_norm = max_control_norms[i]\n stddev = max_norm/5.0\n control = np.random.normal(0, stddev, control_eval_count)\n controls[:, i] = control\n #ENDFOR\n\n # Mimic the white noise for the imaginary parts, and normalize.\n if complex_controls:\n controls = (controls - 1j * controls) / np.sqrt(2)\n\n return controls",
"def autostrip(cls):\n fields = [(key, value) for key, value in cls.base_fields.iteritems()\n if isinstance(value, forms.CharField)]\n for field_name, field_object in fields:\n def get_clean_func(original_clean):\n return lambda value: original_clean(value and value.strip())\n clean_func = get_clean_func(getattr(field_object, 'clean'))\n setattr(field_object, 'clean', clean_func)\n return cls",
"def gen_controls_flat(complex_controls, control_count, control_eval_count,\n evolution_time, max_control_norms, periods=10.):\n controls = np.zeros((control_eval_count, control_count))\n\n # Make each control a flat line for all time.\n for i in range(control_count):\n max_norm = max_control_norms[i]\n small_norm = max_norm * 1e-1\n control = np.repeat(small_norm, control_eval_count)\n controls[:, i] = control\n #ENDFOR\n\n # Mimic the flat line for the imaginary parts, and normalize.\n if complex_controls:\n controls = (controls - 1j * controls) / np.sqrt(2)\n\n return controls",
"def test_construct_values_calls_passed_normalize_function(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FiRsT_FaLue', 'sEcOnd_vAlUE', 'LaST_VaLue']\n expected = '_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n def normal_lower(val): return val.lower()\n def normal_upper(val): return val.upper()\n lower = self.form.construct_value_from_values(constructor_fields, normalize=normal_lower)\n upper = self.form.construct_value_from_values(constructor_fields, normalize=normal_upper)\n\n self.assertEqual(expected.lower(), lower)\n self.assertEqual(expected.upper(), upper)",
"def autostrip(cls):\r\n fields = [(key, value)\r\n for key, value in cls.base_fields.iteritems()\r\n if isinstance(value, CharField)]\r\n for field_name, field_object in fields:\r\n def get_clean_func(original_clean):\r\n return lambda value: original_clean(value and value.strip())\r\n clean_func = get_clean_func(getattr(field_object, 'clean'))\r\n setattr(field_object, 'clean', clean_func)\r\n return cls",
"def strip_controls(complex_controls, controls):\n # Flatten the controls.\n controls = np.ravel(controls)\n # Transform the controls to R2 if they are complex.\n if complex_controls:\n controls = np.hstack((np.real(controls), np.imag(controls)))\n \n return controls",
"def reset_limits(self) -> None:\n if self.userDefinedLimits:\n if self.userMinimum is None or self.userMaximum is None:\n return\n self.setMinimum(self.userMinimum)\n self.setMaximum(self.userMaximum)\n else:\n if self._lower_ctrl_limit is None or self._upper_ctrl_limit is None:\n return\n self.setMinimum(self._lower_ctrl_limit)\n self.setMaximum(self._upper_ctrl_limit)",
"def normalizer(img, norm_min: int = 0, norm_max: int = 255, norm_auto: bool = False):\n rgb_planes = cv2.split(img)\n result_planes = []\n\n for idx, plane in enumerate(rgb_planes[:3]):\n if norm_auto:\n auto_min = np.min(np.where((norm_min <= 25, 255)))\n auto_max = np.max(np.where((norm_min <= 220, 0)))\n plane = np.where(plane <= auto_min, auto_min, plane)\n plane = np.where(plane >= auto_max, auto_max, plane)\n else:\n plane = np.where(plane <= norm_min, norm_min, plane)\n plane = np.where(plane >= norm_max, norm_max, plane)\n norm_img = cv2.normalize(plane, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n result_planes.append(norm_img)\n\n return cv2.merge(result_planes)",
"def test_construct_values_raises_on_invalid_normalize(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['first_value', 'second_value', 'last_value']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n normalize = 'not a valid normalize function'\n message = \"The normalize parameter must be a callable or None. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values(constructor_fields, normalize=normalize)",
"def form_MinMaxLength(request):\n schema = schemaish.Structure()\n schema.add('minmax', schemaish.String(validator=validatish.Length(min=4,max=8)))\n\n form = formish.Form(schema, 'form')\n return form",
"def _clean_inputs(self, nanana = False):\n\t\tif nanana:\n\t\t\tself.txt_usr.delete(0,END)\n\t\tself.txt_pwd.delete(0, END)",
"def _get_init_controls(self):\n\n u_perf_0 = None\n k_fb_perf_0 = None\n k_fb_lqr = self.get_lqr_feedback()\n\n if self.do_shift_solution and self.n_fail == 0:\n if self.n_safe > 1:\n k_fb_safe = np.copy(self.k_fb_safe_all)\n\n # Shift the safe controls\n k_ff_safe = np.copy(self.k_ff_safe)\n\n u_0 = k_ff_safe[0, :]\n\n if self.n_safe > self.r and self.n_perf > self.n_safe: # the first control after the shared controls\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_r_last = (k_ff_perf[0, :] + k_ff_safe[self.r - 1,\n :]) / 2 # mean of first perf ctrl and safe ctrl after shared\n else:\n k_ff_r_last = k_ff_safe[-1, :] # just the last safe control\n\n k_ff_safe_new = np.vstack((k_ff_safe[1:self.r, :], k_ff_r_last))\n\n if self.n_safe > self.r + 1:\n k_ff_safe_new = np.vstack((k_ff_safe_new, k_ff_safe[self.r:, :]))\n else:\n u_0 = self.u_apply\n k_ff_safe_new = np.array([])\n\n if self.n_perf - self.r > 0:\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_perf_new = np.vstack((k_ff_perf[1:, :], k_ff_perf[-1, :]))\n\n if self.perf_has_fb:\n k_fb_perf_0 = np.copy(self.k_fb_perf_0)\n else:\n k_fb_perf_0 = np.array([])\n else:\n k_ff_perf_new = np.array([])\n k_fb_perf_0 = np.array([])\n else:\n k_fb_safe = np.empty((self.n_safe - 1, self.n_s * self.n_u))\n for i in range(self.n_safe - 1):\n k_fb_safe[i] = cas_reshape(k_fb_lqr, (1, -1))\n\n k_ff_safe_new = np.zeros((self.n_safe - 1, self.n_u))\n u_0 = np.zeros((self.n_u, 1))\n\n k_ff_perf_new = np.array([])\n if self.n_perf > 1:\n k_ff_perf_new = np.zeros((self.n_perf - self.r, self.n_u))\n\n if self.perf_has_fb:\n k_fb_perf_0 = k_fb_lqr\n else:\n k_fb_perf_0 = np.array([])\n\n if self.n_safe > 1:\n k_fb_safe_new = np.vstack((k_fb_safe[1:, :], k_fb_safe[-1, :]))\n\n else:\n k_fb_safe_new = np.array([])\n\n return u_0, k_ff_safe_new, k_fb_safe, k_ff_perf_new, k_fb_perf_0",
"def normalize(self):\n normalized = self.all_details.get('normalized', '')\n if normalized:\n return normalized\n\n if self.is_digit():\n self.all_details['normalized'] = 'Numeric'\n elif self.is_uuid():\n self.all_details['normalized'] = 'UUID'\n elif self.is_gibberish():\n self.all_details['normalized'] = 'Gibberish'\n else:\n for nr in self.normalized_regex_list:\n regex = nr['regex']\n groups = r'{}'.format(nr['groups'])\n ua = regex.sub(groups, self.user_agent)\n if ua != self.user_agent:\n self.all_details['normalized'] = ua\n break\n else:\n self.all_details['normalized'] = ''\n\n return self.all_details['normalized']",
"def clean(self):\n if any(self.errors):\n return\n\n if len(set([a.id for a in self.instance.affiliations.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')\n\n names = []\n for form in self.forms:\n # This is to allow empty unsaved form\n if 'name' in form.cleaned_data:\n name = form.cleaned_data['name']\n if name in names:\n raise forms.ValidationError('Affiliation names must be unique.')\n names.append(name)",
"def default_label_sanitizer(s: str) -> str:\n\n out: str = unidecode.unidecode(s)\n\n # Remove invalid characters\n out = re.sub(r\"[^0-9a-zA-Z_]\", \"_\", out)\n\n # Remove leading characters until we find a letter or underscore\n out = re.sub(r\"^[^a-zA-Z_]+\", \"_\", out)\n\n return out",
"def normalization(channels):\n return GroupNorm32(32, channels)",
"def cleaned_multiple_modes(mu, xv_min, xv_max, n_modes=10, ratio=0.25):\n modes = multiple_modes(mu, xv_min, xv_max, n_modes=n_modes)\n return clean_multiple_modes(mu, modes, ratio=ratio)",
"def normalize_bcftools(orig_vcf, conf):\n norm_orig_vcf = orig_vcf.replace(\".vcf.gz\", \".norm.bcftools\" + util.randstr() + \".vcf\")\n norm_orig_cmd = conf.get('main', 'bcftools') + \" norm \" + \" -c w -f \" + conf.get('main', 'ref_genome') + \" \" + orig_vcf + \" -o \" + norm_orig_vcf\n subprocess.check_call(norm_orig_cmd.split())\n return util.bgz_tabix(norm_orig_vcf, conf)",
"def _normalize_range():\n clipped = tf.clip_by_value(inputs, self.minimum, self.maximum)\n return -1 + 2 * (clipped - self.minimum) / length",
"def clean(self):\n if any(self.errors):\n return\n\n if len(set([p.id for p in self.instance.publications.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')",
"def check_for_cliping(mixture_max, sources_list_norm):\n # Initialize renormalized sources and loudness\n renormalize_loudness = []\n clip = False\n # Recreate the meter\n meter = pyln.Meter(RATE)\n # Check for clipping in mixtures\n if np.max(np.abs(mixture_max)) > MAX_AMP:\n clip = True\n weight = MAX_AMP / np.max(np.abs(mixture_max))\n else:\n weight = 1\n # Renormalize\n for i in range(len(sources_list_norm)):\n new_loudness = meter.integrated_loudness(sources_list_norm[i] * weight)\n renormalize_loudness.append(new_loudness)\n return renormalize_loudness, clip",
"def test_construct_values_as_expected(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['FirstValue', 'SecondValue', 'LastValue']\n expected = '_**_'.join(ea for ea in values if ea).casefold()\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n actual = self.form.construct_value_from_values(constructor_fields, '_**_')\n simple = self.form.construct_value_from_values(constructor_fields)\n\n self.assertEqual(expected, actual)\n self.assertEqual('firstvalue_**_secondvalue_**_lastvalue', actual)\n self.assertEqual('_'.join(values).casefold(), simple)\n self.assertEqual('firstvalue_secondvalue_lastvalue', simple)",
"def preliminary_check_controls(self):\n\n # is the program still in a binding state?\n if self.is_binding:\n self.error_msg['text'] = 'You are still binding'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # are the controls set all unique?\n elif len({\n self.controller.slide_up_control,\n self.controller.slide_down_control,\n self.controller.slide_left_control,\n self.controller.slide_right_control\n }) != 4:\n self.error_msg['text'] = 'All controls must be unique'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # all tests passed?\n else:\n # save to file - do this\n\n # move to main menu frame\n self.controller.show_frame(MainMenu)",
"def normalize(amount, min=2, max=4):\n if not amount:\n return amount\n\n # To Decimal, round to highest desired precision\n d = round(Decimal(amount), max)\n s = str(d)\n\n # Truncate as many extra zeros as we are allowed to\n for i in range(max-min):\n if s[-1] == '0':\n s = s[:-1]\n\n if s[-1] == '.':\n s = s[:-1]\n\n return s",
"def make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,typ,inst):\n\tspecialfields = config[\"default\"][\"specialfields\"]\n\tif \"specialfields\" in config[inst].keys():\n\t\tif hgf_field in config[inst][\"specialfields\"].keys():\n\t\t\tspecialfields = config[inst][\"specialfields\"]\n\t\telse: \n\t\t\twarning(\"Please define %s in specialfields. we take %s from the default\" %(hgf_field,hgf_field))\n\t\t\n\telse: \n\t\twarning(\"Please define specialfields under config['%s']. we take specialfields from default\" %inst)\n\t\t\t\n\tvalues = specialfields[hgf_field] #get special values for radio buttons\n\tgroupclass = get_groupclass(sbmfield[2])\n\tmog = \"\" # this variable is set for group mandatory fields\n\tif groupclass != \"\": mog = \"MOG\"\n\tif sbmfield[2].startswith(\"m\"): #fieldlevel\n\t\tif unique_id == sbmfield[0].replace(\"hgf_\",\"\"): #no marccode but Input-field \n\t\t\tspanclass = '<span class=\"MG MG%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"MI %s\"' % groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\t\telse:\n\t\t\tspanclass = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\telse:\n\t\tif unique_id == sbmfield[0].replace(\"hgf_\",\"\"): #no marccode but Input-field\n\t\t\tspanclass = '<span class=\"G G%(unique_id)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \t \n\t\telse:\n\t\t\tspanclass = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"I%(id2)s I%(id1)s I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\tend = '</span>'\n\tspan_field = spanclass + inputfield + end\n\tspan_field = span_field %{'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'inputclass':inputclass,'mog':mog}\n\treturn span_field",
"def clean(self):\n if any(self.errors):\n return\n\n if len(set([r.id for r in self.instance.languages.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')\n\n names = []\n for form in self.forms:\n # This is to allow empty unsaved form\n if 'name' in form.cleaned_data:\n name = form.cleaned_data['name']\n if name in names:\n raise forms.ValidationError('Languages must be unique.')\n names.append(name)",
"def _get_default_controls(self):\n\n pass",
"def normalize(predictors):\n normalization_constant = np.max(np.max(predictors, axis=1), axis=0)\n return predictors / normalization_constant",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def _compute_input_normalization(*amps):\n if len(amps) < 2:\n raise ValueError('At least 2 amplitudes must be provided.')\n n_bosons = len(amps)\n left_range = range(n_bosons)\n right_ranges = list(itertools.permutations(left_range))\n total = 0.\n for right_range in right_ranges:\n i_prod = 1.\n for idx1, idx2 in zip(left_range, right_range):\n # if `idx1` and `idx2` are equal the contribution is given\n # by the inner product of an amplitude with itself. Given\n # that we are assuming the amplitudes to be normalized,\n # the result is always 1 and we can just skip it\n if idx1 == idx2:\n pass\n # otherwise we update the partial product computing the\n # inner product of the two relevant amplitudes (states)\n i_prod *= np.vdot(amps[idx1], amps[idx2])\n total += i_prod\n return np.sqrt(total)"
] |
[
"0.5009028",
"0.49913824",
"0.48872063",
"0.48187238",
"0.47032735",
"0.46083987",
"0.45783016",
"0.45767888",
"0.45071185",
"0.44361663",
"0.4389645",
"0.43579558",
"0.43299943",
"0.43290603",
"0.43231726",
"0.4312022",
"0.42951974",
"0.42844656",
"0.42695424",
"0.42504078",
"0.42445943",
"0.42332506",
"0.42325622",
"0.42279944",
"0.42272532",
"0.42236874",
"0.42064047",
"0.4177614",
"0.4169332",
"0.4156676"
] |
0.6003062
|
0
|
Set the real state of the system when the program (re)starts. When the transition init > any is done, the callback leave_init() is called manually. No email/SMS alert will be sent.
|
def init_state(self):
self.read_inputs()
if (self.in_power.value == 1) and (self.in_alert.value == 1):
self.state = 'alert'
elif (self.in_power.value == 1):
self.state = 'on'
else:
self.state = 'off'
self.leave_init()
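A minimal sketch of init_state in action; the _Pin and _DemoAlarm stand-ins below are hypothetical and only provide the attributes the method touches:

class _Pin:
    def __init__(self, value):
        self.value = value

class _DemoAlarm:
    def __init__(self, power, alert):
        self.in_power = _Pin(power)
        self.in_alert = _Pin(alert)
        self.state = None
    def read_inputs(self):
        pass  # the real class refreshes the hardware inputs here
    def leave_init(self):
        pass  # the real class logs the init transition here, without email/SMS

_DemoAlarm.init_state = init_state  # reuse the function above as a method

alarm = _DemoAlarm(power=1, alert=0)
alarm.init_state()
print(alarm.state)  # 'on'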
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def leave_init(self):\n msg = 'init (state: {})'.format(self.state)\n logger.info(msg)\n event = 'init'\n self.push_socket_event(event) \n color = NoxAlarm.colors[NoxAlarm.events.index(event)]\n self.make_DBLog('system', msg, color)",
"def set_state( self ):",
"def init(self):\n self.connect_to_switches()\n self.reset_states()",
"def onConfigureMessage(self, config):\n self.setState(\"starting\")",
"def state_wait_enter(cfg, app, win):",
"def init_state(self):\n if self.event == 'e_start':\n return 'RUNNING'\n else:\n return 'INIT'",
"def _state_cb(self, msg):\n if self.current_mode == '':\n self.current_mode = msg.mode\n self.state = msg",
"def on_start(self):\n self.state = STARTED",
"def on_pre_enter(self):\n self.setup()\n self.start()",
"def on_start(self):\n self.init()",
"def send_init_event(self):\n self.status['type'] = '__init__'\n self._send()",
"def state_finish_enter(cfg, app, win):",
"def _handle_reset(self):\n # reset own state\n self.grbl_version = None\n self.in_check_mode = False\n self.line_active = False\n # wait for init\n self._wait_for_grbl_init()",
"def on_configure(self, state) -> TransitionCallbackReturn:\n return TransitionCallbackReturn.SUCCESS",
"def on_start(self): # noqa: D401\n logger.debug(\"on_start()\")\n\n state = self.read_state(self.statefile)\n if state:\n self.restore_state(state)",
"def change_state(self):\n new_state = 0 if self.state.state == 1 else 1\n answer = UsbHost.send_query(self.state.ser, \"SetState\", str(self.state.device_id), new_state)\n if answer in wrong_answers:\n error_message(\"Не удалось сменить состояние\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.clearMessage()\n self.state.state = new_state\n if new_state == 1:\n self.set_auto_active()\n if new_state == 0:\n self.set_hand_active()",
"def initialize(self):\n self.currState = self.startState",
"def setStateAfterLogin(self, workMode=AgentWorkMode.Unknown):\n if self.state == AgentState.Login:\n self.state = AgentState.NotReady\n if self.state == AgentState.NotReady:\n self.dn.eventAgentNotReady(None, addPrm={\"ReferenceID\": 0})\n if self.state == AgentState.Ready:\n self.dn.eventAgentReady(None, addPrm={\"ReferenceID\": 0})",
"def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.transition = pdict.pop('transition')\n self.steadyStatePb = pdict.pop('steadyStatePb')",
"def exec(self):\n self._root.after(100, self.change_state, States.INITIAL) # enter the state once gui is setup\n super().exec()",
"def state_wait_do(cfg, app, win, events):",
"def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass",
"def state_cb(self, msg):\n self.prev_state = deepcopy(self.current_state)\n self.current_state = msg\n\n if self.current_state.mode == \"MANUAL\":\n if self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream DISABLED\")\n self.stop_streaming_offboard_points()\n\n if self.current_state.mode == \"POSCTL\":\n if not self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream ENABLED\")\n self.start_streaming_offboard_points()\n if not self.prev_state.mode == \"POSCTL\":\n # just switched into POSCTL, call hover\n self.hover()\n\n if self.current_state.mode == \"OFFBOARD\":\n if not self.prev_state.mode == \"OFFBOARD\":\n # just switched to OFFBOARD, call move\n rospy.loginfo(\"Entering OFFBOARD Mode\")\n for i in range(0,len(velocities)):\n maneuver_velocity_setpoint=velocities[i]\n maneuver_reference_frame = maneuver_reference_Frame\n maneuver_duration=duration[i]\n self.execute_maneuver( self.maneuver_velocity_setpoint, \n self.maneuver_reference_frame, \n self.maneuver_duration)",
"def _reinitialize(self):\n if self.sync:\n self.write_allowed.clear()\n else:\n # Give the OpenEVSE at most RESET_SERIAL_TIMEOUT seconds to reboot\n self.s.timeout = RESET_SERIAL_TIMEOUT\n # Read the next received line, which should start with \"ST\"\n line = self._read_line()\n self.s.timeout = STANDARD_SERIAL_TIMEOUT\n if line[:2] == 'ST':\n new_status = states[int(line.split()[1], 16)]\n if self.callback:\n self.callback(new_status)\n self.new_status = new_status\n else:\n raise EvseError",
"def setup(self, event, old_state):\n pass",
"def state_finish_do(cfg, app, win, events):",
"def set_state(self, state: ApplicationState) -> None:\n self.state = state\n if state == ApplicationState.IDLE:\n self.generate_cards.config(text=\"Generate Bingo Game\")\n elif state == ApplicationState.GENERATING_GAME:\n self.generate_cards.config(text=\"Stop Generating Game\")\n else: # state == ApplicationState.GAME_GENERATED\n self.generate_cards.config(text=\"Regenerate Game\")",
"def on_start(self):\n self._state = service.ServiceStateMachine(['READY'], default_state='READY')\n self._temperature = 50\n self._set_state_internal(force=True)",
"def state_processing_enter(cfg, app, win):",
"def event_m20_11_5000():\n \"\"\"State 0,2: [Preset] Living Altar_SubState\"\"\"\n assert event_m20_11_x82()\n \"\"\"State 1: Rerun\"\"\"\n RestartMachine()"
] |
[
"0.6935918",
"0.64275587",
"0.6396648",
"0.6330707",
"0.6299827",
"0.62608606",
"0.6241156",
"0.62019324",
"0.61741126",
"0.6166896",
"0.61495113",
"0.61159533",
"0.609935",
"0.6081462",
"0.6050688",
"0.60496455",
"0.6041429",
"0.6035328",
"0.60256886",
"0.60246044",
"0.60233694",
"0.60201937",
"0.5983629",
"0.59816617",
"0.59778184",
"0.59656316",
"0.59366584",
"0.59300125",
"0.58954257",
"0.589047"
] |
0.6807303
|
1
|
Wrapper method to call mail & SMS alerts.
|
def make_alert(*args):
    try:
        SmsAlarmAlert(*args)
    except Exception:
        logger.exception('Fail calling SmsAlarmAlert()')
    try:
        EmailAlarmAlert(*args)
    except Exception:
        logger.exception('Fail calling EmailAlarmAlert()')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text",
"def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)",
"def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)",
"def sendsms(window,refrenceid,image,email):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[10])\n childwindow = refrenceid.windowsR()\n protectMoreDevicesbuttons = getAppButtons(childwindow[0])\n protectMoreDevicestitle = childwindow[0].getApplicatontitle()\n ldtp.enterstring(protectMoreDevicestitle,image,email)\n #Need to write after click\n except Exception as er:\n return False\n print \"Not able to send SMS\"",
"def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):\n\n headers = {\n 'User-Agent': self.app_id,\n }\n if self.call == StrmlabsCall.ALERT:\n\n data = {\n 'access_token': self.access_token,\n 'type': self.alert_type.lower(),\n 'image_href': self.image_href,\n 'sound_href': self.sound_href,\n 'message': title,\n 'user_massage': body,\n 'duration': self.duration,\n 'special_text_color': self.special_text_color,\n }\n\n try:\n r = requests.post(\n self.notify_url + self.call.lower(),\n headers=headers,\n data=data,\n verify=self.verify_certificate,\n )\n if r.status_code != requests.codes.ok:\n # We had a problem\n status_str = \\\n NotifyStreamlabs.http_response_code_lookup(\n r.status_code)\n\n self.logger.warning(\n 'Failed to send Streamlabs alert: '\n '{}{}error={}.'.format(\n status_str,\n ', ' if status_str else '',\n r.status_code))\n\n self.logger.debug(\n 'Response Details:\\r\\n{}'.format(r.content))\n return False\n\n else:\n self.logger.info('Sent Streamlabs alert.')\n\n except requests.RequestException as e:\n self.logger.warning(\n 'A Connection error occured sending Streamlabs '\n 'alert.'\n )\n self.logger.debug('Socket Exception: %s' % str(e))\n return False\n\n if self.call == StrmlabsCall.DONATION:\n data = {\n 'name': self.name,\n 'identifier': self.identifier,\n 'amount': self.amount,\n 'currency': self.currency,\n 'access_token': self.access_token,\n 'message': body,\n }\n\n try:\n r = requests.post(\n self.notify_url + self.call.lower(),\n headers=headers,\n data=data,\n verify=self.verify_certificate,\n )\n if r.status_code != requests.codes.ok:\n # We had a problem\n status_str = \\\n NotifyStreamlabs.http_response_code_lookup(\n r.status_code)\n\n self.logger.warning(\n 'Failed to send Streamlabs donation: '\n '{}{}error={}.'.format(\n status_str,\n ', ' if status_str else '',\n r.status_code))\n\n self.logger.debug(\n 'Response Details:\\r\\n{}'.format(r.content))\n return False\n\n else:\n self.logger.info('Sent Streamlabs donation.')\n\n except requests.RequestException as e:\n self.logger.warning(\n 'A Connection error occured sending Streamlabs '\n 'donation.'\n )\n self.logger.debug('Socket Exception: %s' % str(e))\n return False\n\n return True",
"def alert_for_pending_mails_1(request):\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>Beginning of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tThread(target=alert_for_pending_mails_1_worker).start()\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>End of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tresponse = {}\n\n\tresponse[\"info_to_contact\"] = \"Ok\"\n\n\treturn response",
"def sendSMS(sender,recipients,smsBody,provider_api_username='herve.m',provider_api_password='jamiroquai'):\n def printOutput(sender,recipients,smsBody):\n \"\"\"dev, debugging utility method\"\"\"\n message = ' sender : ' + sender\n message += '\\n to : ' + recipients[0]\n message += '\\n body : ' + smsBody\n print ''\n print ''\n print '____________________________________________________________________'\n print message\n print '____________________________________________________________________'\n\n def parseOutput(output):\n \"\"\"Returns parsed values from output with format:\n SUCCESS MessageId: 357958; Cost: 0.80; 0: Accepted for delivery;\n\n Returns:\n boolean (success),\n int (MessageId),\n int (status),\n float (cost),\n string (status message)\n \"\"\"\n vls=output.split(';')\n if len(vls)>=3:\n sm=vls[0].split(' ')\n cs=vls[1].split(':')\n st=vls[2].split(':')\n return str(sm[0]).find('SUCCESS')>=0,int(sm[2]),int(st[0].lstrip()),float(cs[1].lstrip()),st[1].lstrip()\n else:\n return False,-1,-1,0,output\n\n url='http://www.amdtelecom.net/api/sendsms.php'\n parameters={\n 'from' : sender,\n 'to' : recipients[0],\n 'username' : provider_api_username,\n 'password' : provider_api_password,\n 'text' : stringToAscii(smsBody)\n }\n fetchRes=None\n msg='util.sendSMS:logging.info'\n try:\n logging.info('util.sendSMS.fetchHttpRequestData')\n msg='FETCHING SMS SEND FROM API'\n fetchRes=fetchHttpRequestData(parameters,\n url,\n request_output='text',\n request_method='GET')\n if fetchRes is not None:\n msg='PARSING SMS SEND FETCH API OUTPUT: '\n bst,msgid,stid,cs,msg=parseOutput(fetchRes)\n if not bst:logging.error('ERROR RETURNED FROM SMS SEND API:'+fetchRes+' - PARAMS'+str(parameters))\n return fetchRes,bst,msgid,stid,float(cs),msg\n else:\n logging.error(msg+' - PARAMS'+str(parameters))\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters))\n except Exception, ex:\n if fetchRes is None:fetchRes='None'\n logging.error('ERROR '+msg+' - EXCEPTION:'+str(ex)+'- FETCH RES:'+fetchRes)\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters)+' - FETCH RES:'+fetchRes)",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)",
"def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)",
"def send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. \n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. \nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. Email not sent.\\n\")\n exit(1)",
"def notify(self, title, text, url=False):\n body = text + ' - ' + url\n sms = SmsApi(self.smsapi_username, self.smsapi_password)\n sms.send_sms(body, recipient=self.smsapi_recipient)\n _logger.info(\"SMSAPI sent: [%s] %s\" % (self.smsapi_recipient, body))",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()",
"def send_sms(self, sms):\n pass",
"def send_alert_to_phone_number(self, from_=None, to=None, body=None,\n media_url=None):\n #try:\n # Send the alert using the Twilio Messages resource.\n self.twilio_client.messages.create(from_=from_, to=to,\n body=body, media_url=media_url)\n #except TwilioException as e:\n # # Error out if the request fails.\n # raise MotionAlertError(\"Error sending MMS with Twilio: \"\n # \"{0}\".format(e))",
"def post(self):\n return send_email(request.args)",
"def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"[email protected]\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"[email protected]\", \"[email protected]\", msg)\n\n # smtp.close()\n return False",
"def send_sms(to, datas, temp_id):\n # celery's client only depend on the function name and the args.\n cpp = CCP()\n ret = cpp.sendTemplateSMS(to, datas, temp_id)\n # return celery async_result value\n return ret",
"def test_smsmessage_user():",
"def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})",
"def send_reminder(self):\n pass",
"def __send_msg_by_mail__(self, text):\n\n print(\"NotifyManager __send_msg_by_mail__ enters\")\n print(\"message=%s\" % text)\n\n subject = \"One ERROR message From Kipchoge with Breaking 2 pace!\"\n content1 = \"Hi, \\n\\n\"\n content2 = \" One ERROR message is recived. Detail content is :\\n\\n\"\n content = content1 + content2 + text\n \n message = MIMEText(content, \"plain\", \"utf-8\")\n message[\"Subject\"] = subject\n message[\"From\"] = self.__email_sender__\n message[\"To\"] = \",\".join(self.__email_receiver_list__)\n\n try:\n smtp_obj = smtplib.SMTP()\n smtp_obj = smtplib.SMTP_SSL(self.__email_host__)\n smtp_obj.login(\n self.__email_user__,\n self.__email_passwd__)\n smtp_obj.sendmail(\n self.__email_sender__,\n self.__email_receiver_list__,\n message.as_string())\n smtp_obj.quit()\n except smtplib.SMTPException as e:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"Error! Exception is found to post message by email\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(e)\n\n return",
"def test_get_sms_message(self):\n pass",
"def _send_email_helper(settings, excel):\n try:\n server = smtplib.SMTP(settings.smtp_server, str(settings.smtp_port))\n server.starttls()\n server.login(settings.user,settings.password)\n dest = [str(settings.user), str(settings.dest_addr)]\n server.sendmail(settings.user, dest, Email._set_email(settings,excel).as_string())\n server.quit()\n\n FileHelper.archive(settings, excel)\n excel.clear_sheet()\n excel.gen_dates()\n Popups.email_sent()\n except Exception:\n print(\"Send email failed.\")",
"def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}",
"def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))",
"def main():\n for stock_symbol, company_name in zip(STOCKS, COMPANIES):\n stock_difference = get_stock_difference(stock_symbol)\n stock_highlights = get_news(company_name)\n sms = create_sms(stock_difference, stock_highlights, company_name)\n send_message(sms)",
"def _initialize_alerting(self, jobid, mail=None, text=None):\n\n self._smsaddress = \"[email protected]\"\n password_from_keyring = keyring.get_password(\n \"astroquery:cosmosim.SMSAlert\", self._smsaddress)\n\n if password_from_keyring:\n self._smspw = password_from_keyring\n\n if not password_from_keyring:\n log.warning(\"CosmoSim SMS alerting has not been initialized.\")\n warnings.warn(\"Initializing SMS alerting.\")\n keyring.set_password(\"astroquery:cosmosim.SMSAlert\",\n self._smsaddress, \"LambdaCDM\")\n\n self.alert_email = mail\n self.alert_text = text\n\n # first check to see if the job has errored (or is a job that has\n # already completed) before running on a loop\n phase = self._check_phase(jobid)\n if phase in ['COMPLETED', 'ABORTED', 'ERROR']:\n warnings.warn(\"JobID {0} has finished with status {1}.\"\n .format(jobid, phase))\n self.alert_completed = True\n elif phase in ['EXECUTING', 'PENDING', 'QUEUED']:\n self.alert_completed = False\n else:\n self.alert_completed = False",
"def sms():\n def send_sms(number, message):\n #get session bus\n try:\n session_bus = dbus.SessionBus()\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('Have a display you must'))\n return\n\n #check for kdeconnect\n try:\n devices_dbus_obj = session_bus.get_object('org.kde.kdeconnect','/modules/kdeconnect/devices')\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('kdeconnect not installed it appears'))\n return\n\n #get devices ids\n devices_xml = devices_dbus_obj.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable')\n devices_xml = ET.fromstring(devices_xml)\n nodes = devices_xml.findall('node')\n if(len(nodes) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n deviceIDs = list()\n for node in nodes:\n deviceIDs.append(node.get('name'))\n\n #get devices properties\n deviceID_Props = dict()\n for ID in deviceIDs:\n try:\n device = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + ID)\n deviceProps = device.GetAll('', dbus_interface='org.freedesktop.DBus.Properties')\n deviceID_Props[ID] = deviceProps\n except dbus.exceptions.DBusException:\n #don't create an entry in the dictionary if the object, or a GetAll method does not exist\n pass\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n\n #eliminate non sms devices\n devices_no_sms = list()\n for device in deviceID_Props:\n keeping = False\n for plugin in deviceID_Props[device]['supportedPlugins']:\n if('sms' in plugin):\n keeping = True\n if(not keeping):\n devices_no_sms.append(device)\n for device in devices_no_sms:\n del deviceID_Props[device]\n\n #if there are no devices that support sms\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices that support sms there are not'))\n return\n #elif only one device was found that supports sms\n elif(len(deviceID_Props) is 1):\n click.echo(chalk.yellow('Device using: ' + str(list(deviceID_Props.values())[0]['name'])))\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + str(list(deviceID_Props.keys())[0]) + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n #otherwise get user to choose device\n else:\n choice_map = dict()\n for idx, device in enumerate(deviceID_Props, start=1):\n click.echo(chalk.green(str(idx) + ': ' + deviceID_Props[device]['name']))\n choice_map[str(idx)] = device\n choice = click.prompt(chalk.blue('Device, you must select: '), default='1', type=click.Choice(choice_map.keys()))\n #click.echo('you chose: ' + choice_map[the_chosen_device] + ' with id: ' + deviceNames_IDs[choice_map[the_chosen_device]])\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + choice_map[choice] + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n\n click.echo(chalk.blue('For whom you want to send an sms'))\n friend_name = input().strip()\n friend_name_lower = friend_name.lower()\n if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):\n with open(PEOPLE_CONFIG_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents['entries']\n for entry in entries:\n if(friend_name == entry['name'] or friend_name_lower == entry['name']):\n number = entry['mobile']\n break\n if('number' not in locals()):\n click.echo(chalk.red('Friend not found.'))\n else:\n if(len(number) is not 0):\n click.echo(chalk.blue('Message, you must enter: '))\n message = input(':')\n 
click.echo(chalk.yellow('Device to send sms to ' + number + ' looking for: '))\n send_sms(number, message)\n else:\n click.echo(chalk.red('Friends number not in people file, run `yoda people setup` to add it.'))\n else:\n click.echo(chalk.red('The People file does not exist, run `yoda people setup` to create an entry.'))",
"def send_email(self, message):\n pass",
"def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ..."
] |
[
"0.60806",
"0.5978295",
"0.5954807",
"0.59376353",
"0.59341943",
"0.5933434",
"0.59218425",
"0.59176993",
"0.5902045",
"0.588773",
"0.58752245",
"0.5865574",
"0.5864",
"0.5854789",
"0.58476853",
"0.58456624",
"0.5824656",
"0.57738054",
"0.5769169",
"0.57681054",
"0.5756664",
"0.57438076",
"0.5717831",
"0.57094944",
"0.57090837",
"0.5704866",
"0.56952965",
"0.5688842",
"0.56848633",
"0.56822306"
] |
0.6651949
|
0
|
Wrapper method to call DBLog.new() on an alarm event.
|
def make_DBLog(subject, event, badge, detail=''):
app = create_app()
with app.app_context():
DBLog.new(subject=subject, scope="nox", badge=badge, message=event, ip='-', user='-', detail=detail)
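A minimal usage sketch of the wrapper above; the argument values below are purely illustrative, and DBLog/create_app are assumed to be provided by the surrounding application.

# Hypothetical call: record an alarm event with a warning badge (all values illustrative).
make_DBLog(subject='alarm', event='threshold exceeded', badge='warning', detail='sensor 3')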
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def new_archive_record(self, event):\n \n # Reset the alarm counter\n self.alarm_count = 0",
"def set_new_alarm():\r\n time = request.args.get('alarm')\r\n name = request.args.get('two')\r\n news = request.args.get('news')\r\n weather = request.args.get('weather')\r\n date = time[:10:] + \" \" + time[11::]\r\n if news is None:\r\n news = False\r\n else:\r\n news = True\r\n if weather is None:\r\n weather = False\r\n else:\r\n weather = True\r\n alarms.insert(0, {\"title\":date, \"content\":name, \"news\":news, \"weather\":weather, \"id\":1})\r\n set_alarms(alarms[0], s)\r\n logging.info(\"Alarm created in set_new_alarm()\")",
"def __init__(self, abs_path_logfile):\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n handler = logging.handlers.TimedRotatingFileHandler(abs_path_logfile, when='D', interval=1)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)",
"def alarm(self, interval, call):",
"def __init__(__self__,\n resource_name: str,\n args: AlarmArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def init(self, *args, **kwargs):\n name = self.__class__.__name__\n self.call__init__(Logger, name)\n self._db_sardanas = {}",
"def new_log(self, name: str, timestamp: str, level: str, message: str) -> None:\n if level in CRITICAL_LOG_STATES:\n self.notification_app.notify(\n kind=\"single\",\n level=\"emergency\",\n title=\"Appdaemon Fehler in Log!\",\n message=(\n f\"Die App {name} hat den folgenden Fehler \"\n f\"um {timestamp} ausgelöst: {message}.\"\n ),\n targets=self.notifications[\"targets\"],\n )",
"def log_create(action, *args, **kw):\n from olympia.activity.models import ActivityLog\n\n return ActivityLog.create(action, *args, **kw)",
"def InsertLog():",
"def event_log(self):\n pass",
"def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)",
"def __init__(self, database):\n logging.Handler.__init__(self)\n if (not(isinstance(database,LogMaster)) and not(isinstance(database,LogReadWrite))):\n raise TypeError('A LogMaster or LogReadWrite object must be specified. A {:} was specified instead'.format(type(database)))\n self.database_name = database.database_name\n self.write_log = database.write_log",
"def __init__(self, *args, **kwargs):\n super(BaseHandler, self).__init__(*args, **kwargs)\n self.log = logbook.Logger(self.LOGGER)",
"def __init__(self):\r\n super(LogParser, self).__init__([CmdQueryHandler(),\r\n UpdateQueryHandler(),\r\n StandardQueryHandler(),\r\n TimeLineHandler()])",
"def on_a(self):\r\n self.log()",
"def __init__(self, database):\n logging.Handler.__init__(self)\n if (not(isinstance(database,LogMaster)) and not(isinstance(database,LogReadWrite))):\n raise TypeError('A LogMaster or LogReadWrite object must be specified. A {:} was specified instead'.format(type(database)))\n self.database_name = database.database_name\n self.write_log_buffer = database.write_log_buffer",
"def __init__(self, _Notify):\n _snap.TLogNotify_swiginit(self, _snap.new_TLogNotify(_Notify))",
"def test_post_add_log_event(self):\n pass",
"def test_watchdogs_create_alert(self):\n\n # distillery with categories\n distillery = Distillery.objects.get_by_natural_key('elasticsearch.test_index.test_docs')\n distillery.collection.insert = Mock(return_value=self.mock_doc_id)\n doc_id = distillery._save_and_send_signal(self.data)\n watchdog = Watchdog.objects.get_by_natural_key('inspect_emails')\n\n alerts = Alert.objects.all()\n self.assertEqual(alerts.count(), 1)\n\n self.assertEqual(alerts[0].alarm_type.name, 'watchdog')\n self.assertEqual(alerts[0].alarm_id, watchdog.pk)\n self.assertEqual(alerts[0].alarm, watchdog)\n self.assertEqual(alerts[0].level, 'HIGH')\n self.assertEqual(doc_id, self.mock_doc_id)",
"def make_alert(*args):\n try: SmsAlarmAlert(*args)\n except: logger.exception('Fail calling SmsAlarmAlert()')\n try: EmailAlarmAlert(*args)\n except: logger.exception('Fail calling EmailAlarmAlert()')",
"def __on_alarm(\n self, event_name: str, data: dict, kwargs: dict) -> None:\n self.run_in(self.__delayed_announcement, 40)",
"def on_start(self):\r\n self.log()",
"def __on_backup_created(self, logger, *args):",
"def logger():\n return TestListenerDB()",
"def __new__(cls):\n if not TestLog.__instance:\n TestLog.__instance = super(TestLog, cls).__new__(cls)\n return TestLog.__instance",
"def new_event(szene, time, bedingung=\"\", permanent=0):\n con = mdb.connect(constants.sql_.IP, constants.sql_.USER, constants.sql_.PASS,\n constants.sql_.DB)\n with con:\n cur = con.cursor()\n value_string = '\"%s\", \"%s\", \"%s\", \"%s\"' % (szene, str(time), str(bedingung),\n str(permanent))\n insertstatement = ('INSERT INTO %s (Szene, Time, Bedingung, permanent) VALUES(%s)' %\n (constants.sql_tables.cron.name, value_string))\n cur.execute(insertstatement)\n con.close",
"def __init__(self, db, verbose, notification_type, notification_origin, process_id):\n\n self.db = db\n self.verbose = verbose\n\n self.notification_type = notification_type\n self.notification_origin = notification_origin\n self.process_id = process_id",
"def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )",
"def __init__(self):\n super(APTHistoryLogTextPlugin, self).__init__()\n self._event_data = None",
"def alarm(self, alarm):\n\n self._alarm = alarm"
] |
[
"0.6016517",
"0.59253764",
"0.58328515",
"0.5806967",
"0.5676623",
"0.5542574",
"0.55408454",
"0.5536402",
"0.5530844",
"0.552155",
"0.5516598",
"0.5515396",
"0.54688793",
"0.5462742",
"0.54492784",
"0.5418729",
"0.53916174",
"0.5350781",
"0.5349644",
"0.5328095",
"0.5293502",
"0.5291637",
"0.5252815",
"0.52514213",
"0.5233634",
"0.52211237",
"0.5212486",
"0.5210165",
"0.520837",
"0.51876414"
] |
0.64143056
|
0
|
Computes the phase from the given timestamps. The phase is time normalized to the range 0 to 1.
|
def normalize_time(full_timestamps, half_timestamp):
phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])
return phases
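A small usage sketch under assumed demo values: the first timestamp maps to phase 0 and the last to phase 1, with everything in between scaled linearly. numpy is only needed to build the example arrays.

import numpy as np

# Assumed demo trajectory: 101 timestamps covering 0 to 10 seconds.
full_timestamps = np.linspace(0.0, 10.0, 101)
half_timestamp = np.array([0.0, 2.5, 10.0])
phases = normalize_time(full_timestamps, half_timestamp)
print(phases)  # phases 0.0, 0.25 and 1.0 for the first, quarter-way and last timestamps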
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calc_phase(p, t):\n\n return (t % p)/p",
"def calc_phase(self, time):\n dur = self.get_duration()\n phase = time / dur\n\n if self.enable_loop():\n phase -= np.floor(phase)\n else:\n phase = np.clip(phase, 0.0, 1.0)\n\n return phase",
"def phase(freqs, p0, p1, p2):\n x = utils.reduce_by_midpoint(freqs)\n phi = p0 + p1 * x + p2 * x ** 2\n return np.exp(1j * phi)",
"def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase",
"def Phase(self, *xpars):\n return np.angle(self.combineResult(*xpars))",
"def phasefold(t, P, t0, starting_phase=-0.5):\n t = np.array(t)\n dt = t0shft( np.array(t), P, t0)\n tshift = t + dt\n t_phasefold = np.mod(tshift - starting_phase*P, P) + starting_phase * P\n phase = t_phasefold / P\n cycle = np.floor(tshift/P - starting_phase).astype(int)\n return t_phasefold, phase, cycle",
"def phase_fold(times, magnitudes, errors, period):\n phase_times_unordered = (times % period) / period\n\n ordered_ind = np.argsort(phase_times_unordered, axis=0)\n\n phase_times = phase_times_unordered[ordered_ind]\n phase_magnitudes = magnitudes[ordered_ind]\n phase_errors = errors[ordered_ind]\n\n num_obs = phase_times.shape[0]\n phase_times = phase_times.reshape(num_obs, 1)\n phase_magnitudes = phase_magnitudes.reshape(num_obs, 1)\n phase_errors = phase_errors.reshape(num_obs, 1)\n\n return phase_times, phase_magnitudes, phase_errors",
"def phase_shift(annuli,annulus):\n delta_t = viscous_timescale(annuli[annulus+1]) - viscous_timescale(annuli[annulus])\n return int(delta_t)",
"def compute_phase(self, dt, phase_speed):\n num_time_steps = int(self._traj_len / phase_speed)\n\n phase = Phase(dt=self._dt, phase_speed=phase_speed, time_steps=num_time_steps)\n\n return phase",
"def get_phases(t,P,t0):\n if type(t) is not float:\n phase = ((t - np.median(t0))/np.median(P)) % 1\n ii = np.where(phase>=0.5)[0]\n phase[ii] = phase[ii]-1.0\n else:\n phase = ((t - np.median(t0))/np.median(P)) % 1\n if phase>=0.5:\n phase = phase - 1.0\n return phase",
"def phase_to_day(phase):\n if phase < 0:\n phase += 2*np.pi\n return phase*(365./(2*np.pi))",
"def phase_to_day(phase):\n if phase < 0:\n phase += 2*np.pi\n return phase*(365./(2*np.pi))",
"def get_phases(t,P,t0):\n if type(t) is not float:\n phase = ((t - np.median(t0))/np.median(P)) % 1\n ii = np.where(phase>=0.5)[0]\n phase[ii] = phase[ii]-1.0\n else: \n phase = ((t - np.median(t0))/np.median(P)) % 1\n if phase>=0.5:\n phase = phase - 1.0\n return phase",
"def phases(self):\r\n\r\n phase = tsa.cache_to_phase(self.cache, self.ij)\r\n\r\n return phase",
"def phase_time_delta(phase):\n if not phase.get('start'):\n return ''\n time_format = '%Y%m%d%H%M%SZ'\n phase_start = datetime.strptime(phase['start'], time_format)\n if 'end' not in phase:\n # The phase failed so set the end time to now\n phase_end = datetime.now()\n else:\n phase_end = datetime.strptime(phase['end'], time_format)\n delta = str(phase_end - phase_start).split(\".\")[0] # Trim microseconds\n\n return delta",
"def phases(self,dataset):\n start = '1984-1-1'\n if dataset == \"ISCCP_raw\":\n stop = '2007-12-31'\n else:\n stop = '2009-12-31'\n X = getattr(self,dataset)(time=(start,stop))\n R,P = sc.fast_annual_cycle(X)\n return MV.masked_where(np.isnan(P),P)",
"def phase(state, i):\n particles = bin(state >> i + 1).count(\"1\")\n return 1 if particles % 2 == 0 else -1",
"def signal_phase(signal):\n pi2 = 2.0*np.pi\n\n # Get pro-phase\n prophase = np.mod(np.angle(scipy.signal.hilbert(signal)), pi2)\n\n # Transform a pro-phase to a real phase\n sort_idx = np.argsort(prophase) # Get a sorting index\n reverse_idx = np.argsort(sort_idx) # Get index reversing sorting\n tht = pi2 * np.arange(prophase.size)/(prophase.size) # Set up sorted real phase\n phase = tht[reverse_idx] # Reverse the sorting of it\n\n return phase",
"def test_cache_to_phase():\r\n ij = [(0, 1), (1, 0)]\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])\r\n y = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])\r\n ts = np.vstack([x, y])\r\n freqs, cache = tsa.cache_fft(ts, ij)\r\n ph = tsa.cache_to_phase(cache, ij)",
"def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)",
"def Closure_Phase(self, uv1, uv2, uv3):\n phi1 = self.Phase(*uv1)\n phi2 = self.Phase(*uv2)\n phi3 = self.Phase(*uv3)\n cphi = phi1 - phi2 + phi3\n cphi[cphi < -pi] += 2 * pi\n cphi[cphi > pi] -= 2 * pi\n return cphi",
"def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))",
"def _ampl_phase(self, coeffs):\n return coeffs[:len(coeffs)/2], coeffs[len(coeffs)/2:]",
"def getPhase(phase):",
"def _convert_phase(self, phase):\n\n assert (0 <= phase < 359.988), 'Phase must be between 0 and 359.987 degrees'\n\n # Convert phase into POW\n POW_step = 0.02197265\n POW = round(phase/POW_step)\n\n # Convert POW into bytes\n BYTE0 = 0x00 | (POW >> 8)\n BYTE1 = 0xFF & POW\n return [BYTE0, BYTE1]",
"def phaseEstimator(phases,omegas,T_s,k):\n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - T_s*k*omegas\n phaseShifts = np.mod(a,pis)\n b = phases-phaseShifts\n omega_hat = np.mod(b,pis)\n n = omega_hat/omegas\n estimatedTime = np.sum(n)/length\n \n estimatedPhase = phaseShifts + estimatedTime*omegas\n \n return estimatedPhase",
"def freq_of_t(self, times):\n y = np.zeros(len(times))\n amplitudes, phases = self._ampl_phase(self.fourier_coeffs)\n for n, (ampl, phase) in enumerate(zip(amplitudes, phases)):\n omega_n = 2*np.pi*self.pzt_freq*(n+1)\n y+=ampl*np.cos(omega_n*times + phase)\n return y",
"def Phase(data):\r\n hil = signal.hilbert(data)\r\n return np.unwrap(np.arctan2(hil.imag, hil.real))",
"def phase_lifetime(r, freq=1):\n return np.tan(np.angle(r)) / (2 * np.pi * freq)",
"def phase_by_time(sig, fs, f_range, hilbert_increase_n=False, remove_edges=True, **filter_kwargs):\n\n sig_filt, kernel = filter_signal(sig, fs, infer_passtype(f_range), f_range=f_range,\n remove_edges=False, return_filter=True, **filter_kwargs)\n\n pha = np.angle(robust_hilbert(sig_filt, increase_n=hilbert_increase_n))\n\n if remove_edges:\n pha = remove_filter_edges(pha, len(kernel))\n\n return pha"
] |
[
"0.69019604",
"0.6539628",
"0.6517303",
"0.62012905",
"0.60972494",
"0.60425115",
"0.6018554",
"0.59292054",
"0.5912953",
"0.5860659",
"0.58079934",
"0.58079934",
"0.58040315",
"0.5802654",
"0.5798167",
"0.5787505",
"0.57755685",
"0.56984144",
"0.569676",
"0.56709915",
"0.5662755",
"0.56352246",
"0.56337976",
"0.5632741",
"0.5623485",
"0.55885243",
"0.5548893",
"0.553501",
"0.5494194",
"0.5400959"
] |
0.7095126
|
0
|
Compute a TxN matrix of features of the given phase vector using Gaussian basis functions, where T is the number of elements in the phase vector and N is the number of basis functions.
|
def compute_feature_matrix(phases, N, h):
T = len(phases)
    # Uniformly distribute the centers of the N basis functions over the interval [-2h, 1 + 2h].
centers = np.linspace(-2 * h, 1 + 2 * h, num=N)
# compute a TxN matrix with centers
C = np.repeat(centers.reshape(1, N), T, axis=0)
# compute a TxN matrix with phases
P = np.repeat(phases.reshape(T, 1), N, axis=1)
# compute a TxN feature matrix
Phi = np.exp(- 0.5 / h * np.square(P - C))
# normalize the feature matrix
Phi = Phi / np.sum(Phi, axis=1).reshape(T, 1)
return Phi
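A usage sketch with assumed values for N and h: each row of the returned matrix corresponds to one phase value and sums to 1 because of the final normalization step.

import numpy as np

# Assumed demo: 50 phase samples in [0, 1], 10 Gaussian basis functions, bandwidth h = 0.1.
phases = np.linspace(0.0, 1.0, 50)
Phi = compute_feature_matrix(phases, N=10, h=0.1)
print(Phi.shape)            # (50, 10), i.e. T x N
print(Phi.sum(axis=1)[:3])  # every row sums to 1.0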
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def TMM(x,N,n,trun_basis):\n Mat = np.zeros([len(trun_basis),len(trun_basis)])\n print('making TMM')\n perms = [int((x**n * iii)%N) for iii in trun_basis] # Modular multiplication\n for iii in range(len(trun_basis)):\n if trun_basis.__contains__(perms[iii]):\n Mat[iii,trun_basis.index(perms[iii])] = 1\n return Mat",
"def U_QFT(n):\n dim = 2**n # Hilbert space dimensionality\n Gate= [[np.exp(2 * np.pi * 1j * x * y / dim) for x in range(dim)] for y in range(dim)]\n Gate = np.array(Gate)/np.sqrt(dim)\n return Gate",
"def idft(X):\n N = len(X)\n x = np.zeros(N, 'complex')\n \n K = np.arange(0, N, 1)\n for n in range(0, N, 1):\n x[n] = np.dot(X, np.exp(-1j * 2 * np.pi * K * n / N))\n return x / N",
"def ogfftf(x, N):\n m = N // 2\n for ii in np.arange(1,int(np.log2(N)) + 1):\n M = N // (2**(ii-1))\n for kk in np.arange(0,N,M):\n for jj in np.arange(m):\n idx = jj + kk \n p = (2**(ii-1))*jj\n w = np.exp(-1j*2*PI*(p/N))\n x_0 = x[idx] + x[idx + m]\n x_1 = (x[idx] - x[idx + m])*w\n x[idx] = x_0\n x[idx + m] = x_1\n m = m // 2\n return brc(x)",
"def dft(X): \n N = len(X)\n x = np.zeros(N, 'complex')\n K = np.arange(0, N, 1)\n for n in range(0, N, 1):\n x[n] = np.dot(X, np.exp(1j * 2 * np.pi * K * n / N))\n return x",
"def G(x,mu,T):\r\n den1 = np.cosh(mu/(kb*T))/np.sinh(x/(kb*T))\r\n den2 = np.tanh(x/(kb*T))\r\n\r\n return 1/(den1 + den2)",
"def Gaussian_conv(n):\n\n\t# Choose sigma and mu\n\tsigma = 1\n\tmu = 0\n\n\t# Create table\n\tb = sigma * np.random.randn(n) + mu\n\tsum = b.sum()\n\t#b = b/sum\n\tprint(b)\n\treturn b",
"def mu_HF(T, n):\n T, n = np.broadcast_arrays(np.atleast_1d(T), np.atleast_1d(n))\n mu, _ = physics_solver_mu(n, T)\n return mu",
"def sigMatrixGen(input_matrix, n):\n\n result = []\n\n for i in range(n):\n sig = sigGen(input_matrix)\n result.append(sig)\n\n # return a ndarray\n print(\"\\nsig matrix:\")\n print(np.array(result))\n print()\n return np.array(result)",
"def make_tauchen_ar1(N, sigma=1.0, ar_1=0.9, bound=3.0):\n yN = bound * sigma / ((1 - ar_1**2) ** 0.5)\n y = np.linspace(-yN, yN, N)\n d = y[1] - y[0]\n trans_matrix = np.ones((N, N))\n for j in range(N):\n for k_1 in range(N - 2):\n k = k_1 + 1\n trans_matrix[j, k] = stats.norm.cdf(\n (y[k] + d / 2.0 - ar_1 * y[j]) / sigma\n ) - stats.norm.cdf((y[k] - d / 2.0 - ar_1 * y[j]) / sigma)\n trans_matrix[j, 0] = stats.norm.cdf((y[0] + d / 2.0 - ar_1 * y[j]) / sigma)\n trans_matrix[j, N - 1] = 1.0 - stats.norm.cdf(\n (y[N - 1] - d / 2.0 - ar_1 * y[j]) / sigma\n )\n\n return y, trans_matrix",
"def make_gabor(x, frequency, phase, sigma):\n return np.cos(frequency*x + phase) * np.exp(-x**2/2./sigma**2)",
"def generate_basis(n, dim):\n planes = [np.random.randn(dim) for i in range(n)]\n return [p / np.linalg.norm(p) for p in planes]",
"def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))",
"def createModEnvelopes(TR, N, t0, t1, jitterTiming, jitterLength, gkW=0, weak=0.1):\n out = _N.empty((TR, N))\n\n if gkW > 0:\n gk = gauKer(gkW)\n\n AM = _N.empty(N)\n\n for tr in xrange(TR):\n w = int(((t1-t0) + jitterLength * _N.random.randn())/2.)\n m = int(0.5*(t1+t0) + jitterTiming * _N.random.randn())\n\n if m-w > 0:\n AM[m-w:m+w] = 1\n AM[0:m-w] = weak\n else:\n AM[0:m+w] = 1\n\n AM[m+w:] = weak\n if gkW > 0:\n out[tr] = _N.convolve(AM, gk, mode=\"same\")\n else:\n out[tr] = AM\n out[tr] /= _N.max(out[tr])\n\n return out",
"def doKuramoto(N, Tmax, phases, base = 2, nBits = 8):\n\n T = arange(10, Tmax - 10)\n\n syncAux = zeros([len(T), 1])\n\n for t in range(0, len(T)):\n\n ku = sum(cos(phases[:, T[t]]) + 1j * sin(phases[:, T[t]])) / N\n syncAux[t] = abs(ku)\n\n metastabAux = std(syncAux)\n shEntropy = getEntropy(syncAux[:,0], base, nBits)\n\n return metastabAux, syncAux, shEntropy",
"def fn_Generate_STM_polynom(zeta,nStates):\n stm = np.eye(nStates,dtype=np.float64);\n for yindex in range (0,nStates):\n for xindex in range (yindex,nStates): # STM is upper triangular\n stm[yindex,xindex] = np.power(zeta,xindex-yindex)/float(math.factorial(xindex-yindex));\n return stm;",
"def gaussian_nonorm(t, params):\n # TODO Add zeroes for t>t_final\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n sigma = params['sigma'].get_value()\n gauss = tf.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2))\n return gauss",
"def e_step(X, taus, mus, sigmas):\n K, N = mus.shape[0], X.shape[0] # dimensions, K: num of hidden component, N: number of data points\n get_component_prob = lambda x: component_pdfs(x, mus, sigmas)\n T = np.apply_along_axis(arr=X, func1d=get_component_prob, axis=1) # gaussian component probabilities in row format (NxK)\n taus_rep = np.tile(taus, reps=(N, 1)) # repeat tau along N-axis so elementwise product can work\n\n norm_const = np.sum(T*taus_rep, axis=1) # the normalisation factor \\sum_{k=1}^K p_k * tau_k, and is currently estimated likelihood\n norm_const_rep = np.tile(norm_const, reps=(K, 1)).T # repeat normalisation constant along K-axis\n\n T = T*taus_rep/norm_const_rep # calculate the posterior \n return T.T #return the transposed matrix so that the index is matched",
"def X_n(a, b, n, T):\n def X(t):\n return a * np.cos((np.pi * 2 * n * t) / T) + b * np.sin((np.pi * 2 * n * t) / T)\n return X",
"def make_theta(n, N_real_bucket):\n\n thet = np.zeros(n)\n M = 32 # number of particles in each beamlet\n nb = int(\n np.round(n / M)\n ) # number of beamlet via Fawley between 64 to 256 (x16=1024 to 4096)\n if M * nb != n:\n raise ValueError(\"n must be a multiple of 4\")\n\n effnoise = np.sqrt(3 * M / (N_real_bucket / nb)) # Penman algorithm for Ns/nb >> M\n for i in range(nb):\n for j in range(M):\n thet[i * M + j] = 2 * np.pi * (j + 1) / M + 2 * np.random.rand(1) * effnoise\n return thet",
"def generate_data(n_features=2, n_samples=500, period=10, order=1,\n noise_variance=1, random_state=None):\n\n rng = check_random_state(random_state)\n\n X = np.empty((n_samples, n_features), dtype='f8')\n\n for i in range(order):\n X[i] = rng.normal(size=(n_features,))\n\n mu = rng.normal(size=(n_features,))\n\n if order > 0:\n A = rng.normal(size=(order, n_features, n_features))\n else:\n A = None\n\n phases = np.cos(2 * pi * np.arange(n_samples) / period)\n for t in range(order, n_samples):\n phase = phases[t]\n xt = mu * phase\n if order > 0:\n for m in range(1, order + 1):\n xt += np.dot(A[m - 1] * phase, X[t - m])\n\n X[t] = rng.multivariate_normal(\n xt, cov=(noise_variance*np.eye(n_features)))\n\n return X, mu, A, phases",
"def Gaussian(x, t, sigma):\n return np.exp(-(x - t)**2 / (2 * sigma**2))",
"def gaussian(N, dt, width):\n df = 1/(N*dt) # frequency step\n f = np.arange(0, round(0.5*N), 1, dtype=float)*df # frequency array\n w = 2*np.pi*f # angular frequency\n\n G = np.array([0]*N, dtype=float)\n G[0:round(N/2)] = np.exp(-w**2/(4*width**2))/dt\n G_lr = np.flip(G)\n G[round(N/2)+1:] = G_lr[-len(G[round(N/2)+1:]):]\n return G",
"def fitzhugh_nagumo(x, t, a, b, tau, I):\n return np.array([x[0] - x[0]**3 - x[1] + I, \n (x[0] - a - b * x[1])/tau])",
"def obtain_Tss(self, X):\n Xisa = self.obtain_Xisa(X)\n x4einsum = list(it.chain(*zip(Xisa,\n [[0, i+1] for i in range(self.N)])))\n x4einsum.append(np.arange(self.N+1).tolist()) # output format\n Xs = np.einsum(*x4einsum, optimize=self.opti)\n\n return np.einsum(self.T, np.arange(self.N+2).tolist(), # trans. tensor\n Xs, np.arange(self.N+1).tolist(), # policies\n [0, self.N+1], optimize=self.opti) # output format",
"def _gamma(self, nOrT : float) -> vector:\n pass",
"def ec_matrix_vector(p0, T, n): \n if(n<=0):\n EC=np.zeros(T.shape)\n return EC\n else:\n \"\"\"Probability vector after (k=0) propagations\"\"\" \n p_k=1.0*p0\n \"\"\"Sum of vectors after (k=0) propagations\"\"\"\n p_sum=1.0*p_k \n for k in xrange(n-1):\n \"\"\"Propagate one step p_{k} -> p_{k+1}\"\"\"\n p_k=np.dot(p_k,T) \n \"\"\"Update sum\"\"\"\n p_sum+=p_k \n \"\"\"Expected counts\"\"\"\n EC=p_sum[:,np.newaxis]*T \n return EC",
"def ogfft2(x, N):\n x_p = brc(x)\n PI = np.pi\n for ii in np.arange(1,int(np.log2(N)) + 1):\n M = int(2**ii)\n w_M = np.exp(1j*((2*PI)/M))\n for kk in np.arange(0,N,M):\n w = 1\n m = int(M/2)\n for jj in np.arange(m):\n t = w*x_p[kk + jj + m]\n u = x_p[kk + jj]\n x_p[kk + jj] = u + t\n x_p[kk + jj + m] = u - t\n w = w*w_M\n return x_p",
"def transmission(matID, t, keV, density=None):\n return np.exp(-mu(matID, keV, density) * t)",
"def gauss_xmat(filename, natoms):\n full = io.read_file(filename)\n nmodes = 3*natoms-6\n lines = full.split('X matrix')[1].split('Resonance')[0]\n lines = lines.split('\\n')\n del lines[0]\n del lines[-1]\n\n xmat = np.zeros((nmodes, nmodes))\n rangemod = 1\n if nmodes % 5 == 0:\n rangemod = 0\n marker = 0\n\n for m in range(0, nmodes/5+rangemod):\n length = nmodes - m * 5\n a = np.array(lines[marker+1:marker+length+1])\n for i in range(length):\n for j in range(0, len(a[i].split())-1):\n xmat[m*5 + i, m*5 + j] = a[i].split()[j+1]\n xmat[m*5 + j, m*5 + i] = a[i].split()[j+1]\n marker += length+1\n\n return xmat"
] |
[
"0.6072341",
"0.5897467",
"0.5807051",
"0.5731498",
"0.5680174",
"0.56800663",
"0.5603116",
"0.5543852",
"0.548797",
"0.5432222",
"0.5381884",
"0.53484786",
"0.5343271",
"0.5334068",
"0.53140724",
"0.5287831",
"0.5281735",
"0.527351",
"0.5270905",
"0.5261998",
"0.5241274",
"0.5229854",
"0.52157897",
"0.5206838",
"0.5204981",
"0.51797074",
"0.51787025",
"0.5171563",
"0.51700854",
"0.51637894"
] |
0.71907866
|
0
|
This function prints out the header keywords as part of the BPIXTAB verification procedure.
|
def bpix_kw(bpixtab):
print('Verifying the header keywords of UVIS bad pixel table {}...'.format(bpixtab))
print('USEAFTER:')
print(fits.getheader(bpixtab)['USEAFTER'])
print(' ')
print('PEDIGREE:')
print(fits.getheader(bpixtab)['PEDIGREE'])
print(' ')
print('DESCRIP:')
print(fits.getheader(bpixtab)['DESCRIP'])
print(' ')
print('COMMENT:')
print(fits.getheader(bpixtab)['COMMENT'])
print(' ')
print('HISTORY:')
print(fits.getheader(bpixtab)['HISTORY'])
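The function above re-reads the FITS header once per keyword; a sketch of an equivalent single-read variant (the filename is hypothetical) could look like this.

from astropy.io import fits

# Read the header once and reuse it for every keyword of interest (filename is illustrative).
hdr = fits.getheader('example_bpixtab.fits')
for kw in ('USEAFTER', 'PEDIGREE', 'DESCRIP', 'COMMENT', 'HISTORY'):
    print('{}:'.format(kw))
    print(hdr[kw])
    print(' ')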
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def print_header():\n print()\n print(\"*\" * 45)\n print(\"Please, select algorithm:\")\n print(\"*\" * 45)",
"def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"",
"def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )",
"def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")",
"def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')",
"def print_header(self):\n print()\n print(\"=\"*25)\n print()\n print(\"Have fun in your blackjack round!\")\n print()\n print(\"=\"*25)",
"def header_print(output):\n print(\"\\n----------------------------------------------------------------\")\n print(output)\n print(\"----------------------------------------------------------------\")",
"def printHWHeader(keyword, count):\n\n print (\n \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\\n\" +\n \"Social Computing HW1\\n\" +\n \"Donghun Lee, 2012-23867\\n\" +\n \"This is twitter result for\\n\" +\n \"Keyword :\" + keyword + \" Query amount :\" + str(count))",
"def print_header(banner_name):\n print()\n print()\n print(\"----------------------------------------------------\")\n print(\" {0}\".format(banner_name))\n print(\"-----------------------------------------------------\")\n print()",
"def __print_header():\n __collen[\"id\"] = max(__collen[\"id\"], 2) # min is \"ID\"\n __collen[\"name\"] = max(__collen[\"name\"], 14) # min is \"Subvolume Name\"\n __collen[\"used_lim\"] = max(__collen[\"used_lim\"], 10) # min is \"Max (Used)\"\n __collen[\"excl_lim\"] = max(__collen[\"excl_lim\"], 11) # min is \"Max (Excl.)\"\n print(\"ID{:s} | Subvolume Name{:s} | {:s}Used | {:s}Max (Used) | {:s}Exclusive | {:s}Max (Excl.)\".format(\n \" \"*(__collen[\"id\"]-2),\n \" \"*(__collen[\"name\"]-14),\n \" \"*(MAX_SIZE-4),\n \" \"*(__collen[\"used_lim\"]-10),\n \" \"*(MAX_SIZE-9),\n \" \"*(__collen[\"excl_lim\"]-11)))",
"def print_header():\n\n print(\"\"\"\n _____ _ ____ _____ ____ ____ _____ ____ _____\n /__ __\\/ \\/ _\\ /__ __\\/ _ \\/ _\\ /__ __\\/ _ \\/ __/ 1 | 2 | 3\n / \\ | || / _____ / \\ | / \\|| / _____ / \\ | / \\|| \\ 4 | 5 | 6\n | | | || \\_\\____\\| | | |-||| \\_\\____\\| | | \\_/|| /_ 7 | 8 | 9\n \\_/ \\_/\\____/ \\_/ \\_/ \\|\\____/ \\_/ \\____/\\____|\n\n To play Tic-Tac-Toe, you need to get three in a row...\n Your choices are defined, they must be from 1 to 9...\n \"\"\")",
"def section_header(text):\n\n print \"---- %s ----\" % text",
"def header(out_file=sys.stdout, ac=None):\n if ac is not None:\n print(*Features.FEATURE_COLS, \"AC\", sep=\"\\t\", file=out_file)\n else:\n print(*Features.FEATURE_COLS, sep=\"\\t\", file=out_file)",
"def print_header(message):\n print('-----------')\n print(message)\n print('-----------')",
"def print_header(title):\n underline = \"\".join(['=' for char in title])\n print(title)\n print(underline)",
"def print_the_header():\n print('-------------------')\n print(' Weather APP')\n print('-------------------')\n print()",
"def _print_header():\n print()\n print(\n \" ┌─────────────────────── Measurements in BPM ─────────────────────┐\"\n )\n print(\n \"ID Date Activity Distance Elevation Start Duration 5s 30s 60s 5m 10m 20m 30m 60m 90m 120m\"\n )\n _print_separator()",
"def show_header():\n return {};",
"def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))",
"def header(name, value):\n print '%s: %s\\n' % (name, value)",
"def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.write('%---' + name.upper() + ('-' * (17 - len(name))) + '\\n')\n texfile.write('%--------------------\\n')",
"def headerDA(blocks,block_size,extended):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\",\"Index\",\"WordOffset\",\"ByteOffset\"]\n else:\n header =[\"Address\"]\n for i in range(0,blocks):\n for x in range(0,block_size):\n header.append(\"B%i W%i\"%(i,x))\n header.append(\"Result\")\n return header",
"def get_headers_for_print(self):\n lines_for_print = []\n for header in self.metadata:\n lines_for_print.append(self.metadata[header])\n lines_for_print.append('\\t'.join(self.header))\n lines_for_print[-1] = '#' + lines_for_print[-1]\n return lines_for_print",
"def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out",
"def format_report_header(self):",
"def print_header(msg):\n\n tf.print(BColors.BOLD + msg + BColors.ENDC, output_stream=sys.stderr)",
"def headerFA(block_size,extended=True):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\"]\n else:\n header =[\"Address\"]\n for x in range(0,block_size):\n header.append(\"W%i\"%(x))\n header.append(\"Result\")\n return header",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def header(self):\n ...",
"def print_header(self):\n print(\"\\nTesting {} - {}\\n\".format(self.__schema_name, self.__schema_path))"
] |
[
"0.6929973",
"0.6687926",
"0.6540516",
"0.6525296",
"0.6507055",
"0.6387599",
"0.6346713",
"0.6323378",
"0.6303749",
"0.62909293",
"0.6277778",
"0.6241794",
"0.6148025",
"0.6145261",
"0.61236537",
"0.61071616",
"0.60899454",
"0.60371745",
"0.5962819",
"0.5959928",
"0.5950905",
"0.5928007",
"0.58855915",
"0.5804087",
"0.5795246",
"0.57857907",
"0.5778186",
"0.57676506",
"0.5766224",
"0.5763287"
] |
0.75132596
|
0
|
The main function for the UVIS bad pixel table verification procedure.
|
def bpixtab_test(bpixtab, path='/grp/hst/wfc3j/jmedina/bpixtab_test/'):
# Verifying the header keywords
bpix_kw(bpixtab)
# Generating an image of the bad pixels using the bad pixel table
# which can be inspected using DS9
    bpix_image(bpixtab, path)  # path defaults to the test directory given in the signature unless the caller overrides it
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n run_test_draw_upside_down_wall()",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def main():\n\n # Parse the arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--ttylogin', action='store_true')\n parser.add_argument('-u', '--users', action='append')\n parser.add_argument('-s', '--subnets', action='append')\n args = parser.parse_args()\n\n # Grab the sessions from utmp\n sessions = get_active_sessions()\n\n # Gather the metrics\n illegal_logins = get_illegal_tty_logins(sessions, args.ttylogin)\n illegal_users = get_illegal_users(sessions, args.users)\n illegal_address = get_illegal_source_addresses(sessions, args.subnets)\n\n # Set the status codes if requested and we have detected breaches\n code = NAGIOS_OK\n if illegal_logins:\n code = NAGIOS_CRITICAL\n if illegal_users:\n code = NAGIOS_CRITICAL\n if illegal_address:\n code = NAGIOS_CRITICAL\n\n # Return the status\n status = ['OK', 'WARNING', 'CRITICAL'][code]\n print '{}: tty logins {}; illegal users {}; illegal addresses {}'.format(\n status,\n ', '.join(illegal_logins) if illegal_logins else 'none',\n ', '.join(illegal_users) if illegal_users else 'none',\n ', '.join(illegal_address) if illegal_address else 'none')\n\n sys.exit(code)",
"def main():\n \n # Help string to be shown using the -h option\n descStr = \"\"\"\n Scan a directory for ASCII files containing Stokes I, Q and U spectra.\n Each file should contain Stokes spectra in four columns:\n \n [frequency_Hz, StokesI_Jy, StokesQ_Jy, StokesU_Jy]\n \n The script reads in the spectra, sorts the channels by assending frequency\n and re-writes the vectors to the ASCII files. The script can use wildcard\n pattern matching to filter the files in the directory, or read a list of\n filenames from a text file. \n\n Examples:\n \n ./1_verify_ascii_data.py -f *I.fits -Q *Q.fits -U *U.fits testData/\n \"\"\"\n\n # Parse the command line options\n parser = argparse.ArgumentParser(description=descStr,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('dataPath', metavar='PATH/TO/DATA', default='.',\n nargs='?', help='Path to data directory [.]')\n parser.add_argument('-m', dest='pattern', default=patDefault,\n help='Pattern to match the data files [%s]' % patDefault)\n parser.add_argument('-f', dest='listFile',\n help='File containing list of data files')\n parser.add_argument('-c', dest='col', type=int, nargs='?', default=4,\n help='Column in listFile containing the list of files [4]')\n args = parser.parse_args()\n dataPath = args.dataPath\n pattern = args.pattern\n listFile = args.listFile\n col = args.col\n \n # Call the verify function\n verify_image_data(dataPath, pattern, listFile, col)",
"def main():\n options = get_options()\n data_path = str(options.data_dir)\n bloom = BloomCategory(malicious_path=data_path + '/malicious-ips.bloom',\n predicted_path=data_path + '/predicted-ips.bloom',\n has_intel_path=data_path + '/ip-threat-intel.bloom')\n with FileInput(sys.stdin) as file_handle:\n for line in file_handle:\n ip_address = line.strip()\n if bloom.check_ip(ip_address,\n check_malicious=options.malicious,\n check_predicted=options.predicted,\n check_suspicious=options.suspicious):\n print(ip_address)",
"def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")",
"def main(argv):\r\n\r\n # We should parse the command line arguments, etc. \r\n # For this quick-and-dirty script we'll hardcode all the parameters...\r\n \r\n # ...the target svg file names...\r\n svg_base_filename = \"./table\"\r\n # ...the target text file name where a C-format table with the cycle counts\r\n # will be written to...\r\n c_table_file = \"./cycle_table.c\"\r\n # ...and the source CSV cycle count log file. Note this path is the default\r\n # working path for the Modelsim simulations, change if necessary.\r\n cycle_log_filename = \"../../sim/cycle_count_log.csv\"\r\n \r\n # Read cycle count data...\r\n cycle_info = read_cycle_info(cycle_log_filename)\r\n # ...and read opcode table data (instruction mnemonics and byte counts).\r\n opcode_info = read_opcode_info(\"opcode_info.txt\")\r\n \r\n # First of all, write the C-format cycle table, to be copied and pasted\r\n # into the B51 simulator.\r\n write_cycle_table(cycle_info, c_table_file)\r\n \r\n # We can render the opcode table 'whole', resulting in a wide table, or\r\n # we can render the left and right halves separately, which gives a format\r\n # better suted for a printed page. \r\n \r\n # So, for all three possible rendering formats...\r\n parts = (\"left\", \"right\", \"full\")\r\n # ...render the opcode table.\r\n for part in parts:\r\n \r\n # Build the SVG text for the table...\r\n svg = build_svg_table(opcode_info, cycle_info, part)\r\n # ...and write it to the target file.\r\n fout = None\r\n try:\r\n full_filename = svg_base_filename + \"_\" + part + \".svg\"\r\n fout = open(full_filename, \"w\")\r\n fout.write(svg)\r\n fout.close()\r\n print \"SVG opcode table written to %s\" % full_filename\r\n except:\r\n print \"Trouble opening %s for output\" % full_filename\r\n finally:\r\n if fout: fout.close()",
"def main():\n\t#print(scipy.__version__)\n\t#image()\n\t#heat_capacity2()\n\t#hist()\n\t#single_plot()\n\n\t#heat_capacity2()\n\t#single_plot()\n\t#plt.show()\n\t#u0_tc()\n\t#multi_heat_capacity(\"HL_DM_flux5\",True)\n\t#multi_heat_capacity2()\n\t#plot_spin()\n\t#plt.show()\n\theat_capacity2(1,2)\n\t#hist()\n\tplt.show()\n\t#potential()\n\t#plt.show()\n\t#heat_capacity(3,4)\n\t#heat_capacity(5,6)\n\t#heat_capacity(7,8)\n\t#final_spins()\n\t#plot_spin()\n\t#plot_from_csv()\n\t#difference_plot()",
"def main():\n utl.calibrate(False)\n undistort(False)\n edge_detect(False)\n transform(False)\n identify_line(False)\n lane_line(True)",
"def giveup():\n for matrix in xrange(4):\n display.set_raw64(LED8x8ICONS['UNKNOWN'],matrix)\n print \"Error occured.\"\n sys.exit(1)",
"def main():\n\n inputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_raw'\n outputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_faces'\n\n # detects all faces from all images in inputDirectory and outputs\n # to outputDirectory\n FaceDetection.extractFaces(\n inputDirectory=inputDirectory, outputDirectory=outputDirectory)",
"def run_main(raw, con, inl, mon, sub):\n # Ahmad please put (or import/call) your code here\n GOValid_func(raw,con,inl,mon,sub)",
"def main() -> int:\n ucvm_out = \"\"\n for j in frange(CORNERS[\"bl\"][\"n\"], CORNERS[\"ur\"][\"n\"], SPACING):\n for i in frange(CORNERS[\"bl\"][\"e\"], CORNERS[\"ur\"][\"e\"] + SPACING, SPACING):\n ucvm_out += \"%.2f %.2f 0\\n\" % (i, j)\n os.chdir(\"/Users/davidgil/ucvm-15.10.0/bin\")\n proc = Popen(\n [\"./ucvm_query\", \"-f\", \"../conf/ucvm.conf\"], stdout=PIPE, stdin=PIPE, stderr=STDOUT\n )\n out_arr = np.zeros(\n shape=(\n int((CORNERS[\"ur\"][\"n\"] - CORNERS[\"bl\"][\"n\"]) / SPACING) + 2,\n int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING) + 2\n )\n )\n output = proc.communicate(input=ucvm_out.encode(\"ASCII\"))[0]\n i = 0\n j = 0\n for line in output.decode(\"ASCII\").split(\"\\n\")[2:-1]:\n line_split = line.split()\n try:\n out_arr[j][i] = float(line_split[4])\n except IndexError:\n print(line_split)\n if i == int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING):\n i = 0\n j += 1\n else:\n i += 1\n np.save(\"vs30.dat\", out_arr)\n return 0",
"def test_thresholds_main():\n\n # Parsing arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"Input image\")\n ap.add_argument(\"-o\", \"--out_dir\", required=True, help=\"Output directory\")\n args = ap.parse_args()\n\n # Reading image\n img = cv2.imread(args.image, 0)\n\n if img is None:\n print(\"Invalid input image\")\n return\n\n img_name = basename(args.image).split(\".\")[0]\n\n test_thresholds(img, args.out_dir, img_name)",
"def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type",
"def main():\r\n global matrix_x, matrix_y\r\n if inputs_valid():\r\n if number_of_images_b.get() != \"\": #check if images_b empty\r\n matrix_size = (int(number_of_images_a.get()) +\r\n int(number_of_images_b.get()))\r\n else:\r\n matrix_size = int(number_of_images_a.get())\r\n\r\n size_prime, matrix_x, matrix_y = square_distribution(matrix_size)\r\n\r\n if size_prime:\r\n messagebox.showwarning(\"Grid can not be constructed\", (\r\n \"Error: grid of requested size can not be\"\r\n \"constructed (type a + type b is prime)\"))\r\n else:\r\n generate_image_matrices()\r\n messagebox.showinfo(\"\",\"done.\")",
"def test_launch_failures_hw(self):\n self.test_launch_failures()",
"def main(unstripped_func_bounds_file_path, stripped_func_bounds_file_path):\n\tunstripped_bounds_dict, unstripped_names_dict = tuple(eval(file(unstripped_func_bounds_file_path, \"rb\").read()))\n\tstripped_bounds_dict, stripped_names_dict = tuple(eval(file(stripped_func_bounds_file_path, \"rb\").read()))\n\t\n\tunstripped_bounds_dict = {k:frozenset(unstripped_bounds_dict[k]) for k in unstripped_bounds_dict.keys()}\n\tstripped_bounds_dict = {k:frozenset(stripped_bounds_dict[k]) for k in stripped_bounds_dict.keys()}\n\t\n\tundiscovered_func_bounds = frozenset(unstripped_bounds_dict.items()) - frozenset(stripped_bounds_dict.items())\n\t\n\tprint \"IDA undiscovered %f%% procedures (%d/%d) from stripped-binary-params: %s\" %((float(len(undiscovered_func_bounds)) / len(unstripped_bounds_dict)) * 100.0, len(undiscovered_func_bounds), len(unstripped_bounds_dict), stripped_func_bounds_file_path)\n\tif len(undiscovered_func_bounds) > 0:\n\t\tprint \"List of %d functions which were undiscovered successfully after stripping:\" %(len(undiscovered_func_bounds))\n\t\tfor func in undiscovered_func_bounds:\n\t\t\tprint unstripped_names_dict[func[0]]",
"def main():\n args = lattice_argparse().parse_args()\n\n args.work_dir = simbad.command_line.get_work_dir(args.run_dir, work_dir=args.work_dir, ccp4_jobid=args.ccp4_jobid, ccp4i2_xml=args.ccp4i2_xml)\n\n log_file = os.path.join(args.work_dir, \"simbad.log\")\n debug_log_file = os.path.join(args.work_dir, \"debug.log\")\n global logger\n logger = simbad.util.logging_util.setup_logging(args.debug_lvl, logfile=log_file, debugfile=debug_log_file)\n\n gui = simbad.util.pyrvapi_results.SimbadOutput(\n args.rvapi_document, args.webserver_uri, args.display_gui, log_file, args.work_dir, ccp4i2_xml=args.ccp4i2_xml, tab_prefix=args.tab_prefix\n )\n\n simbad.command_line.print_header()\n logger.info(\"Running in directory: %s\\n\", args.work_dir)\n\n stopwatch = StopWatch()\n stopwatch.start()\n\n solution_found = simbad.command_line._simbad_lattice_search(args)\n if args.space_group and args.unit_cell:\n display_summary = False\n elif solution_found:\n logger.info(\"Lucky you! SIMBAD worked its charm and found a lattice match for you.\")\n display_summary = True\n else:\n logger.info(\"No results found - lattice search was unsuccessful\")\n display_summary = True\n\n if args.output_pdb and args.output_mtz:\n csv = os.path.join(args.work_dir, \"latt\", \"lattice_mr.csv\")\n if os.path.exists(csv):\n result = simbad.util.result_by_score_from_csv(csv, \"final_r_free\", ascending=True)\n simbad.util.output_files(args.work_dir, result, args.output_pdb, args.output_mtz)\n\n stopwatch.stop()\n logger.info(\"All processing completed in %d days, %d hours, %d minutes, and %d seconds\", *stopwatch.time_pretty)\n\n gui.display_results(display_summary, args.results_to_display)\n if args.rvapi_document:\n gui.save_document()",
"def main_guess_barcodes(in_barcodes, \n in_picard_metrics, \n out_summary_tsv, \n sample_names, \n outlier_threshold, \n expected_assigned_fraction, \n number_of_negative_controls, \n readcount_threshold, \n rows_limit):\n\n bh = util.illumina_indices.IlluminaBarcodeHelper(in_barcodes, in_picard_metrics, rows_limit)\n guessed_barcodes = bh.find_uncertain_barcodes(sample_names=sample_names, \n outlier_threshold=outlier_threshold, \n expected_assigned_fraction=expected_assigned_fraction, \n number_of_negative_controls=number_of_negative_controls, \n readcount_threshold=readcount_threshold)\n bh.write_guessed_barcodes(out_summary_tsv, guessed_barcodes)",
"def test_85_entry_point(self):\n\t\tinput = \"\"\"procedure main(pa:boolean); var a:real;x:array[1 .. 10]of real;\n\t\tbegin a:=x[sq(2)]; fuc(); end\n\t\tfunction sq(m:integer):integer; begin return m*m; end\"\"\"\n\t\texpect = \"No entry point\"\n\t\tself.assertTrue(TestChecker.test(input,expect,485))",
"def main(argv=None):\n parser = argparse.ArgumentParser(\"Test TFL attention mechanism\")\n parser.add_argument('-i', '--image', type=str, help='Path to an image')\n parser.add_argument(\"-j\", \"--json\", type=str, help=\"Path to json GT for comparison\")\n parser.add_argument('-d', '--dir', type=str, help='Directory to scan images in')\n args = parser.parse_args(argv)\n default_base = '../data'\n if args.dir is None:\n args.dir = default_base\n flist = glob.glob(os.path.join(args.dir, '*_leftImg8bit.png'))\n for image in flist:\n json_fn = image.replace('_leftImg8bit.png', '_gtFine_polygons.json')\n if not os.path.exists(json_fn):\n json_fn = None\n test_find_tfl_lights(image, json_fn)\n if len(flist):\n print(\"You should now see some images, with the ground truth marked on them. Close all to quit.\")\n else:\n print(\"Bad configuration?? Didn't find any picture to show\")\n plt.show(block=True)",
"def main(argv=None):\n parser = argparse.ArgumentParser(\"Test TFL attention mechanism\")\n parser.add_argument('-i', '--image', type=str, help='Path to an image')\n parser.add_argument(\"-j\", \"--json\", type=str, help=\"Path to json GT for comparison\")\n parser.add_argument('-d', '--dir', type=str, help='Directory to scan images in')\n args = parser.parse_args(argv)\n default_base = '../data'\n if args.dir is None:\n args.dir = default_base\n flist = glob.glob(os.path.join(args.dir, '*_leftImg8bit.png'))\n for image in flist:\n json_fn = image.replace('_leftImg8bit.png', '_gtFine_polygons.json')\n if not os.path.exists(json_fn):\n json_fn = None\n test_find_tfl_lights(image, json_fn)\n if len(flist):\n print(\"You should now see some images, with the ground truth marked on them. Close all to quit.\")\n else:\n print(\"Bad configuration?? Didn't find any picture to show\")\n plt.show(block=True)",
"def bpix_kw(bpixtab):\n print('Verifying the header keywords of UVIS bad pixel table {}...'.format(bpixtab))\n print('USEAFTER:')\n print(fits.getheader(bpixtab)['USEAFTER'])\n print(' ')\n print('PEDIGREE:')\n print(fits.getheader(bpixtab)['PEDIGREE'])\n print(' ')\n print('DESCRIP:')\n print(fits.getheader(bpixtab)['DESCRIP'])\n print(' ')\n print('COMMENT:')\n print(fits.getheader(bpixtab)['COMMENT'])\n print(' ')\n print('HISTORY:')\n print(fits.getheader(bpixtab)['HISTORY'])",
"def main():\n print()\n print(\"Un-comment and re-comment calls in MAIN one by one as you work.\")\n print()\n\n # run_test_sum_until_prime_input()\n # run_test_next_prime()\n # run_test_sum_to_next_prime()\n # run_test_prime_gap()\n # run_test_wait_for_sum_of_cubes()",
"def test_screenip_unit_det(self):\n #\n # '''\n # Dose Equiv. Toxicity:\n #\n # The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by\n # the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):\n #\n # Dose Equiv. Toxicity = (NOAEC * FI) / BW\n #\n # NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,\n # and for any other test species. The model calculates the dose equivalent toxicity values for\n # all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose\n # equivalent toxicity value to represent the chronic toxicity of the chemical to birds.\n # '''\n # try:\n # # result =\n # # self.assertEquals(result, )\n # pass\n # finally:\n # pass\n # return\n #\n #\n # def test_det_duck(self):\n # \"\"\"\n # unittest for function screenip.det_duck:\n # :return:\n # \"\"\"\n # try:\n # # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)\n # screenip_empty.noaec_duck = pd.Series([1.], dtype='int')\n # screenip_empty.fi_bird = pd.Series([1.], dtype='int')\n # result = screenip_empty.det_duck()\n # npt.assert_array_almost_equal(result, 1000., 4, '', True)\n # finally:\n # pass\n # return\n #\n # def test_det_quail(self):\n # \"\"\"\n # unittest for function screenip.det_quail:\n # :return:\n # \"\"\"\n # try:\n # # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. / 1000.)\n # screenip_empty.noaec_quail = pd.Series([1.], dtype='int')\n # screenip_empty.fi_bird = pd.Series([1.], dtype='int')\n # result = screenip_empty.det_quail()\n # npt.assert_array_almost_equal(result, 1000., 4, '', True)\n # finally:\n # pass\n # return\n #\n # def test_det_other_1(self):\n # \"\"\"\n # unittest for function screenip.det_other_1:\n # :return:\n # \"\"\"\n # try:\n # #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)\n # #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)\n # screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet\n # screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams\n # result = screenip_empty.det_other_1()\n # npt.assert_array_almost_equal(result, 4666, 4)\n # finally:\n # pass\n # return\n #\n # The following tests are configured such that:\n # 1. four values are provided for each needed input\n # 2. the four input values generate four values of out_det_* per bird type\n # 3. 
the inputs per bird type are set so that calculations of out_det_* will result in\n # each bird type having one minimum among the bird types;\n # thus all four calculations result in one minimum per bird type\n\n # create empty pandas dataframes to create empty object for this unittest\n screenip_empty = self.create_screenip_object()\n\n expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')\n result = pd.Series([], dtype='float')\n\n try:\n screenip_empty.bodyweight_bobwhite_quail = 178.\n screenip_empty.bodyweight_mallard_duck = 1580.\n screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')\n screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')\n screenip_empty.noaec_bird_other_1 = pd.Series([50., 200., 300., 250.], dtype='float')\n screenip_empty.noaec_bird_other_2 = pd.Series([350., 400., 250., 100.], dtype='float')\n screenip_empty.noaec_bodyweight_bird_other_1 = pd.Series([345.34, 453.54, 649.29, 294.56], dtype='float')\n screenip_empty.noaec_bodyweight_bird_other_2 = pd.Series([123.84, 85.743, 127.884, 176.34], dtype='float')\n screenip_empty.no_of_runs = len(expected_results)\n result = screenip_empty.det()\n npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )\n finally:\n tab = [result, expected_results]\n print(\"\\n\")\n print(inspect.currentframe().f_code.co_name)\n print(tabulate(tab, headers='keys', tablefmt='rst'))\n return",
"def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)",
"def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)",
"def main():\n if len(sys.argv) < 3:\n print(\"2 arguments are required: input png path and turn [white | black]. Optional: chess AI think time expressed in seconds, oppponent skill level [0 - 20]\")\n return\n\n png_path = sys.argv[1]\n turn = sys.argv[2].lower()\n\n if len(sys.argv) < 4:\n think_time = 1.0\n else:\n try:\n think_time = float(sys.argv[3])\n if think_time <= 0:\n raise ValueError()\n except:\n print(\"Think time must be a positive number\")\n return\n\n if len(sys.argv) < 5:\n opponent_skill = 20.0\n else:\n try:\n opponent_skill = float(sys.argv[4])\n if opponent_skill < 0 or opponent_skill > 20:\n raise ValueError\n except:\n print(\"Opponent skill must be a number between 0 and 20\")\n return\n\n if not png_path.lower().endswith(\".png\"):\n print(\"Invalid png path!\")\n return\n\n if turn != \"white\" and turn != \"black\":\n print(\"Turn must be 'white' or 'black'\")\n return\n\n print(\"Reading board state from image...\")\n chess_board = board_from_png(png_path)\n print(\"Done! Opening GUI...\")\n solve_chess_problem(chess_board, turn == \"white\", think_time, opponent_skill)",
"def main():\n try:\n\n OUTPUTOVERVIEW.write(r'\\documentclass[12pt,a4paper,twocolumn]{article}'+'\\n\\n'\\\n r'\\usepackage[utf8x]{inputenc}'+'\\n'\\\n r'\\usepackage{graphicx}'+'\\n'\\\n r'\\usepackage{tikz}'+'\\n'\\\n r'\\usepackage[left=2.5cm, right=1cm, top=1.5cm, bottom=2cm]{geometry}'+'\\n'\\\n r'\\usepackage{xcolor}'+'\\n'\\\n r'\\usepackage{siunitx}'+'\\n'\\\n r'\\usepackage{titlesec}'+'\\n'\\\n r'\\titleformat{\\section}{\\Large\\scshape}{\\thesection}{1em}{}'+'\\n'\\\n r'\\titlespacing{\\section}{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus 2pt}'+'\\n'\\\n r'\\setlength{\\parindent}{0pt}'+'\\n'\\\n r'\\usepackage{LatexColors.incl}'+'\\n'\\\n r'\\begin{document}'+'\\n' + '\\n')\n\n startletter = ''\n for strline in COLORLINES[1:]:\n\n if strline.strip():\n # get color name and hex\n colname = colorname(strline)\n\n if startletter != strline[:1]:\n startletter = strline[:1]\n OUTPUTOVERVIEW.write(r'\\section*{' + startletter +'}\\n')\n\n # get RBG\n rcol, gcol, bcol = tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))\n\n # \\definecolor{airforceblue}{HTML}{5d8aa8}\n clname = strip_accents(re.sub(BAD_CHARS_NAME, '',\\\n colname[2], 0, re.MULTILINE | re.IGNORECASE)).title()\n\n rcol = rcol/255.\n gcol = gcol/255.\n bcol = bcol/255.\n\n cmyk = convert_rgb_cmyk(rcol, gcol, bcol)\n hsv = convert_rgb_hsv(rcol, gcol, bcol)\n hsl = convert_rgb_hsl(rcol, gcol, bcol)\n\n OUTPUTOVERVIEW.write(r'\\begin{minipage}{\\linewidth}\\tikz[baseline=1mm]\\draw [fill='\\\n + colname[0] + r', rounded corners=5pt] (0,0) rectangle (2cm,1cm); {\\textbf{'\\\n + clname + r'} \\\\ \\scriptsize{'+'RGB: {0:.0f}, {1:.0f}, {2:.0f}'\\\n .format(*tuple(int(colname[1][i:i+2], 16) for i in (0, 2, 4))) + r'; ' + \\\n r'HEX:~\\#' + colname[1] + r'\\\\' + \\\n r'CMYK: \\SI{{{0:.1f}}}{{\\percent}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}, \\SI{{{3:.1f}}}{{\\percent}}'\\\n .format(cmyk[0]*100, cmyk[1]*100, cmyk[2]*100, cmyk[3]*100) + r' \\\\' + \\\n r'HSV: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsv[0], hsv[1]*100, hsv[2]*100) + r' \\\\' + \\\n r'HSL: \\SI{{{0:.0f}}}{{\\degree}}, \\SI{{{1:.1f}}}{{\\percent}}, '\n r'\\SI{{{2:.1f}}}{{\\percent}}'\\\n .format(hsl[0], hsl[1]*100, hsl[2]*100)\\\n + '}}\\n'\\\n r'\\vspace{.5em}\\end{minipage}' + '\\n')\n\n OUTPUTOVERVIEW.write(r'\\end{document}')\n\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n # except Exception as ex: #comment for pylint 10.0!\n # print(str(ex))\n else:\n print('Overview file written.')\n OUTPUTOVERVIEW.close()"
] |
[
"0.5844188",
"0.56894135",
"0.55604094",
"0.5530293",
"0.5493756",
"0.5451574",
"0.5436015",
"0.5427296",
"0.5422339",
"0.54195154",
"0.540822",
"0.535743",
"0.5331199",
"0.5317544",
"0.52700955",
"0.5264249",
"0.52328736",
"0.5200136",
"0.51995796",
"0.51963454",
"0.517293",
"0.51654154",
"0.51654154",
"0.5159351",
"0.51575065",
"0.5144764",
"0.5144412",
"0.51357305",
"0.51320404",
"0.51276267"
] |
0.5725302
|
1
|
This function will put your array in a FITS file that you can open in DS9 for visual inspection, or any other purpose.
|
def make_fits(array, filename, path=''):
    # Empty primary HDU plus the array stored as extension 1.
    hdu0 = fits.PrimaryHDU([])
    hdu1 = fits.ImageHDU([array])
    hdulist = fits.HDUList([hdu0, hdu1])
    if path == '':
        path = os.getcwd()
    # Join directory and file name with a proper separator before writing.
    hdulist.writeto(os.path.join(path, filename + '.fits'), overwrite=False)
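A minimal usage sketch (assuming numpy, os and astropy.io.fits are imported; the array contents and output name are made up for illustration):

import os
import numpy as np
from astropy.io import fits

# Hypothetical example: write a blank 100x100 frame to ./blank_frame.fits,
# then confirm the image landed in extension 1.
data = np.zeros((100, 100))
make_fits(data, 'blank_frame')   # path='' defaults to the current directory
with fits.open(os.path.join(os.getcwd(), 'blank_frame.fits')) as hdul:
    hdul.info()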
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_as_fits(self, filename):",
"def tofits(self, filename=None):\n robot_array = self.robot_array()\n target_array = self.target_array()\n fitsio.write(filename, robot_array, clobber=True)\n fitsio.write(filename, target_array, clobber=False)\n return",
"def save_fits(data, fname):\n\tcols = fits.ColDefs(np.copy(data)) # This is somehow necessary.\n\ttbhdu = fits.BinTableHDU.from_columns(cols)\n\ttbhdu.writeto(fname, clobber=True)\n\t\n\treturn",
"def create_test_file(filename, array):\n array = np.ma.asanyarray(array)\n crs = rasterio.crs.CRS(init='epsg:4326')\n transform = from_origin(52, 4, 10, 10)\n with rasterio.open(\n filename,\n mode='w',\n driver='GTiff',\n width=array.shape[1],\n height=array.shape[2],\n count=array.shape[0],\n dtype=array.dtype,\n crs=crs,\n transform=transform) as dataset:\n for band, data in enumerate(array, start=1):\n dataset.write(data, band)",
"def write_files(neither, both, c_only, f_only, fh_c, data_c_start):\n\n # output arrays for all pixels in input datasets\n write_to_file(neither.astype(np.int16), 'neither_cube.fits')\n write_to_file(both.astype(np.int16), 'both_cube.fits')\n write_to_file(c_only.astype(np.int16), 'c_only_cube.fits')\n write_to_file(f_only.astype(np.int16), 'f_only_cube.fits')\n\n # output arrays for pixels in 2d array\n print(' Within the 2d arrays:')\n if (fh_c[0].header['NAXIS'] == 3): # for nirspec with 1 integration\n write_to_file(neither.sum(axis=0), 'neither_2d.fits')\n write_to_file(both.sum(axis=0), 'both_2d.fits')\n write_to_file(c_only.sum(axis=0), 'c_only_2d.fits')\n write_to_file(f_only.sum(axis=0), 'f_only_2d.fits')\n print(' The fraction of pixels in the 2d array having true CRs:',\\\n float(len(np.where(both.sum(axis=0) != 0.)[0])) / data_c_start.size)\n elif (fh_c[1].header['NAXIS'] == 4): # for miri or nircam cases\n write_to_file(neither.sum(axis=1).sum(axis=0), 'neither_2d.fits')\n write_to_file(both.sum(axis=1).sum(axis=0), 'both_2d.fits')\n write_to_file(c_only.sum(axis=1).sum(axis=0), 'c_only_2d.fits')\n write_to_file(f_only.sum(axis=1).sum(axis=0), 'f_only_2d.fits')\n print(' The fraction of pixels in the 2d array having true CRs:',\\\n float(len(np.where(both.sum(axis=1).sum(axis=0) != 0.)[0])) / \\\n data_c_start.size)\n else:\n print('FATAL ERROR - unexpected case in write_file()')",
"def write_array(uri: str):\n a1_data = np.reshape(np.arange(1, 26), (5, 5))\n l1_data = np.arange(5, 0, -1)\n l2_data = np.arange(-2, 3)\n l3_data = np.linspace(-1.0, 1.0, 5)\n with tiledb.open(uri, \"w\") as array:\n array[:] = {\"a1\": a1_data, \"l1\": l1_data, \"l2\": l2_data, \"l3\": l3_data}",
"def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)",
"def saveFits(self, filename):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n header = fits.Header()\n header['NAXIS1'] = self.naxis\n header['NAXIS2'] = self.naxis\n header['CTYPE1'] = 'RA---SIN'\n header['CTYPE2'] = 'DEC--SIN'\n header['CDELT1'] = - self.fov/(np.pi/180 * self.naxis)\n header['CDELT2'] = self.fov/(np.pi/180 * self.naxis)\n header['BUNIT'] = 'JY/PIXEL'\n \n hdu = fits.PrimaryHDU(self.res, header=header)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(filename, overwrite=True)\n \n print(\"Saved as '%s'.\" %(filename))",
"def tofits(outfilename, pixelarray, hdr=None, verbose=True):\n # print \"LOGX:: Entering `tofits` method/function in %(__file__)s\" %\n # globals()\n pixelarrayshape = pixelarray.shape\n if verbose:\n print(\"FITS export shape : (%i, %i)\" % (pixelarrayshape[0], pixelarrayshape[1]))\n\n if pixelarray.dtype.name == \"bool\":\n pixelarray = np.cast[\"uint8\"](pixelarray)\n\n if os.path.isfile(outfilename):\n os.remove(outfilename)\n\n if hdr == None: # then a minimal header will be created\n hdu = pyfits.PrimaryHDU(pixelarray.transpose())\n else: # this if else is probably not needed but anyway ...\n hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)\n\n hdu.writeto(outfilename, output_verify='ignore')\n\n if verbose:\n print(\"Wrote %s\" % outfilename)",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def create_fits_file(fits, cols, cdata):\n dlist = []\n for k in range(0, len(cols)):\n aent = numpy.array(cdata[k])\n dcol = pyfits.Column(name=cols[k], format='F', array=aent)\n dlist.append(dcol)\n\n dcols = pyfits.ColDefs(dlist)\n tbhdu = pyfits.BinTableHDU.from_columns(dcols)\n\n mcf.rm_files(fits)\n tbhdu.writeto(fits)",
"def eeg_writeavr(array,tsb,di,file):\t\t\n import shutil as shu\n f=open(file,'w')\n firstline = 'Npts= %i TSB= %i DI= %7.5f SB= %7.5f SC= %i NChan= %i\\n' %(array.shape[1],tsb,di,1,200,array.shape[0]) \n chnam = 'Cz FP1 FP2 F3 F4 C3 C4 P3 P4 O1 O2 F7 F8 T7 T8 P7 P8 Fz Pz FC1 FC2 CP1 CP2 FC5 FC6 CP5 CP6 FT9 FT10 TP9 TP10 PO9 PO10\\n'\n f.write(firstline)\n f.write(chnam)\n for i in range(array.shape[0]):\n tmp = array[i,:]\n f.write(('%7.5f ' * len(tmp)) %tuple(tmp))\n f.write('\\n')\n \n f.close()\n #may want to change this on different machines...\n src = '/Users/crislanting/Projects/EEG/data/33.elp'\n dest = file[:-4] + '.elp'\n shu.copyfile(src,dest)",
"def write_fits(self, name=None, output_path=None):\n pass",
"def export_fits(self, mask=None, **kwargs):\n \n ## Check key word arguments\n save_file = kwargs.pop('save_file', 'image.fits')\n fill_value = kwargs.pop('fill_value',0.)\n \n ## Check if mask provided matches data shape\n if self.is_valid_mask(mask):\n masked_data = np.ma.MasedArray()",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def toFile(self,fid):\n stack = []\n for w,b in self.stack:\n w.copy_to_host()\n b.copy_to_host()\n stack.append([w.numpy_array,b.numpy_array])\n\tpickle.dump(stack,fid)",
"def write_fits(self):\n \n import time\n import getpass\n \n formats = {}\n formats['bool'] = 'L'\n formats['int16'] = 'I'\n formats['int32'] = 'J'\n formats['int64'] = 'K'\n formats['float32'] = 'E'\n formats['float64'] = 'D'\n \n formats['>i8'] = 'K'\n formats['>f8'] = 'D'\n \n #### Make the table columns, translating numpy data types to \"TFORM\"\n coldefs = []\n dt = str(np.array(self.images).dtype)\n if 'S' in dt:\n TFORM = 'A'+dt.split('S')[1]\n elif 'U' in dt:\n TFORM = 'A'+dt.split('U')[1]\n \n print(TFORM)\n \n coldefs.append(pyfits.Column(name='images', array=np.array(self.images), format=TFORM))\n \n for column in self.params.keys():\n if column == 'comment':\n coldata = np.array(self.params['comment'])\n else:\n coldata = self.params[column]\n #\n dtype = str(coldata.dtype)\n #print column, dtype\n if dtype in formats.keys():\n TFORM=formats[dtype]\n else:\n if ('S' not in dtype) & ('U' not in dtype):\n print('Unrecognized data type in: %s' %(dtype))\n return False\n #\n if 'S' in dtype:\n TFORM = 'A'+dtype.split('S')[1]\n elif 'U' in dtype:\n TFORM = 'A'+dtype.split('U')[1]\n #\n #data = self.params[column]\n if '>' in dtype:\n cast_types = {'>i8':np.int64, '>f8':np.float64}\n coldata = np.cast[cast_types[dtype]](coldata)\n #\n coldefs.append(pyfits.Column(name=column, array=coldata, format=TFORM))\n \n #### Done, now make the binary table\n tbhdu = pyfits.BinTableHDU().from_columns(coldefs)\n \n linehdu = pyfits.ImageHDU(data=self.marked_reads, name='FLAGGED')\n \n #### Primary HDU\n hdu = pyfits.PrimaryHDU()\n thdulist = pyfits.HDUList([hdu, tbhdu, linehdu])\n\n #### Add modification time of \"infile\" to FITS header\n infile_mod_time = time.strftime(\"%m/%d/%Y %I:%M:%S %p\",\n time.localtime()) # os.path.getmtime(self.filename)))\n \n thdulist[0].header['MODTIME'] = infile_mod_time\n thdulist[0].header['USER'] = getpass.getuser()\n \n thdulist.writeto(self.logfile, clobber=True)\n \n print('Log to file %s' %(self.logfile))",
"def write(self, filename, **kwargs):\n self.to_table().write(filename, format='fits', **kwargs)",
"def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()",
"def write(self, filename, *args, **kwargs):\n self.to_fits().writeto(filename, *args, **kwargs)",
"def write_data_to_file(pos, fps, data_file):\n xs = []\n for x,y in pos:\n xs.append(x)\n with open(data_file,'wb') as f:\n np.save(f,pos)\n np.save(f,xs)\n np.save(f,fps)",
"def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')",
"def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))",
"def print_to_file(arr, fid, sep=\"\", format=\"%s\"):\n\n f = array_create.array(arr, bohrium=False)\n return f.tofile(fid, sep=sep, format=format)",
"def numpy_array_1d_to_fits(array_1d, file_path, overwrite=False):\r\n\r\n if overwrite and path.exists(file_path):\r\n os.remove(file_path)\r\n\r\n new_hdr = fits.Header()\r\n hdu = fits.PrimaryHDU(array_1d, new_hdr)\r\n hdu.writeto(file_path)",
"def save_fits(df, fname):\n df = df.reset_index()\n outtable = Table.from_pandas(df)\n Path(fname).parent.mkdir(parents=True, exist_ok=True)\n outtable.write(fname, format='fits', overwrite=True)",
"def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'",
"def put_2Darray(file,array,header='',format='',append='no'):\n lista=[]\n for i in range(array.shape[1]):lista.append(array[:,i])\n lista=tuple(lista)\n put_data(file,lista,header,format,append)",
"def make_file(self, tmpdir, fname='test0.fits'):\n hdul = fits.HDUList(fits.PrimaryHDU(\n np.zeros((10, 10), dtype=float)))\n ffile = str(tmpdir.join(fname))\n hdul.writeto(ffile, overwrite=True)\n hdul.close()\n return ffile",
"def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()"
] |
[
"0.7112844",
"0.6903038",
"0.663769",
"0.65599006",
"0.64984375",
"0.64938784",
"0.6489837",
"0.64578575",
"0.6420876",
"0.6293552",
"0.6273265",
"0.62717324",
"0.6267291",
"0.6227415",
"0.6185756",
"0.6176583",
"0.61275244",
"0.61153895",
"0.60628563",
"0.6028338",
"0.60221255",
"0.60148335",
"0.6006942",
"0.5997451",
"0.5992815",
"0.5987994",
"0.5974007",
"0.5961477",
"0.5960949",
"0.59365976"
] |
0.6915744
|
1
|
Organizes the files in your working directory based on visit number. Generates a dictionary that groups the files by visit number.
|
def group_visits(wdir):
    all_files = glob(os.path.join(wdir, '*flc.fits'))
    group = dict()
    for file in all_files:
        # The part of LINENUM before the '.' identifies the visit.
        visit = fits.getheader(file)['LINENUM'].split('.')[0]
        if visit not in group:
            group[str(visit)] = [str(file)]
        else:
            group[str(visit)].append(str(file))
    return group
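A minimal usage sketch (the directory path is hypothetical; it assumes the working directory holds *flc.fits exposures whose headers carry a LINENUM keyword, and that glob, os and astropy.io.fits are imported as the function requires):

import os
from glob import glob
from astropy.io import fits

# Hypothetical HST working directory containing *flc.fits frames.
wdir = '/data/my_program'
visits = group_visits(wdir)
for visit, files in sorted(visits.items()):
    print(visit, len(files), 'exposures')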
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict",
"def renku_op(self):\n\n files = [f for f in self.cache.get_files(self.user) if f.exists()]\n return {\"files\": sorted(files, key=lambda rec: (rec.is_dir, rec.relative_path))}",
"def main():\n extension_to_category = {}\n # Change to FileToSort directory\n os.chdir(\"FilesToSort\")\n\n # Loop through each file in the (current) directory\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split(\".\")[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into?\".format(extension))\n extension_to_category[extension] = category\n print(extension_to_category.items())\n # In case user put in existing folder\n try:\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # Move files to directories based on categories by renaming\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))",
"def collect(dname='.'):\n files = {}\n\n for paths in os.walk(dname):\n for fname in paths[2]:\n flen = len(fname)\n fpath = os.path.join(paths[0], fname)\n try:\n files[flen].append(fpath)\n except KeyError:\n files[flen] = [fpath]\n\n return files",
"def main():\r\n print(\"Starting directory is: {}\".format(os.getcwd()))\r\n\r\n # Change to desired directory\r\n os.chdir('FilesToSort')\r\n\r\n # Print a list of all files in current directory\r\n print(\"Files in {}:\\n{}\\n\".format(os.getcwd(), os.listdir('.')))\r\n\r\n # Loop through each file in the (current) directory\r\n doc_types = []\r\n sorted_file_types = []\r\n document_categories = {}\r\n for directory_name, subdirectories, filenames in os.walk('.'):\r\n for filename in filenames:\r\n # Ignore directories, just process files\r\n if os.path.isdir(filename):\r\n continue\r\n\r\n doc_name, doc_type = filename.split(\".\")\r\n # file = doc_name, doc_type\r\n # files.append(file)\r\n doc_types.append(doc_type)\r\n try:\r\n if doc_type not in sorted_file_types:\r\n user_input_category = input(\"What category would you like to sort {} files into?\".format(doc_type))\r\n sorted_file_types.append(doc_type)\r\n print(\"Creating {}\".format(user_input_category))\r\n document_categories[doc_type] = user_input_category\r\n os.mkdir(user_input_category)\r\n except FileExistsError:\r\n pass\r\n print(document_categories)\r\n print(\"Moving {} to {}\".format(filename, document_categories[doc_type]))\r\n try:\r\n shutil.move(filename, os.path.join(document_categories[doc_type], filename))\r\n except FileNotFoundError:\r\n pass",
"def sort_files(file_list, set_name, time_freq, normalise):\n \n out_dict = {}\n order = []\n \n if file_list:\n\tfor item in file_list:\n key = tuple(item[0:3])\n window = int(item[2])\n out_dict[key] = nio.InputData(item[0], item[1], runave=window, normalise=normalise)\n out_dict[key].tag = item[3]\n out_dict[key].window = window\n out_dict[key].set = set_name\n out_dict[key].datetimes = runave_time_correction(out_dict[key].datetime_axis()[:], time_freq)\n order.append(key) \n else:\n outdict = None\n\torder = None\n\n return out_dict, order",
"def make_all_files_dictionary(self, all_files, append_to_this=False):\n if append_to_this:\n rdict = append_to_this\n else:\n rdict = {}\n\n all_files.sort()\n for i in all_files:\n count = len(rdict) + 1\n i = os.path.abspath(os.path.expanduser(i))\n\n if platform.system() == \"Windows\":\n full_filename = i.split('\\\\')\n else:\n full_filename = i.split('/')\n\n full_filename = full_filename[-1]\n\n extension = full_filename.split('.')\n extension = extension[-1]\n extension = extension.upper()\n\n filename = full_filename.split('.')\n filename.pop(-1)\n filename = '.'.join(filename)\n\n rdict[i] = dict(\n path=i,\n processed=False,\n drawn=False,\n count=count,\n filename=filename,\n extension=extension,\n status='UNPROCESSED',\n )\n\n return rdict",
"def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])",
"def _get_scores_list(self):\n self.scores = dict()\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n score = open('{0}/{1}/{2}/extract_all.sort.uniq.txt'.format(self.path, subdir, DOCKING_RUN_FILES),'r').read().split()[-1]\n self.scores[subdir] = float(score.strip())\n except:\n pass",
"def main():\n # The following dictionary will allow us to map extensions to the destination folder names\n extension_to_category = {}\n os.chdir(\"FilesToSort\")\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split('.')[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into? \".format(extension))\n # Now we can map this new extension to a folder name\n extension_to_category[extension] = category\n try:\n # We don't expect to get an exception due to the if statement\n # But we'll play it safe anyway in case the user chooses an existing folder\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # We don't need a separate loop for this next step\n # We're already in a loop per file and we now know where to put it\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))",
"def sortFiles(files):\n def sortKey(file):\n dirFile = file.lower().rsplit('\\\\',1)\n if len(dirFile) == 1: dirFile.insert(0,'')\n return dirFile\n sortKeys = dict((x,sortKey(x)) for x in files)\n return sorted(files,key=lambda x: sortKeys[x])",
"def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results",
"def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'",
"def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1",
"def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict",
"def _generate_tfs_dfs(self) -> dict:\n tfs, dfs = {}, {}\n\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n if doc_path not in tfs:\n tfs[doc_path] = {}\n with open(doc_path, 'r') as f:\n text = f.readline()\n terms = set(text.split())\n for term in terms:\n tfs[doc_path][term] = text.count(term)\n\n if term not in dfs:\n dfs[term] = 1\n else:\n dfs[term] += 1\n\n return tfs, dfs",
"def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))",
"def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict",
"def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict",
"def get_autographs(pathtofile):\n\n autos = {}\n\n path = Path(pathtofile)\n assert path.is_dir()\n file_list = []\n for x in path.iterdir():\n if x.is_dir():\n file_list.append(x)\n print(f\"Found files {len(file_list)} -- {file_list}\")\n\n for f in file_list:\n name = str(f)[len(pathtofile) + 1 :]\n autos[name] = {}\n for x in f.iterdir():\n if str(x) == f\"{pathtofile}/{name}/{name}.txt\":\n info_file = x\n f = open(info_file, \"r\").readlines()\n info_name = f[0]\n info_quote = f[1]\n elif (\n str(x) == f\"{pathtofile}/{name}/{name}.jpg\"\n or str(x) == f\"{pathtofile}/{name}/{name}.png\"\n ):\n info_img = x\n else:\n l = len(pathtofile) + len(name) + 12\n f = open(x, \"r\").read().replace(\"\\n\", \" \").split()\n s = []\n for i in range(0, len(f), 20):\n s.append(\" \".join(f[i : i + 20]))\n output = \"\\n\".join(s)\n autos[name][str(x)[l:-4]] = output\n\n return autos",
"def scanGrabFolder(self):\n fnames = sorted(os.listdir(self.downloadFolder))\n self.seenIDs, self.seenTimes, self.seenHashes = [], [], []\n for fname in fnames:\n fname = fname.split(\".\")\n if len(fname) != 4:\n continue\n self.seenIDs.append(fname[0])\n self.seenTimes.append(int(fname[1]))\n self.seenHashes.append(fname[2])",
"def TurboSort(input_folder, output_file):\r\n\r\n atom_dict = {}\r\n for linelist in os.listdir(input_folder):\r\n file_line = 1\r\n with open(os.path.join(input_folder, linelist), \"r\") as fin:\r\n lines = fin.readlines()\r\n while file_line < len(lines):\r\n line_index = file_line - 1\r\n header, atomic_sym = lines[line_index], lines[line_index + 1]\r\n atomic_lines = int(header.split()[4])\r\n start = line_index + 2\r\n end = start + atomic_lines\r\n splice = lines[start: end]\r\n file_line = end + 1\r\n if atomic_sym in atom_dict.keys():\r\n atomic_lines_previous = int(atom_dict[atomic_sym][0].split()[4])\r\n atomic_lines += atomic_lines_previous\r\n start_line, end_line_previous = atom_dict[atomic_sym][0][:27], atom_dict[atomic_sym][0][27:]\r\n end_line_updated = end_line_previous.replace(str(atomic_lines_previous), str(atomic_lines))\r\n if len(end_line_updated) > 10:\r\n diff = len(end_line_updated) - 10\r\n end_line_updated = end_line_updated[diff:]\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n elif len(end_line_updated) < 10:\r\n diff = 10 - len(end_line_updated)\r\n atom_dict[atomic_sym][0] = start_line + \" \"*diff + end_line_updated\r\n else:\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n # Sorts each element by wavelength\r\n atom_dict[atomic_sym].extend(splice)\r\n temp = atom_dict[atomic_sym][2:]\r\n temp.sort()\r\n atom_dict[atomic_sym] = atom_dict[atomic_sym][:2]\r\n atom_dict[atomic_sym].extend(temp)\r\n else:\r\n header = [header, atomic_sym]\r\n header.extend(splice)\r\n atom_dict[atomic_sym] = header\r\n\r\n # Sorts each element block by atomic number\r\n vals = list(atom_dict.values())\r\n for val in vals:\r\n \"\\n\".join(val)\r\n vals.sort()\r\n lines = []\r\n for val in vals:\r\n lines.extend(val)\r\n\r\n with open(output_file, \"w\") as fout:\r\n for line in lines:\r\n fout.write(line)",
"def __init__(self, filepath):\n self.pathdict = {}\n self.filepath = filepath\n for dirname, dirs, files in os.walk(self.filepath):\n for f in files:\n self.pathdict[os.path.join(os.path.relpath(dirname, self.filepath), f)] = self.get_permissions(os.path.join(dirname, f))\n for d in dirs:\n self.pathdict[os.path.join(os.path.relpath(dirname, self.filepath), d)] = self.get_permissions(os.path.join(dirname, d))",
"def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')",
"def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")",
"def organize(current_path,keyword):\n\tif keyword == 'YYYY':\n\t\ttry:\n\t\t year_regex = re.compile(r'\\d{4}') # Regex to Find First 4 yyyyy in a list\n\t\t all_keywords = []\n\n\t\t for file in os.listdir(current_path):\n\t\t \t# found_result = year_regex.search(file).group()\n\t\t \tfound_result = year_regex.findall(file)[0]\n\t\t \tall_keywords.append(found_result)\n\t\t \tfinal_keywords = list(set(all_keywords))\n\n\t\t \tfor word in final_keywords:\n\t\t \t\tclick.secho(('Found File::{}'.format(file)),fg='blue')\n\t\t \t\torganize_files_by_keyword(word)\n\t\t \t\tclick.secho(('Finished Moving to:{}'.format(word)),fg='green')\n\t\texcept IndexError:\n\t\t pass\n\telse:\n\t\tclick.secho(('Organizing By Keyword :{}'.format(keyword)),fg='green')\n\t\torganize_files_by_keyword(keyword.lower())\n\t\tclick.secho(('Finished Moving to:{}'.format(keyword)),fg='green')",
"def fileCounter(directory):",
"def sort_key(path):\n file_end = path.rsplit(os.sep,1)[1]\n file_number = file_end.rstrip('.tif')\n return int(file_number)",
"def create_dictionary_of_old_and_new_paths(raw_dir: Path, bids_dir: Path, exclude_fieldmaps: bool) -> dict:\n\n old_and_new_paths = {}\n old_paths = list(raw_dir.rglob(\"*\"))\n print(f\"Sorting {len(old_paths)} paths into a dictionary.\") \n\n def task_name_of(path_to_func_or_json):\n \"\"\"\n Returns the task name of a func or json file. This function OVERWRITES the function\n \"task_name_of\" that we imported at the top of this script. BUT! It only overwrites it\n HERE, in \"create_dictionary_of_old_and_new_paths\".\n \"\"\"\n\n list_of_lines_containing_raw_subject_info = []\n for path in old_paths:\n if filetype_of(path) == \"subject info\":\n list_of_lines_containing_raw_subject_info = path.read_text().splitlines()\n break\n\n for line in list_of_lines_containing_raw_subject_info:\n if path_to_func_or_json.stem in line:\n return line.split(\"z\")[1]\n\n\n for old_path in old_paths:\n\n new_path = old_path\n\n if filetype_of(old_path.with_suffix(\".nii\")) == \"anat\" and acquisition_number_of(old_path) == \"14\":\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"anat\" / f\"sub-{subject_id_of(old_path)}_T1w{old_path.suffix}\"\n\n elif filetype_of(old_path.with_suffix(\".nii\")) == \"func\" and acquisition_number_of(old_path) != \"02\":\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"func\" / f\"sub-{subject_id_of(old_path)}_task-{task_name_of(old_path)}_acq-{acquisition_number_of(old_path)}_bold{old_path.suffix}\"\n\n elif filetype_of(old_path.with_suffix(\".nii\")) == \"fieldmap\" and not exclude_fieldmaps:\n old_affix = old_path.stem.split(\"_\")[-1]\n new_affix = \"phasediff\"\n if old_affix == \"e1\":\n new_affix = \"magnitude1\"\n if old_affix == \"e2\":\n new_affix = \"magnitude2\"\n new_path = bids_dir / f\"sub-{subject_id_of(old_path)}\" / \"fmap\" / f\"sub-{subject_id_of(old_path)}_{new_affix}{old_path.suffix}\"\n\n old_and_new_paths[old_path] = new_path\n\n print(\"Paths sorted.\")\n\n return old_and_new_paths",
"def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict"
] |
[
"0.5993223",
"0.59494966",
"0.5894371",
"0.5878789",
"0.5795447",
"0.5788768",
"0.5762669",
"0.5728834",
"0.5713203",
"0.5642163",
"0.56342036",
"0.5625541",
"0.5614913",
"0.5597969",
"0.55728817",
"0.5566646",
"0.55191517",
"0.5480035",
"0.54776114",
"0.546864",
"0.5452324",
"0.544773",
"0.5445857",
"0.54301506",
"0.5427764",
"0.54230636",
"0.53909904",
"0.53891885",
"0.5372354",
"0.53595537"
] |
0.61746734
|
0
|
Prints HTML response; useful for debugging tests.
|
def debug_html(label, response):
print("\n\n\n", "*********", label, "\n")
print(response.data.decode('utf8'))
print("\n\n")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def print_response(response):\n print(f\"Response for {url}\")\n if response.status_code == 200:\n # Green text\n print(f\"\\033[1;32;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n else:\n # Red text\n print(f\"\\033[1;31;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n # print(response.json())\n print(f\" {response.elapsed.total_seconds()} seconds elapsed.\")",
"def printable_reponse(self):\n resp = self.response\n msg = \"-- Reponse : {} -- \\r\\n\".format(resp.status_code)\n msg += \"Headers: {} \\r\\n\".format(str(resp.headers))\n msg += \"Body: {} \\r\\n\\r\\n\".format(str(resp.content))\n return msg",
"def print_request_response(request_response: json):\n print(\"Printing response:\")\n print(json.dumps(request_response, indent=4))",
"def test_html_output(self):\n pass",
"def print_response(response):\n print(response)\n print(\"-\"*30)",
"def func_PRINT(self, text):\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"<!DOCTYPE html><html><head><title>Cats Service</title></head><body><h1 align=center>RESULT</h1><p align=center>{0}</p></body></html>\\n\".format(text).encode()))",
"def print_response(response):\n\n lines = response.split(\"\\n\")\n for line in lines:\n print line.strip()",
"def display_html_report():\n display(HTML('report_page.html'))",
"def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(str.encode(\"<html><head><title>Zen of Python.</title></head>\"))\n self.wfile.write(str.encode(\"<body>\" + random.choice(zen)))\n self.wfile.write(str.encode(\"</body></html>\"))",
"def print_html(html):\n display(HTML(html))",
"def response(self, data, response_type = \"terminal\"):\n if (response_type == \"terminal\"):\n print(data, end=\"\\n\")",
"def output(self, response: str):\n\n # Try to output through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n IO.stdout(response)",
"def print_query_response(response):\n if response.text is not None:\n print(json.loads(response.text))\n else:\n logger.warning('Response not valid.')",
"def domain_response_html(req, resp, *, protocol, domain):\n\n headers = {}\n domain_response_code, domain_response_text, domain_response_time_ms, domain_response_headers = (\n _process_request(protocol, domain, req.params, headers)\n )\n\n resp.content = api.template(\n 'ping_response.html',\n domain=domain,\n domain_response_code=domain_response_code,\n domain_response_text=domain_response_text,\n domain_response_headers=domain_response_headers,\n domain_response_time_ms=domain_response_time_ms\n )",
"def test_mocked_get_simpleHtml(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_get\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>value</th><td>testValue</td></tr></table>', response.content)",
"def test_sample(self):\n response = self.tester.get('/sample-household/',\n content_type='html/text')\n self.assertEqual(response.status_code, 200)",
"def get(self):\n self.response.write('ok')",
"def HTMLResponse():\n\n\thtml = \t\"<html><head><title>MA MAURO ESISTE?</title><style type='text/css'>body{width:30%}</style></head><body><pre>\"\n\thtml += \" _ __<br>\"\n\thtml += \" (_) / /<br>\"\n\thtml += \" ______ __ ____ ____ / /____<br>\"\n\thtml += \" / ___/ / _ \\\\/ _ \\\\/ / _ \\\\<br>\"\n\thtml += \" / / / / /_) / /_) / / ____/<br>\"\n\thtml += \"/__/ /__/ .___/ .___/__/ \\\\_____/<br>\"\n\thtml += \" / / / /<br>\"\n\thtml += \" /__/ /__/<br>\"\n\thtml += \"<b>PYTHON > ALL VERSION</b><br><br>\"\n\thtml += \"<marquee style='white-space:pre;'><br>\"\n\thtml += \" .. o .<br>\"\n\thtml += \" o.o o . o<br>\"\n\thtml += \" oo...<br>\"\n\thtml += \" __[]__<br>\"\n\thtml += \" phwr--> _\\\\:D/_/o_o_o_|__ <span style=\\\"font-family: 'Comic Sans MS'; font-size: 8pt;\\\">u wot m8</span><br>\"\n\thtml += \" \\\\\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"/<br>\"\n\thtml += \" \\\\ . .. .. . /<br>\"\n\thtml += \"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^<br>\"\n\thtml += \"</marquee><br><strike>reverse engineering a protocol impossible to reverse engineer since always</strike><br>we are actually reverse engineering bancho successfully. for the third time.</pre></body></html>\"\n\treturn html",
"def test_mocked_post_simpleHtml(self):\n c = Client()\n response = c.post(\"/apimock/mocked/mocked_post\", data={\"key\": \"value\"})\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>value</th><td>test_return_value_for_post</td></tr></table>', response.content)",
"def get(self):\n self.response.out.write(\"There's nothing to see here. How 'bout a \"\n \"<a href='/'>puzzle</a>?\")",
"def displayResponse(request, openid_response):\n s = getServer(request)\n\n # Encode the response into something that is renderable.\n try:\n webresponse = s.encodeResponse(openid_response)\n except EncodingError as why:\n # If it couldn't be encoded, display an error.\n text = why.response.encodeToKVForm()\n return render_to_response(\n 'server/endpoint.html', {'error': cgi.escape(text)},\n context_instance=RequestContext(request))\n\n # Construct the appropriate django framework response.\n r = http.HttpResponse(webresponse.body)\n r.status_code = webresponse.code\n\n for header, value in webresponse.headers.items():\n r[header] = value\n\n return r",
"def print_resp(self, resp: dict):\n if \"details\" in resp:\n if isinstance(resp[\"details\"], str):\n self.write_string(resp[\"details\"])\n if isinstance(resp[\"details\"], Table):\n self.write_table(resp[\"details\"])\n\n if \"data\" in resp:\n for item in resp[\"data\"]:\n if not isinstance(item, dict):\n continue\n item_type = item.get(\"type\")\n if item_type == \"string\":\n self.write_string(item[\"data\"])\n elif item_type == \"table\":\n table = Table(None)\n table.set_rows(item[\"rows\"])\n self.write_table(table)\n elif item_type == \"error\":\n self.write_error(item[\"data\"])\n elif item_type == \"dict\":\n self.write_dict(item[\"data\"])\n\n if \"details\" not in resp and \"data\" not in resp:\n self.write_string(\"Response is not correct.\")",
"def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html",
"def PrintReport(self):\n print('=== Summary of Baidu Real-time Bidding test ===')\n print('Requests sent: %d' % self._requests_sent)\n print('Responses with a 200/OK HTTP response code: %d' % self._responses_ok)\n print('Responses with a non-200 HTTP response code: %d' % len(self._error))\n print('Good responses (no problems found): %d' % len(self._good))\n print('Invalid (unparseable) with a 200/OK HTTP response code: %d' % len( self._invalid))\n print('Parseable responses with problems: %d' % len(self._problematic))\n if self._responses_successful_without_bids == self._requests_sent:\n print('ERROR: None of the responses had bids!')",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def pretty_view(self):\n return self.pretty_response()",
"def do_SEND_SIMPLE_RESPONSE(self, response: str):\n self.wfile.write(response.encode('utf-8'))",
"def _render_GET(self, request, code, ctype, msg):\r\n request.setResponseCode(code)\r\n request.setHeader('content-type', ctype)\r\n request.write(msg)\r\n request.finish()",
"def create_response(content, debug, debug_cmd, cmd_buttons=cmd_buttons):\n return \"\"\"\\\n<html>\n<form action=\"/\" method=\"post\">\n<textarea name=\"input\" style=\"width:100%%;height:25%%;\" placeholder=\"%(workingfile)s\">%(content)s</textarea>\n<input type=\"submit\" value=\"Submit\">\n</form>\n<hr />\n%(cmd_buttons)s\n<hr />\n<h3>Debug (%(debug_cmd)s):</h3>\n<pre>%(debug)s</pre>\n</html>\"\"\" % {\"content\": content,\n \"debug\": debug,\n \"debug_cmd\": debug_cmd,\n \"cmd_buttons\": cmd_buttons,\n \"workingfile\": workingfile}"
] |
[
"0.70429105",
"0.6699819",
"0.65425724",
"0.65398264",
"0.63861525",
"0.6296697",
"0.62906927",
"0.6214942",
"0.6199883",
"0.61866623",
"0.6134931",
"0.61156917",
"0.6112736",
"0.6080736",
"0.6061218",
"0.6059715",
"0.60414886",
"0.601479",
"0.59842736",
"0.59836936",
"0.5976929",
"0.59399056",
"0.591425",
"0.590574",
"0.59047586",
"0.5892164",
"0.5891596",
"0.5872002",
"0.58567566",
"0.5851677"
] |
0.7718159
|
0
|
After each test, delete the cities.
|
def tearDown(self):
Cafe.query.delete()
City.query.delete()
db.session.commit()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tearDown(self):\n del self.my_city",
"def test_delete_city(self):\n u = UserFactory(role=User.MODERATOR)\n u.set_password('123')\n u.save()\n log_n = LogEntry.objects.count()\n\n c = CityFactory()\n c.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n url = prepare_url('admin-cities-detail', kwargs={'id': str(c.id)})\n n = City.objects.count()\n\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(City.objects.count(), n-1)\n self.assertEqual(LogEntry.objects.count(), log_n+1)",
"def tearDown(self) -> None:\n storage.delete(self.place)\n storage.delete(self.user)\n storage.delete(self.city)\n storage.delete(self.state)\n storage.save()",
"def tearDown(self):\n delete_company_tasks([self._id], **self._test_data)",
"def test_delete(self):\n c = city.City(name=\"Freiburg\")\n p1 = city.Citizen(name=\"Peter\")\n p2 = city.Citizen(name=\"Georg\")\n p3 = city.Citizen(name=\"Hans\")\n c.add(p1, p2, p3, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n cw = wrapper.add(c)\n session.commit()\n\n cw.remove(p3.uid)\n session.prune()\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)",
"def deleteCity(self):\n self.city_lbx.delete(0)",
"def tearDown(self):\n Pics.objects.all().delete()\n Category.objects.all().delete()\n Location.objects.all().delete()",
"def citiesDel(city_id):\n yy = storage.get(\"City\", str(city_id))\n if yy is None:\n abort(404)\n yy.delete()\n storage.save()\n return (jsonify({})), 200",
"def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()",
"def clearCities(self):\n self.city_lbx.delete(0, \"end\")",
"def tearDown(self) -> None:\n place = storage.get(Place, self.place_id)\n if place is not None:\n storage.delete(place)\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def tearDown(self) -> None:\n place = storage.get(Place, self.place_id)\n if place is not None:\n storage.delete(place)\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def cities_delete(city_id):\n found = storage.get(City, city_id)\n if not found:\n abort(404)\n else:\n storage.delete(found)\n storage.save()\n return jsonify({}), 200",
"def tearDown(self):\n self.cleanup_tenants()",
"def tearDown(self):\n with database() as db:\n db.query(\"DELETE FROM persons WHERE person_name = 'test_person_a' OR person_name = 'test_person_b'\")",
"def tearDown(self):\n del self.location",
"def del_city(city_id):\n for obj in storage.all(City).values():\n if obj.id == city_id:\n obj.delete()\n storage.save()\n return ({}, 200)\n abort(404)",
"def tearDown(self) -> None:\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def tearDown(self) -> None:\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def __del__(self):\r\n train_data_sources = list(self._train_data.values())\r\n test_data_sources = list(self._test_data.values())\r\n all_data_sources = train_data_sources + test_data_sources\r\n for data_source in all_data_sources:\r\n data_source.cleanup()\r\n self._tester.__del__()",
"def tearDown(self):\n with tenant_context(self.tenant):\n CostModel.objects.all().delete()\n CostModelMap.objects.all().delete()",
"def tearDown(self):\n\n User.objects.all().delete()\n Movie.objects.all().delete()\n Vote.objects.all().delete()",
"def tearDown(self):\n super(TestSelectAPI, self).tearDown()\n self.destroy_fixtures()",
"def tearDown(self):\n if self.workspace is not None:\n rmtree(self.workspace.workspace, ignore_errors=True)\n Path.cwd().joinpath(\"workspace.tar.gz\").unlink(missing_ok=True)\n for item in self.items:\n if item.is_dir():\n rmtree(item)\n elif item.is_file():\n item.unlink()\n self.workspace = None",
"def tearDown(self):\n User.objects.all().delete()\n Project.objects.all().delete()\n Review.objects.all().delete()",
"def tearDown(self):\n self.labGroup.delete()",
"def tearDown(self):\n if os.path.isdir('/tmp/remote_pacha'):\n shutil.rmtree('/tmp/remote_pacha')\n if os.path.isdir('/tmp/localhost'):\n shutil.rmtree('/tmp/localhost')\n if os.path.isdir('/tmp/test_pacha'):\n shutil.rmtree('/tmp/test_pacha')\n if os.path.isdir('/tmp/pacha_test'):\n shutil.rmtree('/tmp/pacha_test')\n if os.path.isdir('/tmp/pacha_bucket'):\n shutil.rmtree('/tmp/pacha_bucket')\n\n try:\n shutil.rmtree('/tmp/test_pacha')\n shutil.rmtree('/tmp/remote_pacha')\n shutil.rmtree('/tmp/localhost')\n shutil.rmtree('/tmp/single_dir')\n except OSError:\n pass # nevermind if you could not delete this guy",
"def setUp(self):\n Pet.remove_all()",
"def tearDown(self):\n User.objects.all().delete()\n Project.objects.all().delete()",
"def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()"
] |
[
"0.80575997",
"0.6969745",
"0.694893",
"0.68621755",
"0.680407",
"0.6800682",
"0.6720583",
"0.6715607",
"0.66467243",
"0.6631308",
"0.649708",
"0.649708",
"0.6492599",
"0.6469972",
"0.6467292",
"0.64334804",
"0.64098126",
"0.6382244",
"0.6382244",
"0.63776475",
"0.6363666",
"0.6349106",
"0.6322069",
"0.6306689",
"0.6296698",
"0.6279275",
"0.6229869",
"0.62074965",
"0.6205226",
"0.62048626"
] |
0.72604793
|
1
|
Tell the Robot to stop cleaning.
|
def stopclean(self):
        raise NotImplementedError("Not implemented")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0",
"def stop(self):\r\n self.terminating = True",
"def stop(self):\n print_message_received(\"stop\")\n self.robot.drive_system.stop()",
"def stop(self):\r\n self.running = False",
"def stop(self):\r\n self.running = False",
"def stop() -> None:",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\n self._should_run = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):",
"def stop(self):",
"def stop(self):\n self.turnOffMotors()",
"def stop(self):\n self._run = False",
"def ShutDown(self):\n self.stop = True",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def turn_off(self):\n self.robot.stop_simulation()",
"def do_stop(self,line):\n print \"Trying to stop the robot\"\n self.robot.tank(0,0)",
"def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()",
"def shutdown():\n\trospy.loginfo(\"Stopping the robot...\")\n\tglobal_vars.move_base.cancel_all_goals()\n\n\tglobal_vars.cmd_vel_pub.publish(Twist())\n\n\trospy.sleep(1)",
"def shutdown(self):\n\t\trospy.loginfo(\"Stopping the robot...\")\n\t\tself.cmd_vel.publish(Twist())\n\t\trospy.sleep(1)",
"def stop(self) -> None:\n ...",
"def stop(self):\n self.stopped = True"
] |
[
"0.73576075",
"0.72907317",
"0.7263385",
"0.72399735",
"0.72399735",
"0.7227755",
"0.7223492",
"0.7223492",
"0.7223492",
"0.7223492",
"0.71658593",
"0.71473134",
"0.71473134",
"0.71473134",
"0.71473134",
"0.71473134",
"0.7133828",
"0.7133828",
"0.7132527",
"0.7125268",
"0.7101528",
"0.7101129",
"0.7101129",
"0.70681703",
"0.7052448",
"0.7047095",
"0.70387536",
"0.7032234",
"0.7014331",
"0.70061547"
] |
0.77689457
|
0
|
Get indexing status. Check if indexing is enabled. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def is_indexing_enabled(self, collection_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.is_indexing_enabled_with_http_info(collection_id, **kwargs)
else:
(data) = self.is_indexing_enabled_with_http_info(collection_id, **kwargs)
return data
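# Hedged usage sketch of the synchronous/asynchronous pattern described above.
# `CollectionsApi` and "my-collection" are illustrative names, not taken from
# the spec; substitute the generated API class and a real collection id.
api = CollectionsApi()

# Synchronous call: blocks and returns the deserialized IndexingConfig.
config = api.is_indexing_enabled("my-collection")

# Asynchronous call: returns immediately and invokes the callback with the
# deserialized response once the request completes.
def on_indexing_config(indexing_config):
    print("indexing enabled:", indexing_config)

api.is_indexing_enabled("my-collection", callback=on_indexing_config)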
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_index(self):\n\n if self._check_idx and self._index:\n return self._check_idx",
"def is_indexing_enabled_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method is_indexing_enabled\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `is_indexing_enabled`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IndexingConfig',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def is_indexed(self):\r\n return self._indexed",
"def has_index(self):\n return self.index is not None",
"def set_indexing_enabled(self, collection_id, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)\n else:\n (data) = self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)\n return data",
"def index_exists(self, index: str) -> bool:\n return self.__client__.indices.exists(index)",
"def index_exists(index_name):\n return ES.indices.exists(index=index_name)",
"def has_index(self, index):\n return index in [s[0] for s in self.get_index_list()]",
"def set_indexing_enabled_with_http_info(self, collection_id, body, **kwargs):\n\n all_params = ['collection_id', 'body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method set_indexing_enabled\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `set_indexing_enabled`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `set_indexing_enabled`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IndexingConfig',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def indexed(self):\n return self.properties.get('indexed', None)",
"def status(self):\n return self._get(path='status')",
"def is_index_read_only(self, index: str) -> bool:\n response = self.__client__.indices.get_settings(\n index=index,\n name=\"index.blocks.write\",\n allow_no_indices=True,\n flat_settings=True,\n )\n print(response)\n return (\n response[index][\"settings\"][\"index.blocks.write\"] == \"true\"\n if response\n else False\n )",
"def check_status(self):\n return self.status",
"def check_status(self):\n return self.status",
"def status(self):\n return self._query_status()['status']",
"def check_exists(self, index: str) -> bool:\n\n if self.es.indices.exists(index=index):\n return True\n return False",
"def index_status_description(self):\n if (self.index_status == 3):\n return 'approved'\n if (self.index_status == 0):\n return 'no data'\n if (self.index_status == 1):\n return 'reserved'\n if (self.index_status == 2):\n return 'pending'",
"def get_index(\n self,\n ) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_index\" not in self._stubs:\n self._stubs[\"get_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/GetIndex\",\n request_serializer=datastore_admin.GetIndexRequest.serialize,\n response_deserializer=index.Index.deserialize,\n )\n return self._stubs[\"get_index\"]",
"def isActive(self, index):\n return self.isKnown(index) and self.isEnabled(index)",
"def _check_idx(self, url):\n if not url.endswith('.idx'):\n url += '.idx'\n return requests.head(url).ok",
"def is_indexed(self):\n return self._index is not UnindexedComponent_set",
"def getStatus(self):\n return self.enabled",
"def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()",
"def test_index_availability(client):\n response = client.get('/')\n assert response.status_code == 200",
"def get_status(self):\n # TODO retrieve from db if not set\n return self.status",
"def status(self):\n return self.rpc.call(MsfRpcMethod.DbStatus)",
"def queryStatus (self) :\n\n return self.sendCommand(\"CMD_IN_QUERY_STATUS\", \"\")",
"def status(self):\n return self.get(self._names[\"status\"])",
"def exist_idx(index_name):\n query = \"\"\"SELECT EXISTS(SELECT 1 \n FROM pg_indexes\n WHERE indexname = '{0}') \n AS idx_exists\"\"\".format(index_name)\n res = db.engine.execute(query).first()\n return res.idx_exists"
] |
[
"0.64202756",
"0.6375936",
"0.57622427",
"0.57565325",
"0.5620718",
"0.5593013",
"0.5583242",
"0.5406694",
"0.5362593",
"0.5350638",
"0.52855283",
"0.522831",
"0.5217023",
"0.51975834",
"0.51975834",
"0.517215",
"0.511512",
"0.51103634",
"0.5067149",
"0.5033741",
"0.50284636",
"0.5002792",
"0.49930352",
"0.496227",
"0.49506685",
"0.49470145",
"0.49462122",
"0.49452803",
"0.49387813",
"0.49384987"
] |
0.64591163
|
0
|
Request rebuild index. Request an index rebuild on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def rebuild(self, collection_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.rebuild_with_http_info(collection_id, **kwargs)
else:
(data) = self.rebuild_with_http_info(collection_id, **kwargs)
return data
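# Hedged sketch of triggering a rebuild (assumed names: `CollectionsApi`,
# "my-collection"; the follow-up `status` call assumes both methods live on
# the same generated client class, as the surrounding records suggest).
import time

api = CollectionsApi()
print(api.rebuild("my-collection"))    # SimpleResponse acknowledging the request
time.sleep(5)                          # the rebuild runs server-side
print(api.status("my-collection"))     # inspect the collection afterwards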
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)",
"def rebuild_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method rebuild\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `rebuild`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/rebuild', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='SimpleResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def rebuild_index(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n with get_db_connection() as db:\n c = db.cursor()\n execute_with_retry(db, c, self._rebuild_index)",
"def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )",
"def reindex(self):",
"def reindex(self):",
"def reindex(self):\n raise NotImplementedError()",
"def build_index(self):\n self.rebuild_index()",
"def solr_rebuild(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n cmd = 'bin/django rebuild_index --batch-size=5000 --verbosity=2'\n run(cmd)",
"def refresh():\n global tree\n tree = build_tree()\n tree.order_by_create()\n return index()",
"def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')",
"def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')",
"def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()",
"def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)",
"def MutateDeployedIndex(self, request, global_params=None):\n config = self.GetMethodConfig('MutateDeployedIndex')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def build_index():\n pass",
"def reindex(source, target):\n return elasticsearch.helpers.reindex(es.get_es(), source, target)",
"def refresh_index(self):\n synchronize()\n # TODO: add logger call here\n self._compute_embeddings()",
"def init_index(clear=False):\n return _run_indexer_func(\"init_index\", clear)",
"def commit():\n return _run_indexer_func(\"commit\")",
"def reindex(session, es, request):\n\n if get_aliased_index(es) is None:\n raise RuntimeError('cannot reindex if current index is not aliased')\n\n settings = request.find_service(name='settings')\n\n new_index = configure_index(es)\n\n try:\n settings.put(SETTING_NEW_INDEX, new_index)\n request.tm.commit()\n\n indexer = BatchIndexer(session, es, request, target_index=new_index, op_type='create')\n\n errored = indexer.index()\n if errored:\n log.debug('failed to index {} annotations, retrying...'.format(\n len(errored)))\n errored = indexer.index(errored)\n if errored:\n log.warn('failed to index {} annotations: {!r}'.format(\n len(errored),\n errored))\n\n update_aliased_index(es, new_index)\n\n finally:\n settings.delete(SETTING_NEW_INDEX)\n request.tm.commit()",
"def DeployIndex(self, request, global_params=None):\n config = self.GetMethodConfig('DeployIndex')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def readable_reindex(request):\r\n tasks.reindex_fulltext_allbookmarks.delay()\r\n return _api_response(request, {\r\n 'success': True\r\n })",
"def rebuild(options, project_directory=None):\n if options.help:\n print rebuild.__doc__\n sys.exit(1)\n\n if not project_directory:\n project_directory = os.getcwd()\n action_rebuild(project_directory)",
"def commit(self, state):\n # TODO: User optimistic concurrency control via\n # \"version_type=external_gte\"\n return self.client.index(\n index=self.index,\n id=self.document_id,\n body=state\n )",
"def reindex(self):\n result = self.database.command('reIndex', self.name)\n del result['serverUsed']\n return result",
"def rebuild_cache(build_name=None,sources=None,target=None,force_build=False):\n if build_name:\n sources = mongo.get_source_fullnames(build_manager.list_sources(build_name))\n target = mongo.get_latest_build(build_name)\n elif sources:\n sources = mongo.get_source_fullnames(sources)\n if not sources and not target:\n raise Exception(\"No valid sources found\")\n\n def rebuild(col):\n cur = mongo.id_feeder(col,batch_size=10000,logger=config.logger,force_build=force_build)\n [i for i in cur] # just iterate\n\n @asyncio.coroutine\n def do(srcs,tgt):\n pinfo = {\"category\" : \"cache\",\n \"source\" : None,\n \"step\" : \"rebuild\",\n \"description\" : \"\"}\n config.logger.info(\"Rebuild cache for sources: %s, target: %s\" % (srcs,tgt))\n for src in srcs:\n # src can be a full name (eg. clinvar.clinvar_hg38) but id_feeder knows only name (clinvar_hg38)\n if \".\" in src:\n src = src.split(\".\")[1]\n config.logger.info(\"Rebuilding cache for source '%s'\" % src)\n col = mongo.get_src_db()[src]\n pinfo[\"source\"] = src\n job = yield from job_manager.defer_to_thread(pinfo, partial(rebuild,col))\n yield from job\n config.logger.info(\"Done rebuilding cache for source '%s'\" % src)\n if tgt:\n config.logger.info(\"Rebuilding cache for target '%s'\" % tgt)\n col = mongo.get_target_db()[tgt]\n pinfo[\"source\"] = tgt\n job = job_manager.defer_to_thread(pinfo, partial(rebuild,col))\n yield from job\n\n task = asyncio.ensure_future(do(sources,target))\n return task",
"def update_index_by_name(self, doc_name):\n\t\tdocument = self.get_document_to_index(doc_name)\n\t\tif document:\n\t\t\tself.update_index(document)",
"def test_recreate_index_that_exists(self):\n indices = self.elasticsearch_cls().indices\n indices.exists.return_value = True\n\n index_name = 'abcd'\n self.client._recreate_index(index_name)\n indices.delete.assert_called_once_with(index_name)\n indices.create.assert_called_once_with(index_name)",
"def _do_search_action(self, index, action, force=False):\n assert self.pk, \"Object must have a primary key before being indexed.\"\n assert action in ('index', 'delete'), (\n \"Search action '{}' is invalid; must be 'index' or 'delete'.\".format(action)\n )\n client = get_client()\n cache_key = self.search_document_cache_key\n if action == 'index':\n # if the locally cached search doc is the same as the new one,\n # then don't bother pushing to ES.\n new_doc = self.as_search_document(index)\n if not force:\n cached_doc = cache.get(cache_key)\n if new_doc == cached_doc:\n logger.debug(\"Search document for %r is unchanged, ignoring update.\", self)\n return []\n cache.set(cache_key, new_doc, timeout=60) # TODO: remove hard-coded timeout\n return client.index(\n index=index,\n doc_type=self.search_doc_type,\n body=new_doc,\n id=self.pk\n )\n\n if action == 'delete':\n cache.delete(cache_key)\n return client.delete(\n index=index,\n doc_type=self.search_doc_type,\n id=self.pk\n )"
] |
[
"0.62315875",
"0.602992",
"0.59027463",
"0.5742475",
"0.5566116",
"0.5566116",
"0.5414252",
"0.5414006",
"0.51835304",
"0.51398534",
"0.51164776",
"0.5080432",
"0.49207366",
"0.49111786",
"0.48998672",
"0.48903412",
"0.47889283",
"0.47243795",
"0.4723609",
"0.46759757",
"0.46749023",
"0.46712604",
"0.4662824",
"0.46526963",
"0.4606227",
"0.45541188",
"0.454888",
"0.45452425",
"0.4541399",
"0.45187196"
] |
0.6661653
|
0
|
Change indexing status. Enable or disable indexing on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def set_indexing_enabled(self, collection_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)
else:
(data) = self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)
return data
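# Minimal sketch of toggling indexing. `CollectionsApi` and an `IndexingConfig`
# model with an `enabled` flag are assumptions for illustration; check the
# generated models for the actual body schema (the `body` argument is required).
api = CollectionsApi()
updated = api.set_indexing_enabled("my-collection", IndexingConfig(enabled=False))
print(updated)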
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_indexing_enabled_with_http_info(self, collection_id, body, **kwargs):\n\n all_params = ['collection_id', 'body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method set_indexing_enabled\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `set_indexing_enabled`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `set_indexing_enabled`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IndexingConfig',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def is_indexing_enabled_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method is_indexing_enabled\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `is_indexing_enabled`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IndexingConfig',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def is_indexing_enabled(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.is_indexing_enabled_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.is_indexing_enabled_with_http_info(collection_id, **kwargs)\n return data",
"def enable_index_update_feature(settings):\n settings.FEATURES[INDEX_UPDATES] = True",
"def setIndexMode(self, mode):\n self.indexMode = mode",
"def data_collection_status(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_collection_status\"), kwargs)",
"def is_indexed(self, is_indexed):\n\n self._is_indexed = is_indexed",
"def update_course_index(self, course_key, updated_index_entry):\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.index = updated_index_entry\n else:\n self.db_connection.update_course_index(updated_index_entry, course_context=course_key)",
"def document_update(index_name, doc_type, doc_id, doc=None, new=None):\n if doc:\n resp = es.index(index=index_name, doc_type=doc_type,\n id=doc_id, body=doc)\n print(resp)\n else:\n resp = es.update(index=index_name, doc_type=doc_type,\n id=doc_id, body={\"doc\": new})",
"def set_settings(self, settings):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout, settings)",
"def status(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.status_with_http_info(collection_id, **kwargs)\n return data",
"def index(self, model_name, field_name, force=False):\n model = self.get_model(model_name)\n field = model[field_name]\n if 'index' in field:\n if field['index'] is False and force is False:\n raise ModelIndexError(\n \"Field definition has index: False. \"\n \"Use force=True to override.\")\n collection = self.get_collection(model_name)\n collection.ensure_index(field_name)",
"def setStatus(self, newStatus):\n self._status = newStatus",
"def setIndex(self, index):\n\n self._index = index\n\n return True",
"def activate_collection(self, coll_name=None, collid=None):\n return self.update_collection_active_flag(1, coll_name, collid)",
"def enable(self, index, value=True, missingok=False):\n self._action(index, StateVariable.enable, missingok=missingok,\n value=value)",
"def update_indexes(cls, document: dict = None):\n if cls._check_indexes(document):\n cls.logger.info(\"Updating indexes...\")\n cls.__collection__.drop_indexes()\n cls._create_indexes(IndexType.Unique, document)\n cls._create_indexes(IndexType.Other, document)\n cls.logger.info(\"Indexes updated.\")\n if cls.audit_model:\n cls.audit_model.update_indexes(document)",
"def _create_update_index(self) -> Result[Ok, Err]:\n collection_status = self.collection\n if collection_status.is_err():\n return collection_status\n collection: MongoCollection = collection_status.ok()\n\n def check_index_keys(current_keys, new_index_keys):\n current_keys.sort()\n new_index_keys.sort()\n return current_keys == new_index_keys\n\n syft_obj = self.settings.object_type\n\n unique_attrs = getattr(syft_obj, \"__attr_unique__\", [])\n object_name = syft_obj.__canonical_name__\n\n new_index_keys = [(attr, ASCENDING) for attr in unique_attrs]\n\n try:\n current_indexes = collection.index_information()\n except BaseException as e:\n return Err(str(e))\n index_name = f\"{object_name}_index_name\"\n\n current_index_keys = current_indexes.get(index_name, None)\n\n if current_index_keys is not None:\n keys_same = check_index_keys(current_index_keys[\"key\"], new_index_keys)\n if keys_same:\n return Ok()\n\n # Drop current index, since incompatible with current object\n try:\n collection.drop_index(index_or_name=index_name)\n except Exception:\n return Err(\n f\"Failed to drop index for object: {object_name} with index keys: {current_index_keys}\"\n )\n\n # If no new indexes, then skip index creation\n if len(new_index_keys) == 0:\n return Ok()\n\n try:\n collection.create_index(new_index_keys, unique=True, name=index_name)\n except Exception:\n return Err(\n f\"Failed to create index for {object_name} with index keys: {new_index_keys}\"\n )\n\n return Ok()",
"def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")",
"def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def update_collection_active_flag(self, is_active, coll_name=None, collid=None):\n if self.sub_scheme_version < CAT_ACTIVE_VERSION:\n warn(\"Database schema is too old to support collection active inactive feature\")\n\n elif is_active == 1 or is_active == 0:\n if collid is None and coll_name is not None:\n collid = self.get_collection_id(coll_name)\n\n if collid is not None:\n self.update_collection({COL_NAME_COLL_IS_ACTIVE: is_active}, collid)\n return True\n\n return False",
"def setEnabled(self, status):\r\n self._status = status\r\n\r\n if status:\r\n self._start()\r\n else:\r\n self._stop()\r\n\r\n for cb in self._statusListener:\r\n cb(self, status)",
"def _setEnabled(self, indexlist):\n for index in self._items.keys():\n self.enable(index, index in indexlist)",
"def create_index(collection, index):\n db[collection].create_index(index)",
"def set_status(self, status):\n # TODO log to db\n self.status = status",
"def index_read_only(self, index: str, read_only: bool):\n self.__client__.indices.put_settings(\n index=index, body={\"settings\": {\"index.blocks.write\": read_only}}\n )",
"def change_status(self, status, application_id):"
] |
[
"0.66845334",
"0.571336",
"0.5518714",
"0.5299254",
"0.5092742",
"0.49066973",
"0.48308542",
"0.47795677",
"0.46901208",
"0.4644369",
"0.46376935",
"0.46256793",
"0.46205524",
"0.46175686",
"0.45979646",
"0.45585275",
"0.45510438",
"0.45369563",
"0.45341522",
"0.45341247",
"0.45005736",
"0.44916067",
"0.44829103",
"0.44642344",
"0.4458638",
"0.44549412",
"0.44485778",
"0.44255406",
"0.4423343",
"0.44215614"
] |
0.71299356
|
0
|
Change indexing status. Enable or disable indexing on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def set_indexing_enabled_with_http_info(self, collection_id, body, **kwargs):
all_params = ['collection_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_indexing_enabled" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'collection_id' is set
if ('collection_id' not in params) or (params['collection_id'] is None):
raise ValueError("Missing the required parameter `collection_id` when calling `set_indexing_enabled`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `set_indexing_enabled`")
collection_formats = {}
path_params = {}
if 'collection_id' in params:
path_params['collectionId'] = params['collection_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IndexingConfig',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_indexing_enabled(self, collection_id, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)\n else:\n (data) = self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)\n return data",
"def is_indexing_enabled_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method is_indexing_enabled\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `is_indexing_enabled`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/indexing', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IndexingConfig',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def is_indexing_enabled(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.is_indexing_enabled_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.is_indexing_enabled_with_http_info(collection_id, **kwargs)\n return data",
"def enable_index_update_feature(settings):\n settings.FEATURES[INDEX_UPDATES] = True",
"def setIndexMode(self, mode):\n self.indexMode = mode",
"def data_collection_status(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_collection_status\"), kwargs)",
"def is_indexed(self, is_indexed):\n\n self._is_indexed = is_indexed",
"def update_course_index(self, course_key, updated_index_entry):\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.index = updated_index_entry\n else:\n self.db_connection.update_course_index(updated_index_entry, course_context=course_key)",
"def document_update(index_name, doc_type, doc_id, doc=None, new=None):\n if doc:\n resp = es.index(index=index_name, doc_type=doc_type,\n id=doc_id, body=doc)\n print(resp)\n else:\n resp = es.update(index=index_name, doc_type=doc_type,\n id=doc_id, body={\"doc\": new})",
"def set_settings(self, settings):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout, settings)",
"def status(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.status_with_http_info(collection_id, **kwargs)\n return data",
"def index(self, model_name, field_name, force=False):\n model = self.get_model(model_name)\n field = model[field_name]\n if 'index' in field:\n if field['index'] is False and force is False:\n raise ModelIndexError(\n \"Field definition has index: False. \"\n \"Use force=True to override.\")\n collection = self.get_collection(model_name)\n collection.ensure_index(field_name)",
"def setStatus(self, newStatus):\n self._status = newStatus",
"def setIndex(self, index):\n\n self._index = index\n\n return True",
"def activate_collection(self, coll_name=None, collid=None):\n return self.update_collection_active_flag(1, coll_name, collid)",
"def enable(self, index, value=True, missingok=False):\n self._action(index, StateVariable.enable, missingok=missingok,\n value=value)",
"def update_indexes(cls, document: dict = None):\n if cls._check_indexes(document):\n cls.logger.info(\"Updating indexes...\")\n cls.__collection__.drop_indexes()\n cls._create_indexes(IndexType.Unique, document)\n cls._create_indexes(IndexType.Other, document)\n cls.logger.info(\"Indexes updated.\")\n if cls.audit_model:\n cls.audit_model.update_indexes(document)",
"def _create_update_index(self) -> Result[Ok, Err]:\n collection_status = self.collection\n if collection_status.is_err():\n return collection_status\n collection: MongoCollection = collection_status.ok()\n\n def check_index_keys(current_keys, new_index_keys):\n current_keys.sort()\n new_index_keys.sort()\n return current_keys == new_index_keys\n\n syft_obj = self.settings.object_type\n\n unique_attrs = getattr(syft_obj, \"__attr_unique__\", [])\n object_name = syft_obj.__canonical_name__\n\n new_index_keys = [(attr, ASCENDING) for attr in unique_attrs]\n\n try:\n current_indexes = collection.index_information()\n except BaseException as e:\n return Err(str(e))\n index_name = f\"{object_name}_index_name\"\n\n current_index_keys = current_indexes.get(index_name, None)\n\n if current_index_keys is not None:\n keys_same = check_index_keys(current_index_keys[\"key\"], new_index_keys)\n if keys_same:\n return Ok()\n\n # Drop current index, since incompatible with current object\n try:\n collection.drop_index(index_or_name=index_name)\n except Exception:\n return Err(\n f\"Failed to drop index for object: {object_name} with index keys: {current_index_keys}\"\n )\n\n # If no new indexes, then skip index creation\n if len(new_index_keys) == 0:\n return Ok()\n\n try:\n collection.create_index(new_index_keys, unique=True, name=index_name)\n except Exception:\n return Err(\n f\"Failed to create index for {object_name} with index keys: {new_index_keys}\"\n )\n\n return Ok()",
"def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")",
"def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def update_collection_active_flag(self, is_active, coll_name=None, collid=None):\n if self.sub_scheme_version < CAT_ACTIVE_VERSION:\n warn(\"Database schema is too old to support collection active inactive feature\")\n\n elif is_active == 1 or is_active == 0:\n if collid is None and coll_name is not None:\n collid = self.get_collection_id(coll_name)\n\n if collid is not None:\n self.update_collection({COL_NAME_COLL_IS_ACTIVE: is_active}, collid)\n return True\n\n return False",
"def setEnabled(self, status):\r\n self._status = status\r\n\r\n if status:\r\n self._start()\r\n else:\r\n self._stop()\r\n\r\n for cb in self._statusListener:\r\n cb(self, status)",
"def _setEnabled(self, indexlist):\n for index in self._items.keys():\n self.enable(index, index in indexlist)",
"def create_index(collection, index):\n db[collection].create_index(index)",
"def set_status(self, status):\n # TODO log to db\n self.status = status",
"def index_read_only(self, index: str, read_only: bool):\n self.__client__.indices.put_settings(\n index=index, body={\"settings\": {\"index.blocks.write\": read_only}}\n )",
"def change_status(self, status, application_id):"
] |
[
"0.7132368",
"0.57167596",
"0.5522436",
"0.52990115",
"0.50922185",
"0.49085262",
"0.48318607",
"0.47808215",
"0.46896845",
"0.46444702",
"0.4639374",
"0.46250963",
"0.46188778",
"0.46187243",
"0.46014965",
"0.45583203",
"0.45503423",
"0.45368776",
"0.45359975",
"0.45320195",
"0.45027402",
"0.4493288",
"0.44817355",
"0.44667137",
"0.4456491",
"0.4454917",
"0.44519877",
"0.4424114",
"0.4422401",
"0.44190463"
] |
0.6687099
|
1
|
List collection status. Displays status information about an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, define a `callback` function to be invoked when the response is received.
|
def status(self, collection_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.status_with_http_info(collection_id, **kwargs)
else:
(data) = self.status_with_http_info(collection_id, **kwargs)
return data
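A minimal, self-contained sketch of the dispatch pattern in `status` above: if a `callback` keyword is supplied, the response is handed to it; otherwise the data is returned directly. The `FakeCollectionsApi` class and its canned payload are purely illustrative stand-ins for the generated client, not part of the real API.

class FakeCollectionsApi:
    # Stand-in for the generated client; returns a canned payload instead of calling HTTP.
    def status_with_http_info(self, collection_id, **kwargs):
        result = {"collectionId": collection_id, "documents": 42, "status": "READY"}
        callback = kwargs.get("callback")
        if callback:
            callback(result)      # asynchronous style: the caller receives the data via the callback
            return None
        return result             # synchronous style: the data is returned to the caller

    def status(self, collection_id, **kwargs):
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.status_with_http_info(collection_id, **kwargs)
        (data) = self.status_with_http_info(collection_id, **kwargs)
        return data


api = FakeCollectionsApi()
print(api.status("col-123"))                                         # blocks and returns the payload
api.status("col-123", callback=lambda resp: print("async:", resp))   # returns immediately, callback fires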
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def data_collection_status(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_collection_status\"), kwargs)",
"def status_all_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method status_all\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/status', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ListCollectionStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def status_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method status\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `status`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/status', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CollectionStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def get_collection_status(self, db_cfg_name, collection_cfg_name):\n from ir_config import IRConfig\n db_name = IRConfig.get_instance().get(db_cfg_name)\n collection_name = IRConfig.get_instance().get(collection_cfg_name)\n res = self.__find_collection_in_meta(db_name, collection_name)\n if res.count() > 0:\n return res[0][self.__meta_lastmodified_name], \\\n res[0][self.__meta_success_name]\n else:\n return None, None",
"def get_status(self, collection):\n\n status_order = [\n Data.STATUS_ERROR,\n Data.STATUS_UPLOADING,\n Data.STATUS_PROCESSING,\n Data.STATUS_PREPARING,\n Data.STATUS_WAITING,\n Data.STATUS_RESOLVING,\n Data.STATUS_DONE,\n ]\n\n # Use 'data_statuses' attribute when available. It is created in the\n # BaseCollectionViewSet class. It contains all the distinct statuses of the\n # data objects in the collection.\n status_set = (\n set(collection.data_statuses)\n if hasattr(collection, \"data_statuses\")\n else collection.data.values_list(\"status\", flat=True).distinct()\n )\n\n if not status_set:\n return None\n\n for status in status_order:\n if status in status_set:\n return status\n\n logger.warning(\n \"Could not determine the status of a collection.\",\n extra={\"collection\": collection.__dict__},\n )\n return None",
"async def list_collections(self):\n try:\n response = await self.get('/solr/admin/collections',\n params={'action': 'CLUSTERSTATUS'})\n response_data = json.loads(response.body.decode('utf-8'))\n collections = response_data['cluster']['collections']\n has_cores = []\n has_no_cores = []\n for collection_name, collection_status in collections.items():\n shards = collection_status['shards'].values()\n if any(shard['replicas'] for shard in shards):\n has_cores.append(collection_name)\n else:\n has_no_cores.append(collection_name)\n self._collections_cache = set(has_cores)\n self._broken_collections_cache = set(has_no_cores)\n self._cache_timestamp = time.time()\n return self._collections_cache, self._broken_collections_cache\n except (SolrError, KeyError):\n logger.exception('Failed to list collections')\n raise",
"def status(self):\n return StatusCollection(client=self)",
"def list_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ListResponseCollection',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def collections():\n\tcollections = models.Collection.query.all()\n\tif not collections:\n\t\tabort(404)\n\tresponse = { 'collections': [c.dictionary() for c in collections] }\n\treturn jsonify(response)",
"def status_all(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_all_with_http_info(**kwargs)\n else:\n (data) = self.status_all_with_http_info(**kwargs)\n return data",
"def list_collections(self, database):\n r = self.__get_response(settings.LST_COLS, {\"db\": database})\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])",
"def mmo_collection_stats(self, mmo_connection, execution_database, collection):\n command = { \"collStats\": collection }\n return self.mmo_execute_on_mongos(mmo_connection, command, execution_database)",
"def status(self):\n path = \"contexts/status?contextId=%s&clusterId=%s\" % (self.id, self.cluster_id)\n return self.get(self.url, \"1.2\", path, token=self.token)",
"def collection_names(self, callback):\n callback = partial(self._collection_names_result, callback)\n self[\"system.namespaces\"].find(_must_use_master=True, callback=callback)",
"def collections(self):\r\n\t\tself.fetch_collections()\r\n\t\treturn self._collection_names",
"def test_get_collection_name(self):\n response = self.client.get(\n 'http://localhost:8000/webapp/api/collection/')\n self.assertEqual(response.status_code, 200)",
"def collection(collection_id):\n\tcollection = models.Collection.query.get(collection_id)\n\tif not collection:\n\t\tabort(404)\n\treturn jsonify(collection.dictionary())",
"def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()",
"def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data",
"def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)",
"def status():\n response = \"NOT_OK\"\n if db_client.data_loaded:\n response = \"OK\"\n return flask.jsonify({'status': response})",
"def list_namespaced_component_status(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_namespaced_component_status_with_http_info(**kwargs)\n else:\n (data) = self.list_namespaced_component_status_with_http_info(**kwargs)\n return data",
"async def getAllCollections(self, q=None, schedule_status=None, type=None, tags=None, is_active=None, page_no=None, page_size=None):\n payload = {}\n \n if q:\n payload[\"q\"] = q\n \n if schedule_status:\n payload[\"schedule_status\"] = schedule_status\n \n if type:\n payload[\"type\"] = type\n \n if tags:\n payload[\"tags\"] = tags\n \n if is_active:\n payload[\"is_active\"] = is_active\n \n if page_no:\n payload[\"page_no\"] = page_no\n \n if page_size:\n payload[\"page_size\"] = page_size\n \n\n # Parameter validation\n schema = CatalogValidator.getAllCollections()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"q\",\"description\":\"Get collection list filtered by q string,\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"schedule_status\",\"description\":\"Get collection list filtered by scheduled status,\",\"schema\":{\"type\":\"string\",\"enum\":[\"live\",\"upcoming\",\"expired\"]},\"required\":false},{\"in\":\"query\",\"name\":\"type\",\"description\":\"type of the collections\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"tags\",\"description\":\"Each response will contain next_id param, which should be sent back to make pagination work.\",\"schema\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"required\":false},{\"in\":\"query\",\"name\":\"is_active\",\"description\":\"get collections filtered by active status.\",\"schema\":{\"type\":\"boolean\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results.\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. 
Default is 12.\",\"schema\":{\"type\":\"integer\"},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"q\",\"description\":\"Get collection list filtered by q string,\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"schedule_status\",\"description\":\"Get collection list filtered by scheduled status,\",\"schema\":{\"type\":\"string\",\"enum\":[\"live\",\"upcoming\",\"expired\"]},\"required\":false},{\"in\":\"query\",\"name\":\"type\",\"description\":\"type of the collections\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"tags\",\"description\":\"Each response will contain next_id param, which should be sent back to make pagination work.\",\"schema\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"required\":false},{\"in\":\"query\",\"name\":\"is_active\",\"description\":\"get collections filtered by active status.\",\"schema\":{\"type\":\"boolean\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results.\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\"},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size)\n query_string = await create_query_string(q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/\", q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def validate_collection_response(self, response):\n\n self.validate_response(response)\n if response.status_code not in self.model._meta['valid_get_status']:\n raise InvalidStatusError(\n self.model._meta['valid_get_status'], response\n )",
"async def ensure_collection(self, collection):\n if await self.does_collection_exist(collection):\n return\n # Create Solr collection\n try:\n # Collection creation in API v2 doesn't support collection.configName yet.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/admin/collections',\n params={\n 'action': 'CREATE',\n 'name': collection,\n 'collection.configName': APPSCALE_CONFIG_SET_NAME,\n 'replicationFactor': self._settings.replication_factor,\n 'autoAddReplicas': True,\n 'numShards': self._settings.shards_number,\n 'maxShardsPerNode': self._settings.max_shards_per_node,\n 'waitForFinalState': True,\n }\n )\n logger.info('Successfully created collection {} ({})'\n .format(collection, response.body))\n except SolrError as err:\n if 'collection already exists' in err.error_detail:\n logger.info('Collection {} already exists'.format(collection))\n elif 'Cannot create collection ' in err.error_detail:\n logging.warning('Solr message: {}'.format(err.error_detail))\n logging.warning('Scheduling deletion of collection {}'\n .format(collection))\n ioloop.IOLoop.current().spawn_callback(\n self.delete_collection, collection\n )\n raise\n else:\n logger.warning('Failed to create collection {}'.format(collection))\n raise\n # Update collections cache in background\n ioloop.IOLoop.current().spawn_callback(self.list_collections)",
"def status(self):\n return self._get(path='status')",
"def collections(self, query, page=1, per_page=10):\n url = \"/search/collections\"\n data = self._search(url, query, page=page, per_page=per_page)\n data[\"results\"] = CollectionModel.parse_list(data.get(\"results\"))\n return data",
"def statuses(self):\n return self._get_paged(\"statuses\")",
"def document_status(document_id: uuid.UUID, db: Session = Depends(get_db)):\n document_status = get_document_status(db, document_id)\n return document_status",
"def get_all_categories_from_collection():\n api_endpoint = URL\n response = requests.get(api_endpoint)\n return response"
] |
[
"0.7318824",
"0.7295206",
"0.7102955",
"0.65757906",
"0.648761",
"0.63682735",
"0.6271686",
"0.61010176",
"0.5650195",
"0.5554575",
"0.55134416",
"0.5458567",
"0.5368529",
"0.52405345",
"0.5231259",
"0.51703393",
"0.51604354",
"0.515305",
"0.5146796",
"0.514557",
"0.5134877",
"0.51084954",
"0.5089186",
"0.5075183",
"0.5072828",
"0.5070213",
"0.50361234",
"0.5027173",
"0.5026482",
"0.49859232"
] |
0.7648862
|
0
|
List status for all collections. Displays status information about all existing collections. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, define a `callback` function to be invoked when the response is received.
|
def status_all_with_http_info(self, **kwargs):
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method status_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
return self.api_client.call_api('/api/v1/collections/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListCollectionStatus',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
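Before building the request, the method above rejects any keyword argument that is not listed in `all_params`. A minimal, self-contained illustration of that validation step follows; no HTTP is involved, and the `validate_kwargs` helper is hypothetical, written only to mirror the loop above.

def validate_kwargs(method_name, allowed, **kwargs):
    # Mirrors the validation loop above: any kwarg outside the allowed list is rejected.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method %s" % (key, method_name))
    return kwargs

allowed = ['callback', '_return_http_data_only', '_preload_content', '_request_timeout']
validate_kwargs('status_all', allowed, _preload_content=False)    # accepted silently
try:
    validate_kwargs('status_all', allowed, bogus=True)            # raises TypeError
except TypeError as err:
    print(err)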
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def data_collection_status(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_collection_status\"), kwargs)",
"def status(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.status_with_http_info(collection_id, **kwargs)\n return data",
"async def list_collections(self):\n try:\n response = await self.get('/solr/admin/collections',\n params={'action': 'CLUSTERSTATUS'})\n response_data = json.loads(response.body.decode('utf-8'))\n collections = response_data['cluster']['collections']\n has_cores = []\n has_no_cores = []\n for collection_name, collection_status in collections.items():\n shards = collection_status['shards'].values()\n if any(shard['replicas'] for shard in shards):\n has_cores.append(collection_name)\n else:\n has_no_cores.append(collection_name)\n self._collections_cache = set(has_cores)\n self._broken_collections_cache = set(has_no_cores)\n self._cache_timestamp = time.time()\n return self._collections_cache, self._broken_collections_cache\n except (SolrError, KeyError):\n logger.exception('Failed to list collections')\n raise",
"def status_with_http_info(self, collection_id, **kwargs):\n\n all_params = ['collection_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method status\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `status`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}/status', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CollectionStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def status_all(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_all_with_http_info(**kwargs)\n else:\n (data) = self.status_all_with_http_info(**kwargs)\n return data",
"def status(self):\n return StatusCollection(client=self)",
"def list_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ListResponseCollection',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def collections():\n\tcollections = models.Collection.query.all()\n\tif not collections:\n\t\tabort(404)\n\tresponse = { 'collections': [c.dictionary() for c in collections] }\n\treturn jsonify(response)",
"def get_collection_status(self, db_cfg_name, collection_cfg_name):\n from ir_config import IRConfig\n db_name = IRConfig.get_instance().get(db_cfg_name)\n collection_name = IRConfig.get_instance().get(collection_cfg_name)\n res = self.__find_collection_in_meta(db_name, collection_name)\n if res.count() > 0:\n return res[0][self.__meta_lastmodified_name], \\\n res[0][self.__meta_success_name]\n else:\n return None, None",
"async def get_all_collections():\n try:\n collections_query_result = get_db().AQLQuery(\n query=menu_queries.QUERY_ALL_COLLECTIONS\n )\n return {\"result\": collections_query_result.result}\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error",
"def get_status(self, collection):\n\n status_order = [\n Data.STATUS_ERROR,\n Data.STATUS_UPLOADING,\n Data.STATUS_PROCESSING,\n Data.STATUS_PREPARING,\n Data.STATUS_WAITING,\n Data.STATUS_RESOLVING,\n Data.STATUS_DONE,\n ]\n\n # Use 'data_statuses' attribute when available. It is created in the\n # BaseCollectionViewSet class. It contains all the distinct statuses of the\n # data objects in the collection.\n status_set = (\n set(collection.data_statuses)\n if hasattr(collection, \"data_statuses\")\n else collection.data.values_list(\"status\", flat=True).distinct()\n )\n\n if not status_set:\n return None\n\n for status in status_order:\n if status in status_set:\n return status\n\n logger.warning(\n \"Could not determine the status of a collection.\",\n extra={\"collection\": collection.__dict__},\n )\n return None",
"def list_collections(self, database):\n r = self.__get_response(settings.LST_COLS, {\"db\": database})\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])",
"def statuses(self):\n return self._get_paged(\"statuses\")",
"def collection_names(self, callback):\n callback = partial(self._collection_names_result, callback)\n self[\"system.namespaces\"].find(_must_use_master=True, callback=callback)",
"def get_all(self) -> List[Status]:\n return self.__mapper.map_all(\n self.__repository.get_all(),\n Status\n )",
"def status(self):\n path = \"contexts/status?contextId=%s&clusterId=%s\" % (self.id, self.cluster_id)\n return self.get(self.url, \"1.2\", path, token=self.token)",
"def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)",
"async def getAllCollections(self, q=None, schedule_status=None, type=None, tags=None, is_active=None, page_no=None, page_size=None):\n payload = {}\n \n if q:\n payload[\"q\"] = q\n \n if schedule_status:\n payload[\"schedule_status\"] = schedule_status\n \n if type:\n payload[\"type\"] = type\n \n if tags:\n payload[\"tags\"] = tags\n \n if is_active:\n payload[\"is_active\"] = is_active\n \n if page_no:\n payload[\"page_no\"] = page_no\n \n if page_size:\n payload[\"page_size\"] = page_size\n \n\n # Parameter validation\n schema = CatalogValidator.getAllCollections()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"q\",\"description\":\"Get collection list filtered by q string,\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"schedule_status\",\"description\":\"Get collection list filtered by scheduled status,\",\"schema\":{\"type\":\"string\",\"enum\":[\"live\",\"upcoming\",\"expired\"]},\"required\":false},{\"in\":\"query\",\"name\":\"type\",\"description\":\"type of the collections\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"tags\",\"description\":\"Each response will contain next_id param, which should be sent back to make pagination work.\",\"schema\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"required\":false},{\"in\":\"query\",\"name\":\"is_active\",\"description\":\"get collections filtered by active status.\",\"schema\":{\"type\":\"boolean\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results.\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. 
Default is 12.\",\"schema\":{\"type\":\"integer\"},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"q\",\"description\":\"Get collection list filtered by q string,\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"schedule_status\",\"description\":\"Get collection list filtered by scheduled status,\",\"schema\":{\"type\":\"string\",\"enum\":[\"live\",\"upcoming\",\"expired\"]},\"required\":false},{\"in\":\"query\",\"name\":\"type\",\"description\":\"type of the collections\",\"schema\":{\"type\":\"string\"},\"required\":false},{\"in\":\"query\",\"name\":\"tags\",\"description\":\"Each response will contain next_id param, which should be sent back to make pagination work.\",\"schema\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"required\":false},{\"in\":\"query\",\"name\":\"is_active\",\"description\":\"get collections filtered by active status.\",\"schema\":{\"type\":\"boolean\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results.\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\"},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size)\n query_string = await create_query_string(q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/\", q=q, schedule_status=schedule_status, type=type, tags=tags, is_active=is_active, page_no=page_no, page_size=page_size), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def collections(self):\r\n\t\tself.fetch_collections()\r\n\t\treturn self._collection_names",
"def list_namespaced_component_status(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_namespaced_component_status_with_http_info(**kwargs)\n else:\n (data) = self.list_namespaced_component_status_with_http_info(**kwargs)\n return data",
"def mmo_collection_stats(self, mmo_connection, execution_database, collection):\n command = { \"collStats\": collection }\n return self.mmo_execute_on_mongos(mmo_connection, command, execution_database)",
"def json_statuslist():\n statuses = Status.query.all()\n out = {'statuses': []}\n for status in statuses:\n out['statuses'].append(status.value)\n\n return jsonify(out)",
"def get_all_categories_from_collection():\n api_endpoint = URL\n response = requests.get(api_endpoint)\n return response",
"def list_namespaced_component_status_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_component_status\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/componentstatuses'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ComponentStatusList',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))",
"def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()",
"def status():\n response = \"NOT_OK\"\n if db_client.data_loaded:\n response = \"OK\"\n return flask.jsonify({'status': response})",
"def display_all(self) -> None:\n self.display.draw_list(self.read_all_statuses())",
"def get_statuses(self):\n return self.statuses",
"def list(self, request, vocab, collection, format=None):\n # What we really want is the collection, which contains a list of\n # concepts\n return redirect(\"/collections/{}/{}\".format(vocab, collection))",
"def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data"
] |
[
"0.6863459",
"0.66944957",
"0.6633338",
"0.6509232",
"0.6375338",
"0.6056797",
"0.60126746",
"0.5943748",
"0.5908979",
"0.5755109",
"0.5734625",
"0.5677355",
"0.5655235",
"0.56186336",
"0.55815095",
"0.5505445",
"0.5504833",
"0.5412489",
"0.5389496",
"0.53617734",
"0.5295269",
"0.525086",
"0.52467835",
"0.5204359",
"0.51544696",
"0.5081544",
"0.50761455",
"0.5057882",
"0.50526416",
"0.5044288"
] |
0.7622068
|
0
|
Update a collection. Updates an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, define a `callback` function to be invoked when the response is received.
|
def update(self, collection_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(collection_id, body, **kwargs)
else:
(data) = self.update_with_http_info(collection_id, body, **kwargs)
return data
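The wrapper above forces `_return_http_data_only=True` so callers receive only the deserialized payload. A small, self-contained sketch of that behaviour, assuming (as these generated clients conventionally do) that the lower-level `*_with_http_info` call can yield a `(data, status_code, headers)` triple; everything below is an illustrative stand-in, not the real client.

def fake_update_with_http_info(collection_id, body, _return_http_data_only=False, **kwargs):
    # Pretend response in place of a real PUT /api/v1/collections/{collectionId} call.
    data = {"id": collection_id, "name": body.get("name")}
    status_code, headers = 200, {"Content-Type": "application/json"}
    if _return_http_data_only:
        return data
    return data, status_code, headers

print(fake_update_with_http_info("col-123", {"name": "renamed"}, _return_http_data_only=True))
print(fake_update_with_http_info("col-123", {"name": "renamed"}))  # full triple without the flag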
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_collection(self, bucket_id, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.update_collection_with_http_info(bucket_id, collection_id, **kwargs)\n else:\n (data) = self.update_collection_with_http_info(bucket_id, collection_id, **kwargs)\n return data",
"def update_collection_with_http_info(self, bucket_id, collection_id, **kwargs):\n\n all_params = ['bucket_id', 'collection_id', 'collection', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_collection\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `update_collection`\")\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `update_collection`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `update_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `update_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/collections/{collection_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'collection_id' in params:\n path_params['collection_id'] = params['collection_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'collection' in params:\n body_params = params['collection']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Collection',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def update_collection(collection_uuid, title):\n assert isinstance(collection_uuid, UUID)\n data = {\"title\": title}\n result = api_request('patch', api_url('collections', str(collection_uuid)), json=data)\n return _collection_from_response(result)",
"def update_with_http_info(self, collection_id, body, **kwargs):\n\n all_params = ['collection_id', 'body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `update`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'collection_id' in params:\n path_params['collectionId'] = params['collection_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api('/api/v1/collections/{collectionId}', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Collection',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"async def updateCollection(self, id=None, body=\"\"):\n payload = {}\n \n if id:\n payload[\"id\"] = id\n \n\n # Parameter validation\n schema = CatalogValidator.updateCollection()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import UpdateCollection\n schema = UpdateCollection()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{id}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"id\",\"description\":\"A `id` is a unique identifier of a collection.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"id\",\"description\":\"A `id` is a unique identifier of a collection.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", id=id)\n query_string = await create_query_string(id=id)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PUT\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"put\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{id}/\", id=id), query_string, headers, body, exclude_headers=exclude_headers), data=body)",
"def collection_update(request, *args, **kwargs):\n patch_data = request.data\n # Extract form data and validate\n form = CollectionForm(patch_data)\n if not form.is_valid():\n data = json.dumps({\"errors\": form.errors})\n return HttpResponse(content=data, content_type=\"application/json\", status=status.HTTP_400_BAD_REQUEST)\n # Update the collection\n collection = Collection.objects.get(id=int(kwargs['pk']))\n if \"title\" in patch_data:\n collection.title = patch_data[\"title\"]\n if \"permission\" in patch_data:\n collection.public = patch_data[\"permission\"] == \"Public\"\n if \"comment\" in patch_data:\n collection.comment = patch_data[\"comment\"]\n collection.save()\n # Prepare a response\n data = json.dumps({'success': True, 'id': collection.id, 'url': \"/collection/{0}\".format(collection.id)})\n return HttpResponse(data, content_type=\"json\")",
"def patch_collection(self, bucket_id, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.patch_collection_with_http_info(bucket_id, collection_id, **kwargs)\n else:\n (data) = self.patch_collection_with_http_info(bucket_id, collection_id, **kwargs)\n return data",
"def update_collection(self, collection, collid):\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, collid)\n rowcount = 0\n if collection is not None:\n self.update_generic_data(collection, TABLE_NAME_COLL, cond)\n # done\n return rowcount",
"def update(self, docs, commit=False):\n if not docs:\n return\n\n data = json.dumps(\n docs,\n default=lambda obj: obj.isoformat() if isinstance(\n obj, dt.datetime) else None\n )\n\n params = {}\n\n if commit:\n params['commit'] = 'true'\n\n return self.client.post(\n self._get_collection_url('update/json'),\n params=params,\n body=data\n )",
"def patch_collection_with_http_info(self, bucket_id, collection_id, **kwargs):\n\n all_params = ['bucket_id', 'collection_id', 'collection', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_collection\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `patch_collection`\")\n # verify the required parameter 'collection_id' is set\n if ('collection_id' not in params) or (params['collection_id'] is None):\n raise ValueError(\"Missing the required parameter `collection_id` when calling `patch_collection`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `patch_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `patch_collection`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/collections/{collection_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'collection_id' in params:\n path_params['collection_id'] = params['collection_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'collection' in params:\n body_params = params['collection']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/merge-patch+json', 'application/json-patch+json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Collection',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise",
"def update_documents(self, database, collection, spec, doc_or_docs, upsert,\n multi):\n validators.check_document_to_update(doc_or_docs)\n r = self.__get_response(settings.UPD_DOCS,\n {\"db\": database, \"col\": collection},\n data=doc_or_docs, q=spec, m=multi, u=upsert)\n if r[\"status\"] == 200:\n if r[\"result\"][\"error\"]:\n raise Exception(r[\"result\"][\"error\"])\n return r[\"result\"][\"n\"]\n raise Exception(r[\"result\"][\"message\"])",
"def update_collection(self, collection):\n node = self.node\n flow = node if node.is_flow else node.flow\n\n # Build the key used to store the entry in the document.\n key = node.name\n if node.is_task:\n key = \"w\" + str(node.pos[0]) + \"_t\" + str(node.pos[1])\n elif node.is_work:\n key = \"w\" + str(node.pos)\n\n db = collection.database\n\n # Save files with GridFs first in order to get the ID.\n if self.gridfs_files:\n import gridfs\n fs = gridfs.GridFS(db)\n for ext, gridfile in self.gridfs_files.items():\n logger.info(\"gridfs: about to put file:\", str(gridfile))\n # Here we set gridfile.fs_id that will be stored in the mondodb document\n try:\n with open(gridfile.path, \"r\" + gridfile.mode) as f:\n gridfile.fs_id = fs.put(f, filename=gridfile.path)\n except IOError as exc:\n logger.critical(str(exc))\n\n if flow.mongo_id is None:\n # Flow does not have a mongo_id, allocate doc for the flow and save its id.\n flow.mongo_id = collection.insert({})\n print(\"Creating flow.mongo_id\", flow.mongo_id, type(flow.mongo_id))\n\n # Get the document from flow.mongo_id and update it.\n doc = collection.find_one({\"_id\": flow.mongo_id})\n if key in doc:\n raise ValueError(\"%s is already in doc!\" % key)\n doc[key] = self.as_dict()\n\n collection.save(doc)\n #collection.update({'_id':mongo_id}, {\"$set\": doc}, upsert=False)",
"def update_document(collection: str, query: dict, data: dict) -> None:\n validate_arguments({'collection': [collection, str],\n 'query': [query, dict],\n 'data': [data, dict]})\n new_document = find_document(collection, query=query)\n if new_document is None:\n raise Exception('Didnt find a document to update')\n DB[collection].delete_one(query)\n for key in data:\n new_document[key] = data[key]\n add_document(collection, new_document)",
"def update_document(self, database, collection, _id, document):\n r = self.__get_response(settings.UPD_DOC,\n {\"db\": database, \"col\": collection, \"id\": str(_id)},\n data=document)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])",
"def update(cls, collection, uid, data):\n validated = cls.validate(data)\n validated.pop(\"_id\", None) # remove field \"_id\" if set\n object_uid = cls.object_id(uid)\n collection.update_one({\"_id\": object_uid}, {\"$set\": validated}, upsert=True)\n return collection.find_one({\"_id\": object_uid})",
"def update_user_collection(args):\n is_parameter_exists([\n constants.ID, constants.USER_ID\n ], args)\n\n collection_id = int(args[constants.ID])\n user_id = args[constants.USER_ID]\n\n request_user = args[constants.USER]\n\n # Revoke ownership from request_user\n try:\n collection_user = CollectionUser.objects.get(collection_id=collection_id, user_id=request_user.id)\n except ObjectDoesNotExist:\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n # if request_user is not owner, then raise AUTH_ERROR\n if collection_user.type != COLLECTION_USER_TYPE[0]:\n raise ApiError(constants.AUTH_ERROR)\n\n collection_user.type = COLLECTION_USER_TYPE[1] # change to member\n collection_user.save()\n\n # Grant ownership to the user whose id is user_id\n try:\n collection_user = CollectionUser.objects.get(collection_id=collection_id, user_id=user_id)\n except ObjectDoesNotExist:\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n collection_user.type = COLLECTION_USER_TYPE[0] # change to owner\n collection_user.save()",
"def test_update_collection(self):\n pass",
"def dataUpdate(self, collectionName, findCat, findData, updateCat, updateData):\n condition = {findCat: findData}\n findRes = collectionName.find_one(condition)\n findRes[updateCat] = updateData\n result = collectionName.update(condition, findRes)\n return result",
"def update_document(self, collection, query, mongo_id):\n\n try:\n self.client[self.db][collection].update_one(\n {'_id': mongo_id},\n query)\n except errors.PyMongoError as e:\n print \"Exception\", e",
"def update(self, spec, document, upsert=False, multi=False):\n return self.database.connection.request.update_documents(\n self.database.name, self.name, spec, document, upsert, multi)",
"def update_topics(mongo_collection, name, topics):\n query_name = {'name': name}\n new_topics = {'$set': {'topics': topics}}\n if mongo_collection:\n return mongo_collection.update_many(query_name, new_topics)",
"def collection(self, name=\"\", desc=\"\", collection=None, remove=False,\n elements=None, **kwargs):\n\n #in the future, MPO may support updates of values such as name and desc. At that point,\n #specifying a UUID will enable updates of those values. May want to be able to remove element\n #from a collection too.\n #remove option could apply to the entire collection in future api extensions\n\n ##validation of input\n #elements must be a list if present\n if elements:\n if not isinstance(elements,list):\n elements=[elements]\n else:\n elements=[]\n\n if collection: #add to existing collection\n\n if remove:\n if desc!=\"\":\n warnings.warn(\"InvalidArgs in collect/collection. No description used when removing an element.\")\n if name!=\"\":\n warnings.warn(\"InvalidArgs in collect/collection. No name used when removing an element.\")\n assert elements,\"InvalidArgs in collect/collection. Must specify an element to remove.\"\n assert collection!=None,\"InvalidArgs in collect/collection. Must specify the collection from which to remove the element.\"\n\n for element in elements:\n r=self.delete(self.COLLECTION_ELEMENT_RT.format(cid=collection)+'/'+element)\n\n else:\n payload={\"elements\":elements}\n r=self.post(self.COLLECTION_ELEMENT_RT.format(cid=collection), None,\n collection, data=payload, **kwargs)\n\n else: #make new collection\n payload={\"name\":name,\"description\":desc,\"elements\":elements}\n r=self.post(self.COLLECTION_RT, None, None, data=payload, **kwargs)\n\n return r",
"def update_all(collection: Collection, data_to_update):\n return collection.update_many({}, {'$set': data_to_update}).matched_count",
"def setCollection(self, collection):\n self.collectionName = collection[\"name\"]\n self.collectionType = collection[\"type\"]\n return",
"def update_many(collection: Collection, query, data_to_update):\n return collection.update_many(query, {'$set': data_to_update}).matched_count",
"def update(self, parameters):\n self.__enforce_connected()\n self.collection._update(self, parameters)",
"def salesforce_collection_update(self, objects):\n for obj in objects:\n assert obj[\n \"id\"\n ], \"Should be a list of objects with Ids returned by Salesforce Collection Insert\"\n if STATUS_KEY in obj:\n del obj[STATUS_KEY]\n\n assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (\n \"Cannot update more than %s objects with this keyword\"\n % SF_COLLECTION_INSERTION_LIMIT\n )\n\n records = self.cumulusci.sf.restful(\n \"composite/sobjects\",\n method=\"PATCH\",\n json={\"allOrNone\": True, \"records\": objects},\n )\n\n for record, obj in zip(records, objects):\n obj[STATUS_KEY] = record",
"def update(self, force = False):\n self.__enforce_connected()\n parameters = {}\n if(force):\n parameters[\"force\"] = \"true\"\n self.collection._update(self, parameters)",
"def test_update_collection_metadata(self):\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection(self.hiarc_util.get_test_metadata()))\n upmd = {\n \"department\": \"support\",\n \"quotaCarrying\": False,\n \"targetRate\": 7.271,\n \"level\": 2,\n \"startDate\": dateutil.parser.parse(\"2020-02-25T22:33:50.134Z\")\n }\n uc = hiarc.UpdateCollectionRequest(metadata=upmd)\n updated = self.hiarc_collections.update_collection(uc, c1.key)\n updated.metadata['startDate'] = dateutil.parser.parse(\n updated.metadata['startDate'])\n self.assertDictEqual(upmd, updated.metadata)"
] |
[
"0.76857024",
"0.71235275",
"0.7037156",
"0.69677466",
"0.6750384",
"0.6648804",
"0.65590507",
"0.6208271",
"0.60423267",
"0.6038878",
"0.5893929",
"0.5853785",
"0.5823851",
"0.5781511",
"0.5763387",
"0.5668278",
"0.54936445",
"0.5441208",
"0.5405871",
"0.5403321",
"0.5403277",
"0.5288181",
"0.5232295",
"0.5156777",
"0.5140344",
"0.50702155",
"0.49837434",
"0.4980507",
"0.49452758",
"0.49441364"
] |
0.7737028
|
0
|
Calculate the normalized difference vector between the embeddings of two words.
|
def diff(self, word1, word2):
v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]
return v / np.linalg.norm(v)
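
# Minimal usage sketch (hypothetical names): assumes an embedding object `E` of this
# class with `_vecs`/`_index` populated and numpy imported as `np`. The result is the
# unit-length difference vector, often used as a direction in embedding space:
#
#   gender_direction = E.diff("she", "he")
#   np.linalg.norm(gender_direction)  # -> 1.0 by construction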
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):\n # compute square norm to avoid compute all the directions\n vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2\n src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2\n\n # dot product\n dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)\n \n # reshape for broadcasting\n vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size\n src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1\n\n # compute squared difference\n sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product\n if squared:\n return sq_norm\n else:\n # relu + epsilon for numerical stability\n sq_norm = F.relu(sq_norm) + 1e-20\n \n # take the square root\n return sq_norm.sqrt()",
"def wordMoversDistance(model, document1, document2):\n # If pyemd C extension is available, import it.\n # If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance\n from pyemd import emd\n # Remove out-of-vocabulary words.\n len_pre_oov1 = len(document1)\n len_pre_oov2 = len(document2)\n document1 = [token for token in document1 if token in model]\n document2 = [token for token in document2 if token in model]\n diff1 = len_pre_oov1 - len(document1)\n diff2 = len_pre_oov2 - len(document2)\n if diff1 > 0 or diff2 > 0:\n print('Remove ' + str(diff1) + ' and ' + str(diff2) + ' OOV words from document 1 and 2 ('\n 'respectively).')\n return float('inf')\n\n if not document1 or not document2:\n print(\"At least one of the documents had no words that were in the vocabulary. Aborting (returning \"\n \"inf).\")\n return float('inf')\n\n dictionary = Dictionary(documents=[document1, document2])\n vocab_len = len(dictionary)\n\n if vocab_len == 1:\n # Both documents are composed by a single unique token\n return 0.0\n\n # Sets for faster look-up.\n docset1 = set(document1)\n docset2 = set(document2)\n\n # Compute distance matrix.\n distance_matrix = zeros((vocab_len, vocab_len), dtype=double)\n for i, t1 in dictionary.items():\n if t1 not in docset1:\n continue\n\n for j, t2 in dictionary.items():\n if t2 not in docset2 or distance_matrix[i, j] != 0.0:\n continue\n\n # Compute Euclidean distance between word vectors.\n distance_matrix[i, j] = distance_matrix[j, i] = sqrt(np_sum((model[t1] - model[t2]) ** 2))\n\n if np_sum(distance_matrix) == 0.0:\n # `emd` gets stuck if the distance matrix contains only zeros.\n print('The distance matrix is all zeros. Aborting (returning inf).')\n return float('inf')\n\n def nbow(document):\n d = zeros(vocab_len, dtype=double)\n nbow = dictionary.doc2bow(document) # Word frequencies.\n doc_len = len(document)\n for idx, freq in nbow:\n d[idx] = freq / float(doc_len) # Normalized word frequencies.\n return d\n\n # Compute nBOW representation of documents.\n d1 = nbow(document1)\n d2 = nbow(document2)\n\n # Compute WMD.\n return emd(d1, d2, distance_matrix)",
"def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))",
"def wordSimilarityRatio(sent_1,sent_2):",
"def embedding_distance(embedding_1: Embedding,\n embedding_2: Embedding,\n distance_metric: DistanceMetric) -> float:\n distance = embedding_distance_bulk(embedding_1.reshape(\n 1, -1), embedding_2.reshape(1, -1), distance_metric=distance_metric)[0]\n return distance",
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))",
"def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))",
"def calc_similarity_between_words(word1, word2):\n # pos = wn.Noun is mandatory otherwise the lowest common hypernym cant be found because of part of speach\n word1_synsets = wn.synsets(word1, pos=wn.NOUN)\n word2_synsets = wn.synsets(word2, pos=wn.NOUN)\n\n w1 = get_words_from_sysets(word1_synsets)\n w2 = get_words_from_sysets(word2_synsets)\n\n sim_matrix = np.zeros((len(w1), len(w2)))\n\n for i in range(len(w1)):\n for j in range(len(w2)):\n try:\n sim_matrix[i, j] = embeddings.distances(w1[i], [w2[j]])\n except KeyError:\n sim_matrix[i, j] = 1000\n continue\n\n w1_ind, w2_ind = np.unravel_index(np.nanargmin(sim_matrix, axis=None), sim_matrix.shape)\n lowest_common_hyp = (word1_synsets[w1_ind]).lowest_common_hypernyms(word2_synsets[w2_ind])\n return (sim_matrix[w1_ind, w2_ind], lowest_common_hyp)",
"def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity",
"def calc_distance(sentence_1, sentence_2):\n print(sentence_1)\n print(sentence_2)\n # sentence_1 = sentence_1.replace(\"'\", \"\")\n # sentence_2 = sentence_2.replace(\"'\", \"\")\n sentence_1 = sentence_1.replace(\",\", \"\")\n sentence_2 = sentence_2.replace(\",\", \"\")\n sentence_1 = sentence_1.replace(\";\", \"\")\n sentence_2 = sentence_2.replace(\";\", \"\")\n print(sentence_1)\n print(sentence_2)\n sentence_1 = sentence_1.lower().split()\n sentence_2 = sentence_2.lower().split()\n sentence_1 = [w for w in sentence_1 if w not in stop_words]\n sentence_2 = [w for w in sentence_2 if w not in stop_words]\n return wordMoversDistance(trained_model, sentence_1, sentence_2)",
"def similar_text(word1, word2) -> float:\n\n return textdistance.overlap.similarity(word1, word2)",
"def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw",
"def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)",
"def get_distance(self, resp1, resp2):\n feed_dict = {self.anchor: resp1}\n embed1 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n feed_dict = {self.anchor: resp2}\n embed2 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n return np.sqrt(np.sum((embed1-embed2)**2, 1))",
"def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j",
"def distance(a, b):\n if len(a) > len(b):\n a = a[:len(b)]\n elif len(b) > len(a):\n b = b[:len(a)]\n\n ar = numpy.array(a)\n br = numpy.array(b)\n dist = numpy.linalg.norm(ar-br)\n\n return dist",
"def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)",
"def length_dist(self,synset_1, synset_2):\n\t l_dist = sys.maxsize\n\t if synset_1 is None or synset_2 is None: \n\t return 0.0\n\t if synset_1 == synset_2:\n\t # if synset_1 and synset_2 are the same synset return 0\n\t l_dist = 0.0\n\t else:\n\t wset_1 = set([str(x.name()) for x in synset_1.lemmas()]) \n\t wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\n\t if len(wset_1.intersection(wset_2)) > 0:\n\t # if synset_1 != synset_2 but there is word overlap, return 1.0\n\t l_dist = 1.0\n\t else:\n\t # just compute the shortest path between the two\n\t l_dist = synset_1.shortest_path_distance(synset_2)\n\t if l_dist is None:\n\t l_dist = 0.0\n\t # normalize path length to the range [0,1]\n\t return math.exp(-self.ALPHA * l_dist)",
"def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))",
"def distance_unigrams_same(t1, t2):\n t1_terms = make_terms_from_string(t1)\n t2_terms = make_terms_from_string(t2)\n terms1 = set(t1_terms)\n terms2 = set(t2_terms)\n shared_terms = terms1.intersection(terms2)\n #print(shared_terms)\n all_terms = terms1.union(terms2)\n #print(all_terms)\n dist = 1.0 - (len(shared_terms) / float(len(all_terms)))\n return dist",
"def get_wmd_dist(s1, s2, model):\r\n s1 = s1.lower().strip().split()\r\n s2 = s2.lower().strip().split()\r\n\r\n distance = model.wmdistance(s1, s2)\r\n return distance",
"def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)",
"def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))"
] |
[
"0.7286021",
"0.72623956",
"0.721585",
"0.721307",
"0.7204037",
"0.71977025",
"0.7186543",
"0.70891815",
"0.6977888",
"0.6919238",
"0.68870157",
"0.6784941",
"0.6743485",
"0.6693251",
"0.6688765",
"0.66666394",
"0.6653541",
"0.6604185",
"0.65965676",
"0.65595305",
"0.6548764",
"0.65394235",
"0.65262336",
"0.6523619",
"0.6523619",
"0.6517038",
"0.6503068",
"0.6503068",
"0.6500789",
"0.6480366"
] |
0.742528
|
0
|
Save the words and embeddings to a file, sorted by word frequency in descending order.
|
def save_embeddings(self, filename, binary=True):
    with open(filename, "wb") as fout:
        # header line: "<vocab size> <vector dim>", written as UTF-8 bytes
        fout.write(("%s %s\n" % self._vecs.shape).encode("utf8"))
        # store in sorted order: most frequent words at the top
        for i, word in enumerate(self._words):
            row = self._vecs[i]
            if binary:
                # word2vec-style binary record: word, a space, then the raw vector bytes
                fout.write(word.encode("utf8") + b" " + row.tobytes())
            else:
                fout.write(
                    ("%s %s\n" % (word, " ".join("%f" % val for val in row))).encode("utf8")
                )
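
# Usage sketch (assumptions: `self._words` is already ordered by descending frequency,
# so rows are written most-frequent-first, and `E` is an instance of this class):
#
#   E.save_embeddings("vectors.bin", binary=True)   # word2vec-style binary records
#   E.save_embeddings("vectors.txt", binary=False)  # plain-text "word v1 v2 ..." lines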
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save(self, filename):\n with open(filename, \"w\", encoding=\"utf8\") as f:\n f.write(\n \"\\n\".join(\n [\n w + \" \" + \" \".join([str(x) for x in v])\n for w, v in zip(self._words, self._vecs)\n ]\n )\n )\n print(\"Wrote\", self.n, \"words to\", filename)",
"def write_vocabulary(vocab_processor, outfile):\n vocab_size = len(vocab_processor.vocabulary_)\n with open(outfile, \"w\") as vocabfile:\n for id in range(vocab_size):\n word = vocab_processor.vocabulary_._reverse_mapping[id]\n vocabfile.write(word + \"\\n\")\n print(\"Saved vocabulary to {}\".format(outfile))",
"def dump_vocab(vocab, path, encoding=\"Utf-8\"):\n with open(path, \"w\", encoding=encoding) as fout:\n for word, freq in vocab:\n fout.write(\"%s\\t%d\\n\" % (word, freq))",
"def write_vocabulary():\n with open('../data/vocabulary.txt', 'w') as vocabulary_file:\n vocabulary = generate_vocabulary()\n word_count = sum(vocabulary.values())\n print(word_count)\n vocabs_str = [(\"%s %d\" % (key, value)) for key, value in vocabulary.items()]\n vocabulary_file.write('\\n'.join(vocabs_str))",
"def save_vocab(self):\n opts = self._options\n with open(os.path.join(opts.save_path, \"vocab.txt\"), \"w\") as f:\n for i in xrange(opts.vocab_size):\n f.write(\"%s %d\\n\" % (tf.compat.as_text(opts.vocab_words[i]),\n opts.vocab_counts[i]))",
"def write_vocab(vocab, filename):\n print(\"Writing vocab...\")\n with open(filename, \"w\", encoding='utf-8') as f:\n for i, word in enumerate(vocab):\n if i != len(vocab) - 1:\n f.write(\"{}\\n\".format(word))\n else:\n f.write(word)\n print(\"- done. {} tokens\".format(len(vocab)))",
"def save_model(self):\n filename = self.name + '_words'\n f = open(filename, 'w') \n f.write(str(self.words)) \n f.close()\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'w') \n f.write(str(self.word_lengths)) \n f.close()\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'w') \n f.write(str(self.stems)) \n f.close()\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'w') \n f.write(str(self.sentence_lengths)) \n f.close()\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'w') \n f.write(str(self.punctuation)) \n f.close()",
"def save_model(self):\n # words dictionary\n filename = self.name + \"_words\"\n f = open(filename, 'w')\n f.write(str(self.words))\n f.close()\n\n # word_lengths dictionary\n filename = self.name + \"_word_lengths\"\n f = open(filename, 'w')\n f.write(str(self.word_lengths))\n f.close()\n\n # stems dictionary\n filename = self.name + \"_stems\"\n f = open(filename, 'w')\n f.write(str(self.stems))\n f.close()\n\n # sentence_lengths dictionary\n filename = self.name + \"_sentence_lengths\"\n f = open(filename, 'w')\n f.write(str(self.sentence_lengths))\n f.close()\n\n # ten most common words\n filename = self.name + \"_common_word\"\n f = open(filename, 'w')\n f.write(str(self.common_word))\n f.close()",
"def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)",
"def write_embeddings_to_file(self):\n modes = [self.generator, self.discriminator]\n for i in range(2):\n embedding_matrix = modes[i].embedding_matrix\n embedding_matrix = embedding_matrix.detach().to('cpu').numpy()\n index = np.array(range(self.n_node)).reshape(-1, 1)\n embedding_matrix = np.hstack([index, embedding_matrix])\n embedding_list = embedding_matrix.tolist()\n embedding_str = [str(int(emb[0])) + \"\\t\" + \"\\t\".join([str(x) for x in emb[1:]]) + \"\\n\" \n for emb in embedding_list]\n with open(config.emb_filenames[i], \"w+\") as f:\n lines = [str(self.n_node) + \"\\t\" + str(config.n_emb) + \"\\n\"] + embedding_str\n f.writelines(lines)",
"def save_dists(self, name):\n with open(name, \"w\", encoding=\"utf-8\") as f:\n for word in self._distWords:\n f.write(\"{} | {}\\n\".format(word, \",\".join(map(str_tuple, self._distWords[word].items()))))",
"def save(self, file_name):\n try:\n open(file_name, 'w').write(\"\\n\".join(self.word_list.keys()))\n except IOError as e:\n print(e)",
"def get_vocabulary(text_fname, vocab_fname):\n with codecs.open(text_fname,'r','utf-8') as infile, \\\n codecs.open(vocab_fname,'w','utf-8') as outfile: \n\n count_map={}\n for line in infile:\n sent=line.strip().split(' ')\n for w in sent:\n count_map[w]=count_map.get(w,0.0)+1.0\n\n for w,c in count_map.iteritems(): \n outfile.write(u'{}|{}\\n'.format(w,c))",
"def save_word_set(file_path: str, words: Iterable[str]) -> None:\n with open(file_path, 'w', encoding='utf-8') as file:\n for word in SortedSet(words):\n file.write(word)\n file.write('\\n')",
"def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()",
"def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')",
"def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)",
"def save_embedding(emb, emb_dim, vocabs, save_dir, file_name):\n\tembedding = emb.weight.data.tolist()\n\n\tnum_vocab = len(embedding)\n\n\tvocab2emb = {}\n\tfor id in list(vocabs.index2word.keys())[:num_vocab]:\n\t\tvocab2emb[vocabs.index2word[id]] = embedding[id]\n\t\n\ttry:\n\t\tdel vocab2emb['<PAD>']\n\t\tnum_vocab-=1\n\texcept:\n\t\tpass\n\t\n\tpath = save_dir / (file_name + \".vec\")\n\twith path.open(\"w\") as ff:\n\t\tff.write(str(num_vocab) + \" \" + str(emb_dim) + \"\\n\")\n\t\tfor word, embed in vocab2emb.items():\n\t\t\tcontent = word + \" \" + \" \".join(map(str, embed)) + \"\\n\"\n\t\t\tff.write(content)",
"def save_to_file(self, vocab_file = None):\n if len(self.words_new) > 0:\n # Use file path which is given either by the constructor or this method's argument.\n # This method's argument takes priority.\n if not vocab_file:\n vocab_file = self.vocab_file\n\n if vocab_file:\n self.export_appended = False\n if os.path.exists(vocab_file):\n # Append the data to the existing vocab file.\n self.export_appended = True\n else:\n # If the vocab file is to be newly created, initialize the file with special tokens first.\n with open(vocab_file, 'w', encoding='utf8') as fp:\n for d in special_tokens:\n fp.write(\"%s\\n\" % d)\n\n # Append the newly added data\n with open(vocab_file, 'a', encoding='utf8') as fp:\n for d in self.words_new:\n fp.write(\"%s\\n\" % d)\n self.export_num += 1",
"def saveWordsToFile(words, filename):\n\n f = file(filename, 'w')\n for word in words:\n try:\n f.write(word.encode('utf-8') + '\\n')\n except UnicodeEncodeError, e:\n print 'Encoding error' + word + '\\n'\n f.close()",
"def sorting_and_save(self, content):\n if content is None:\n logging.warn(\"Content is null\")\n return\n try:\n arr = content.split()\n with open(self.output, 'w') as outfile:\n if len(arr) > 0:\n words = []\n integers = []\n posTypes = [] # Position types: True: integer, False: word\n for item in arr:\n try:\n val = int(item)\n integers.append(val)\n posTypes.append(True)\n except ValueError:\n words.append(item)\n posTypes.append(False)\n words.sort(key=str.lower)\n logging.debug(words)\n integers.sort()\n logging.debug(integers)\n outfile.write(str(integers.pop(0)) if posTypes[0] else words.pop(0))\n if len(posTypes) > 1:\n for pos in posTypes[1:]:\n outfile.write(\" \" + str(integers.pop(0)) if pos else \" \" + words.pop(0))\n outfile.write(\"\\n\") # end the line\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")",
"def save_dict(emb, path_to_vec, dim=100):\n word_vec = {}\n for w in emb.keys():\n word_vec[w] = emb[w]\n with open(path_to_vec + f'embedding_{str(dim)}d.txt', 'w') as file:\n for w, v in word_vec.items():\n file.write(w + ' ')\n for el in v:\n file.write(\"{:.20f}\".format(float(el)) + ' ')\n file.write('\\n')",
"def save_vocab(count, name='vocab.txt'):\n pwd = os.getcwd()\n vocabulary_size = len(count)\n with open(os.path.join(pwd, name), \"w\") as f:\n for i in xrange(vocabulary_size):\n f.write(\"%s %d\\n\" % (tf.compat.as_text(count[i][0]), count[i][1]))\n print(\"%d vocab saved to %s in %s\" % (vocabulary_size, name, pwd))",
"def dump_token_embeddings(vocab_file, options_file, weight_file, outfile):\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n max_word_length = options['char_cnn']['max_characters_per_token']\n\n vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)\n batcher = Batcher(vocab_file, max_word_length)\n\n ids_placeholder = tf.placeholder('int32',\n shape=(None, None, max_word_length)\n )\n model = BidirectionalLanguageModel(options_file, weight_file)\n embedding_op = model(ids_placeholder)['token_embeddings']\n\n n_tokens = vocab.size\n embed_dim = int(embedding_op.shape[2])\n\n embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE)\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n for k in range(n_tokens):\n token = vocab.id_to_word(k)\n char_ids = batcher.batch_sentences([[token]])[0, 1, :].reshape(\n 1, 1, -1)\n embeddings[k, :] = sess.run(\n embedding_op, feed_dict={ids_placeholder: char_ids}\n )\n\n with h5py.File(outfile, 'w') as fout:\n ds = fout.create_dataset(\n 'embedding', embeddings.shape, dtype='float32', data=embeddings\n )",
"def save_vocabulary(self, save_dir: str) -> None:\n vocab_f: str = os.path.join(save_dir, 'vocab.tsv')\n with open(vocab_f, 'w') as ofile:\n for i, word_type in enumerate(self.get_instruction_vocabulary()):\n ofile.write(str(i) + '\\t' + word_type + '\\n')",
"def save_vocabulary(self, vocab_path):\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_NAME)\n else:\n vocab_file = vocab_path\n with open(vocab_file, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))\n index = token_index\n writer.write(token + u'\\n')\n index += 1\n return vocab_file",
"def save_model(self):\n f = open(self.name + '_' + 'words', 'w')\n f.write(str(self.words))\n f.close\n\n f = open(self.name + '_' + 'word_lengths', 'w')\n f.write(str(self.word_lengths))\n f.close\n\n f = open(self.name + '_' + 'sentence_lengths', 'w')\n f.write(str(self.sentence_lengths))\n f.close\n\n f = open(self.name + '_' + 'stems', 'w')\n f.write(str(self.stems))\n f.close\n\n f = open(self.name + '_' + 'commas_per_sentence', 'w')\n f.write(str(self.commas_per_sentence))\n f.close",
"def save_txt(words,data,fname):\n\n out=open(fname,\"w\")\n\n rows,dims=data.shape\n print(\"{} {}\".format(rows,dims),file=out)\n counter=0\n\n for i,w in enumerate(words):\n print(w,\" \".join((\"{:6f}\".format(x) for x in data[i,:])),file=out)\n out.close()",
"def save(self, filename):\n try:\n\n with codecs.open(filename, 'w', sg.__encoding__) as fd:\n for word in sorted(self.__entries.keys()):\n fd.write(\"{:s}\\n\".format(word))\n\n except Exception as e:\n logging.info('Save file failed due to the following error: {:s}'\n ''.format(str(e)))\n return False\n\n return True",
"def save_model(self):\r\n jeff = self.name + '_words'\r\n f = open(jeff, 'w')\r\n f.write(str(self.words))\r\n f.close()\r\n \r\n jeph = self.name + '_word_lengths'\r\n f = open(jeph, 'w')\r\n f.write(str(self.word_lengths))\r\n f.close()\r\n \r\n geoff = self.name + '_stems'\r\n f = open(geoff, 'w')\r\n f.write(str(self.stems))\r\n f.close()\r\n \r\n joeff= self.name + '_sentence_lengths'\r\n f = open(joeff, 'w')\r\n f.write(str(self.sentence_lengths))\r\n f.close()\r\n \r\n geoph = self.name + '_punctuation'\r\n f = open(geoph, 'w')\r\n f.write(str(self.punctuation))\r\n f.close()"
] |
[
"0.71027607",
"0.6819131",
"0.68078804",
"0.677343",
"0.6735313",
"0.6511843",
"0.65113306",
"0.6460898",
"0.6394754",
"0.6384446",
"0.6375948",
"0.6351385",
"0.631681",
"0.62701535",
"0.6256178",
"0.6229504",
"0.6220278",
"0.6203524",
"0.6178648",
"0.6155609",
"0.6126635",
"0.61245555",
"0.60994625",
"0.60953176",
"0.6093159",
"0.6085396",
"0.6079143",
"0.6054481",
"0.60535216",
"0.60372293"
] |
0.74827415
|
0
|
Print the most stereotypical professions on both ends of the bias direction.
|
def profession_stereotypes(self, profession_words, bias_space, print_firstn=20):
assert isinstance(print_firstn, int) and print_firstn >= 0
# Calculate the projection values onto the bias subspace
sp = sorted(
[
(self.v(w).dot(bias_space), w)
for w in profession_words
if w in self._words
]
)
# Check what genders belong to positive/negative projection values
pos_neg = (
("Female", "Male")
if self.v("she").dot(bias_space) > 0
else ("Male", "Female")
)
# Print the professions with scores
if print_firstn > 0:
print(pos_neg[0].center(38) + "|" + pos_neg[1].center(38))
print("-" * 77)
for i in range(min(print_firstn, len(sp))):
            print(
                str(sp[-(i + 1)][0].round(3)).ljust(8)  # score, left (pos_neg[0]) column
                + sp[-(i + 1)][1].rjust(29)  # profession, left column
                + " | "
                + sp[i][1].ljust(29)  # profession, right (pos_neg[1]) column
                + str(sp[i][0].round(3)).rjust(8)  # score, right column
            )
return sp
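
# Hedged usage sketch (hypothetical names): assumes an embedding object `E` of this
# class, a list of profession words, and a bias direction vector (e.g. obtained from
# a PCA over definitional pairs):
#
#   sp = E.profession_stereotypes(professions, gender_direction, print_firstn=20)
#   most_positive_end = sp[-1]   # (projection score, profession)
#   most_negative_end = sp[0]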
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")",
"def print_skill_title(self):\n #index_largest = self.clusters.index(max(self.clusters))\n for i in range(len(self.cluster)):\n #if i != index_largest:\n list_temp = []\n for j in range(len(self.cluster[i])):\n list_temp.append(self.skill[self.cluster[i][j]])\n #print(self.present_skill[i], list_temp)\n print(i, self.skill[self.present_skill[i]], list_temp)\n print(\" \") \n return",
"def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])",
"def printMajors(self):\n import mush\n skills = mush.combatSkills+mush.magicSkills+mush.stealthSkills\n for className, stats in sorted(self.classStats.items()):\n print className,'-------------------------------'\n skillStats = [(key,value) for key,value in stats.items() if key in skills]\n skillStats.sort(key=lambda a: a[1][1],reverse=True)\n for low,high in ((0,5),(5,10)):\n for skill,stat in sorted(skillStats[low:high]):\n print '%-13s %3d' % (skill,stat[1])\n print",
"def print_leader(self):\r\n return \"Best particle found:\\n{0}\".format(\r\n repr(self.population[self.leader]))",
"def display_current_situation(self, magic):\n usable_points = 0\n active_powers = []\n header_print(\"Your side of the struggle:\")\n for card in self.stats['active']:\n print(card_format(card))\n for power in card['powers']:\n active_powers.append(power)\n for power in card['powers']:\n if power in self.stats['opponent']['powers']:\n usable_points += card['points']\n break\n print(\n \"\\nRelevant strength: %d Magic: %d Relevant powers: %s\" % (\n usable_points,\n magic,\n ', '.join(\n set(\n self.stats['opponent']['powers']\n ).intersection(active_powers)\n )\n )\n )\n header_print(\"The other side of the struggle:\")\n print(card_format(self.stats['opponent']))\n return usable_points",
"def __statistics_best_situation(self):\n students_list = self.__grade_controller.get_list_of_students_with_best_situation()\n if len(students_list) == 0:\n print(\"There is no student with a grade!\")\n return\n\n for student in students_list:\n print(str(student) + \"\\n\")",
"def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')",
"def print_result(best_instance):\n\n print('Best instance: ')\n print('Generation: ' + str(best_instance.generation))\n print('Instance: ' + str(best_instance.instance))\n print('Fitness: ' + str(round(best_instance.value, 2)))\n print('Phenotype: ' + str(best_instance.phenotype))",
"def printPosteriors(posteriors):\n for i,p in enumerate(posteriors):\n print(u\" > Bin {0}: {1:.2f} \\u00B1 {2:.2f}\".format(i,np.mean(p),np.std(p)))\n # \\u00B1 is the same as $\\pm$ in unicode\n\n return",
"def print_possibility_space():\n\n print(\"Possibility space:\")\n print(\" {} unique sword images\".format(calculate_image_possibilities()))",
"def viz(analogies):\n print(\"Index\".ljust(12) + \"Analogy\".center(45) + \"Gender score\".rjust(12))\n print(\"-\" * 69)\n print(\n \"\\n\".join(\n str(i).rjust(4) + a[0].rjust(29) + \" | \" + a[1].ljust(29) + (str(a[2]))[:4]\n for i, a in enumerate(analogies)\n )\n )",
"def showWorstStats(self) :\n Scenario.messageWorstStats()\n self.showWorstGainWon()\n self.showWorstBetUse()\n self.showNbLevelLose()",
"def print_prediction (self, ptup):\n from . import ellipses\n bestra, bestdec, maj, min, pa = ptup\n\n f = ellipses.sigmascale (1)\n maj *= R2A\n min *= R2A\n pa *= R2D\n\n print ('position =', fmtradec (bestra, bestdec))\n print ('err(1σ) = %.2f\" × %.2f\" @ %.0f°' % (maj * f, min * f, pa))",
"def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())",
"def showprivelages(self):\r\n\t\tprint (\"An administrator has the following abilities: \")\r\n\t\tfor power in self.powers:\r\n\t\t\tprint (\"- \" + power)",
"def print_predict(classes, probs):\n predictions = list(zip(classes, probs))\n for i in range(len(predictions)):\n print('{} : {:.3%}'.format(predictions[i][0], predictions[i][1]))\n pass",
"def print_collisions(self):",
"def print(self):\n\n print(f\"{len(self._sources)} vocabularies given sensitivity {self._sensitivity}.\"\n f\" From best to worst (vocabularies with no matches are excluded):\")\n for source in self._sources:\n print(f\"{source.uri}, {self._score_type.__str__()}: {getattr(source.ranking, self._score_type.__str__())}\")",
"def show_prop(self):\n print(self.population_size)\n print(self.max_generation)\n print(self.mutate_rate)\n print(self.elite_rate)\n print(self.cross_rate)\n print(self.cross_type)\n print(self.verify_num)\n print(self.proof)",
"def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"",
"def wypisz_info(self):\n print(f\"Samochód: {self.producent} {self.model}\")",
"def print_top_answers(answers):\n print(\"Possible answers:\")\n print(\"-\" * 40)\n for res in answers:\n print(unicode(u\"{0:.2f}\\t{1}\".format(res[1], res[0])))",
"def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')",
"def print_outcomes(multi_cohort, strategy_name):\n\n # mean and confidence interval text of rewards\n survival_mean_PI_text = Format.format_estimate_interval(\n estimate=multi_cohort.get_mean_total_reward(),\n interval=multi_cohort.get_PI_total_reward(alpha=0.05),\n deci=1)\n\n # print reward statistics\n print(strategy_name)\n print(\"Estimate of mean rewards ($) and {:.{prec}%} prediction interval:\".format(1 - 0.05, prec=0),\n survival_mean_PI_text)",
"def showBestStats(self) :\n Scenario.messageBestStats()\n self.showBestStatLevelReached()\n self.showNbCoupFindFirstAttempt()\n self.showBestGainWon()\n self.showBestBetUse()\n self.showNbLevelWon()",
"def printEvolutionMaximum(self):\n evolutionMaximum = self.getEvolutionMax();\n print(evolutionMaximum)\n\n print(\"The path of the maximum evolution is the following:\")\n print()\n print('\\n'.join('{:.3}'.format(max) for max in evolutionMaximum))\n print()",
"def print_matrices(self):\n\n \"\"\"\n Print Optimal Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Optimal Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\")\n else:\n print('\\t', end=\"\")\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.optimal[i][j]) + '\\t', end=\"\"),\n print(\"\")\n\n \"\"\"\n Print Direction Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Direction Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\"),\n else:\n print('\\t', end=\"\"),\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.direction[i][j]) + '\\t', end=\"\"),\n print(\"\")",
"def show_priveleges(self):\n print(\"This user:\")\n for privelege in self.priveleges:\n print(privelege)",
"def pretty_print(words, M):\n sols = [0] *( len(words)+1)\n solution_dict = {}\n for i in range(len(words)-1, -1, -1):\n best = None\n for j in range(i, len(words)):\n temp = calculatePenalty(words[i:j+1], M)\n if temp is None:\n continue\n\n temp = temp + sols[j+1]\n if best is None or temp < best:\n best = temp\n sols[i] = best\n solution_dict.update({i: j+1})\n\n walker = 0\n while walker< len(words):\n sub_str = \"\"\n for i in range(walker, solution_dict[walker]):\n sub_str += words[i] + ' '\n walker = solution_dict[walker]\n\n print(sub_str)\n print(\"Penalty: \", sols[0])"
] |
[
"0.6361542",
"0.60957587",
"0.59916824",
"0.597503",
"0.5859005",
"0.5754796",
"0.5745316",
"0.5673546",
"0.5574457",
"0.55710953",
"0.55202615",
"0.5507526",
"0.5440333",
"0.54283935",
"0.54233825",
"0.5417117",
"0.5393393",
"0.53846264",
"0.5380719",
"0.5379185",
"0.53535235",
"0.5324707",
"0.5314863",
"0.53025615",
"0.5299852",
"0.5293366",
"0.52901524",
"0.5289529",
"0.52857745",
"0.52715886"
] |
0.7290917
|
0
|
Print the analogies in a nicer format.
|
def viz(analogies):
print("Index".ljust(12) + "Analogy".center(45) + "Gender score".rjust(12))
print("-" * 69)
print(
"\n".join(
str(i).rjust(4) + a[0].rjust(29) + " | " + a[1].ljust(29) + (str(a[2]))[:4]
for i, a in enumerate(analogies)
)
)
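
# Hedged usage sketch: `analogies` is assumed to be a list of (word_a, word_b, score)
# tuples produced elsewhere; only the printing format is illustrated here:
#
#   viz([("doctor", "nurse", 0.82), ("programmer", "homemaker", 0.79)])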
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def print(cls, vas):\n print(vas)",
"def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i",
"def print(self):\n str_items = [(str(v),str(p)) for v,p in sorted(self.items())]\n max_lens = [\n max(i[0] for i in str_items),\n max(i[1] for i in str_items)\n ]\n lena, lenb = max_lens\n print(\"\\n\".join(\n f\"{v:{lena}s} -> {p:{lenb}s}\"\n for v, p in str_items\n ))",
"def __repr__(self):\n s = \"\"\n for v in self.V():\n s += f\"{v.__repr__()}\\n\"\n \n return s",
"def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string",
"def print_nice(self):\n print(\"- \" + str(self.__node_a.name) + \" (\" + self.__node_a.get_value_string() +\n \") -> \" + str(self.__node_b.name) + \" (\" + self.__node_b.get_value_string() + \")\")",
"def print(self):\r\n self.print_avec_separateur()",
"def print_army(self):\n print(self.army)",
"def print_string(self):\n for x in self.minimal:\n print(\"min: %s\" % x)\n for x in self.also_installed:\n print(\"als: %s\" % x)\n for x in self.uninstalled:\n print(\"uni: %s\" % x)",
"def printpretty(self):\n print(self.string_rep())",
"def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()",
"def __repr__(self):\n output = \"\"\n output +=\"V:\\n\"\n for row in self.V:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\" \n \n output += \"\\nW:\\n\"\n for row in self.W:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\"\n return output",
"def __repr__(self):\n lstout = [\"Azimuthal Integrator:\", self.ai.__repr__(),\n \"Input image shape: %s\" % list(self.shapeIn),\n \"Number of points in radial direction: %s\" % self.nbpt_rad,\n \"Number of points in azimuthal direction: %s\" % self.nbpt_azim,\n \"Unit in radial dimension: %s\" % self.unit.REPR,\n \"Correct for solid angle: %s\" % self.correct_solid_angle,\n \"Polarization factor: %s\" % self.polarization,\n \"Dark current image: %s\" % self.dark_current_image,\n \"Flat field image: %s\" % self.flat_field_image,\n \"Mask image: %s\" % self.mask_image,\n \"Dummy: %s,\\tDelta_Dummy: %s\" % (self.dummy, self.delta_dummy),\n \"Directory: %s, \\tExtension: %s\" % (self.subdir, self.extension)]\n return os.linesep.join(lstout)",
"def print(self):\n\n for domino in self.hand:\n print(domino)",
"def print(self):\n print(self.pretty_str())",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()",
"def _display_examples(self):\n\n print(self._usage)\n print(self._examples)",
"def __repr__(self):\n return str.format(\"Cards: {0} Rank: '{1}' Values: {2}\",\n self.__cards,\n Hand.RANKS[self.rank()],\n self.values())",
"def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. {1}'.format(i, s.print_info()))",
"def __str__(self):\n items = ['({!r})'.format(item) for item in self.items()]\n return '[{}]'.format(' -> '.join(items))",
"def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'",
"def display(self):\n for value, prob in self.items():\n print(value, prob)",
"def __repr__(self) -> str:\n s = \"\\n\"\n fmt = \"{:7.3f}\"\n for i in range(len(self.w)):\n s += \" \".join(fmt.format(w) for w in self.w[i])\n s += \" | \" + fmt.format(self.b[i]) + \"\\n\"\n return s",
"def __str__(self):\n # TODO also show relative abundance\n s = \"{} ion species\\n\".format(len(self.ions))\n for ion in self.ions:\n s += \" {:2s} (Z = {:3d}) {:.3e} particles\\n\".format(ion.getName(), ion.getCharge(), ion.getParticleNumber())\n \n return s",
"def _print_custom(self):\n pass",
"def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass",
"def print(self):\n self.print_avec_separateur(\" \")",
"def _print_transforms(self):\n self._print_frozen_transforms()\n self._print_nonfrozen_transforms()",
"def pprint(self):\n print(self.pprint_str())",
"def display(self) -> str:\n lines, _, _, _ = self._display_aux()\n return '\\n'.join(lines)"
] |
[
"0.63618153",
"0.633266",
"0.62989044",
"0.62901837",
"0.62889653",
"0.6280727",
"0.6275443",
"0.6168047",
"0.61478996",
"0.6145579",
"0.61368775",
"0.6111617",
"0.6092454",
"0.6092312",
"0.6087881",
"0.60767114",
"0.60763526",
"0.6049187",
"0.60266894",
"0.60248387",
"0.6019836",
"0.6016713",
"0.6006247",
"0.5999944",
"0.59941137",
"0.5992239",
"0.59755063",
"0.5975026",
"0.595046",
"0.59420234"
] |
0.67746913
|
0
|
Perform PCA on the centered embeddings of the words in the pairs.
|
def doPCA(pairs, embedding, num_components=10):
matrix = []
for a, b in pairs:
center = (embedding.v(a) + embedding.v(b)) / 2
matrix.append(embedding.v(a) - center)
matrix.append(embedding.v(b) - center)
matrix = np.array(matrix)
pca = PCA(n_components=num_components)
pca.fit(matrix)
# bar(range(num_components), pca.explained_variance_ratio_)
return pca
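
# Hedged usage sketch (assumes `embedding` exposes v(word) -> np.ndarray, and that
# numpy as np and sklearn.decomposition.PCA are imported as above). The first
# principal component of the centered definitional pairs is commonly taken as the
# bias direction:
#
#   pairs = [("she", "he"), ("her", "his"), ("woman", "man")]
#   pca = doPCA(pairs, embedding, num_components=10)
#   bias_direction = pca.components_[0]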
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pca(embedding, num_components=3, principal_components=None):\n# shape = embedding.get_shape().as_list()\n shape = tf.shape(embedding)\n embedding = tf.reshape(embedding, [-1, shape[3]])\n\n if principal_components is None:\n principal_components = calculate_principal_components(embedding,\n num_components)\n embedding = tf.matmul(embedding, principal_components)\n\n embedding = tf.reshape(embedding,\n [shape[0], shape[1], shape[2], num_components])\n return embedding",
"def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;",
"def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca",
"def performPCA(dataSet, numShapesInDataset, numPointsInShapes, num_components):\n\tdataMat = np.array(dataSet).reshape((numShapesInDataset, numPointsInShapes*2))\n\t\n\t\"\"\"Creating the covariance matrix\"\"\"\n\tcovarMat = np.cov(dataMat.T)\n\t\t\n\t\"\"\"Generating the eigen vectors and eigen values\"\"\"\n\teigVals, eigVecs = np.linalg.eig(covarMat)\n\n\t\"\"\"Taking the first num_components eigen vectors and values, and the center of the space.\"\"\"\n\tprincipleComponents = np.real(eigVecs[:, 0:num_components])\n\tprincipleValues = np.real(eigVals[0:num_components])\n\tmeanShape = dataMat.mean(0).reshape((numPointsInShapes * 2, 1))\n\treturn principleComponents, principleValues, meanShape",
"def pca(adata, n_components=50, train_ratio=0.35, n_batches=50, gpu=False):\n\n train_size = math.ceil(adata.X.shape[0] * train_ratio)\n\n if gpu:\n from cuml.decomposition import PCA\n import cupy as cp\n else:\n from sklearn.decomposition import PCA\n import numpy as cp\n\n pca = PCA(n_components=n_components).fit(adata.X[:train_size])\n \n embeddings = cp.zeros((adata.X.shape[0], n_components))\n batch_size = int(embeddings.shape[0] / n_batches)\n for batch in range(n_batches):\n start_idx = batch * batch_size\n end_idx = start_idx + batch_size\n\n if(adata.X.shape[0] - end_idx < batch_size):\n end_idx = adata.X.shape[0]\n\n embeddings[start_idx:end_idx,:] = cp.asarray(pca.transform(adata.X[start_idx:end_idx]))\n \n if gpu:\n embeddings = embeddings.get()\n\n adata.obsm[\"X_pca\"] = embeddings\n return adata",
"def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)",
"def principle_component_analysis(data_frame, dim=2):\n pca = PCA(n_components=dim)\n sc = StandardScaler()\n y = data_frame.loc[:, [\"Label\"]].values\n x = pd.DataFrame(data_frame[\"Vector\"].tolist())\n x = sc.fit_transform(x)\n principlecomponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(data=principlecomponents)\n data_frame[\"Vector\"] = principalDf.values.tolist()",
"def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self",
"def postprocess(self, embeddings_batch):\n assert len(embeddings_batch.shape) == 2, \"Expected 2-d batch, got %r\" % (\n embeddings_batch.shape,\n )\n assert (\n embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE\n ), \"Bad batch shape: %r\" % (embeddings_batch.shape,)\n\n # Apply PCA.\n # - Embeddings come in as [batch_size, embedding_size].\n # - Transpose to [embedding_size, batch_size].\n # - Subtract pca_means column vector from each column.\n # - Premultiply by PCA matrix of shape [output_dims, input_dims]\n # where both are are equal to embedding_size in our case.\n # - Transpose result back to [batch_size, embedding_size].\n pca_applied = torch.mm(\n self.pca_eigen_vectors, (embeddings_batch.t() - self.pca_means)\n ).t()\n\n # Quantize by:\n # - clipping to [min, max] range\n clipped_embeddings = torch.clamp(\n pca_applied, vggish_params.QUANTIZE_MIN_VAL, vggish_params.QUANTIZE_MAX_VAL\n )\n # - convert to 8-bit in range [0.0, 255.0]\n quantized_embeddings = torch.round(\n (clipped_embeddings - vggish_params.QUANTIZE_MIN_VAL)\n * (\n 255.0\n / (vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)\n )\n )\n return torch.squeeze(quantized_embeddings)",
"def pca(points: np.ndarray, axis: Optional[Any] = None) -> np.ndarray:\n\n # Perform PCA to understand what the primary axis\n # of the given point set is\n mean = np.mean(points, axis=0)\n # Points have to be zero-mean\n centered = points - mean\n # np.linalg.eig takes a covariance matrix as an argument\n cov = np.cov(centered.T)\n # Call eigenvector decomposition to obtain principal components\n eigenval, eigenvec = np.linalg.eig(cov)\n # We want to parametrize target straight line\n # in the coordinate frame given by the eigenvector\n # that corresponds to the biggest eigenvalue\n argmax_eigen = np.argmax(eigenval)\n # We'll need projections of data points\n # on the primary axis\n loc_pca = np.dot(centered, eigenvec)\n loc_maxeigen = loc_pca[:, argmax_eigen]\n max_eigenval = eigenval[argmax_eigen]\n max_eigenvec = eigenvec[:, argmax_eigen]\n # Re-parametrize the line\n loc_start = mean + max_eigenvec * loc_maxeigen[0]\n loc_final = mean + max_eigenvec * loc_maxeigen[-1]\n linspace = np.linspace(0, 1, num=len(points))\n positions = loc_start + np.outer(linspace, loc_final - loc_start)\n\n if axis is not None:\n for ax in axis:\n ax.set_title(\"PCA\")\n ax.plot(points[:, 0], points[:, 1], 'or')\n ax.plot(positions[:, 0], positions[:, 1], 'o-', mfc='none')\n ax.grid(True, linestyle='--')\n ax.axis('equal')\n\n return positions",
"def align_meshes_pca(self, display_opt):\n # convert vtk points to numpy first\n vtk_pts = self.points\n numpy_pts = numpy_support.vtk_to_numpy(vtk_pts.GetData())\n\n # perform pca\n pca = PCA(n_components=3)\n trans_coords = pca.fit_transform(numpy_pts)\n eigenvectors = pca.components_\n eigenvalues = pca.explained_variance_ratio_\n\n # save pca vectors as global variables\n self.pca1 = eigenvectors[0]\n self.pca2 = eigenvectors[1]\n self.pca3 = eigenvectors[2]\n\n if display_opt:\n axes = get_axes_actor([80,80,80], [0,0,0])\n\n trans_act = include_points(trans_coords, trans_coords.shape[0], 4, (0,1,0))\n self.meshActor.GetProperty().SetOpacity(0.6)\n\n ren = vtk.vtkRenderer()\n ren.AddActor(self.meshActor)\n ren.AddActor(trans_act)\n ren.AddActor(axes)\n vtk_show(ren)\n\n # reset the self.attributes with transformed coordinates\n trans_vtk_pts = MakevtkPoints(trans_coords, deep=True)\n self.points = trans_vtk_pts\n self.mesh_poly.SetPoints(trans_vtk_pts)\n\n meshMapper = vtk.vtkPolyDataMapper()\n meshMapper.SetInputData(self.mesh_poly)\n\n self.meshActor = vtk.vtkActor()\n self.meshActor.SetMapper(meshMapper)\n self.meshActor.GetProperty().SetColor(1.0, 0.0, 0.0)",
"def PCA(data, n=2):\n U, S, Vt = np.linalg.svd(data, full_matrices=False)\n s = np.diag(S)\n newdata = np.dot(U[:, :n], np.dot(s[:n, :n], Vt[:n,:]))\n return newdata",
"def pca(X, ndim):\n\n Xmean = X - np.mean(X, axis=0)\n _, _, vh = np.linalg.svd(Xmean)\n W = vh[:ndim].T\n T = np.matmul(Xmean, W)\n\n return T",
"def pca(X, k = 30):\n \n # Center/scale the data.\n s = np.std(X, axis=0)\n s = np.where(s==0, 1, s)\n X = (X - np.mean(X, axis=0))/s\n \n # Run PCA with sklearn.\n pca_ = PCA(n_components=k)\n return pca_.fit_transform(X)",
"def PCA (numpy_cloud ):\r\n\r\n # abort, if there are no points\r\n if (numpy_cloud.shape[0] == 0):\r\n #print (\"In normals.py, in PCA: The input array is empty. Returning a null vector and high sigma\")\r\n return np.array ((0, 0, 0)), 1.0, np.array ((0, 0, 0))\r\n\r\n # we only need three colums [X, Y, Z, I] -> [X, Y, Z]\r\n numpy_cloud = numpy_cloud[:, :3].copy () # copying takes roughly 0.000558 seconds per 1000 points\r\n cloud_size = numpy_cloud.shape[0]\r\n\r\n # get covariance matrix\r\n a_transposed_a, mass_center = build_covariance_matrix (numpy_cloud )\r\n\r\n # get normal vector and smallest eigenvalue\r\n normal_vector, smallest_eigenvalue = eigenvalue_decomposition (a_transposed_a )\r\n\r\n # the noise is based on the smallest eigenvalue and normalized by number of points in cloud\r\n noise = smallest_eigenvalue\r\n if (cloud_size <= 3 or noise < 1 * 10 ** -10):\r\n sigma = noise # no noise with 3 points\r\n else:\r\n sigma = sqrt(noise/(cloud_size - 3) )\r\n\r\n return normal_vector, sigma, mass_center",
"def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s",
"def pca(X, ndim):\n X_m = X - np.mean(X, axis=0)\n u, s, vh = np.linalg.svd(X_m)\n # traditional notation decomp(A) = U (sigma) VT = (u * s) @ vh\n W = vh[0:ndim].T\n # X_m = X - np.mean(X, axis=0)\n return np.matmul(X_m, W)",
"def pca(X, k):\n n, dim = X.shape\n\n # Center the data\n X_mean = np.mean(X, axis = 0)\n X = X - X_mean\n # Get the covariance matrix\n covariance_matrix = np.dot(X.T, X) / (n - 1)\n eigval, eigvec = eigs(covariance_matrix, k)\n return np.array(eigvec), np.array(eigval)",
"def cluster_prob(self, embeddings: np.ndarray) -> List[Tuple[str, float]]:\n assert self._centroids != {} # Centroids must be set in advance\n \n # Setup known database\n cluster_ids, cluster_embs = zip(*self._centroids.items())\n cluster_embs = np.vstack(cluster_embs)\n \n # Calculate similarity with cluster centroids\n similarity = cosine_similarity(embeddings, cluster_embs)\n \n # Fetch the best-matching clusters\n results = []\n for i, idx in enumerate(similarity.argmax(axis=1)):\n results.append((cluster_ids[idx], similarity[i, idx]))\n return results",
"def plot_embedding_pca(features, labels):\n\n import bob.learn.linear\n import matplotlib.pyplot as mpl\n\n colors = ['#FF0000', '#FFFF00', '#FF00FF', '#00FFFF', '#000000',\n '#AA0000', '#AAAA00', '#AA00AA', '#00AAAA', '#330000']\n\n # Training PCA\n trainer = bob.learn.linear.PCATrainer()\n machine, lamb = trainer.train(features.astype(\"float64\"))\n\n # Getting the first two most relevant features\n projected_features = machine(features.astype(\"float64\"))[:, 0:2]\n\n # Plotting the classes\n n_classes = max(labels)+1\n fig = mpl.figure()\n\n for i in range(n_classes):\n indexes = numpy.where(labels == i)[0]\n\n selected_features = projected_features[indexes,:]\n mpl.scatter(selected_features[:, 0], selected_features[:, 1],\n marker='.', c=colors[i], linewidths=0, label=str(i))\n mpl.legend()\n return fig",
"def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX",
"def feature_cPCA(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv, axes=(1, 0, 2)), (N_SAMPLES * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores",
"def all_clusters_prob(self, embeddings: np.ndarray, use_softmax: bool = False) -> Tuple[List[str], np.ndarray]:\n assert self._centroids != {} # Centroids must be set in advance\n \n # Setup known database\n cluster_ids, cluster_embs = zip(*sorted(self._centroids.items(), key=lambda x: x[0] if x[0] else ''))\n cluster_embs = np.vstack(cluster_embs)\n \n # Calculate similarity with cluster centroids\n similarity = cosine_similarity(embeddings, cluster_embs)\n return cluster_ids, softmax(similarity, axis=1) if use_softmax else similarity",
"def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)",
"def pca_data(train_data_lst, test_data_lst, data_anots):\r\n \r\n train_data_pca = []\r\n test_data_pca = []\r\n new_anots = []\r\n\r\n for idx in range(len(train_data_lst)):\r\n pca = PCA(n_components=0.985)\r\n X_train = pca.fit_transform(train_data_lst[idx])\r\n train_data_pca.append(X_train)\r\n \r\n X_test = pca.transform(test_data_lst[idx])\r\n test_data_pca.append(X_test)\r\n new_anots.append(data_anots[idx]+'_pca')\r\n return train_data_pca, test_data_pca, new_anots",
"def feature_cPCA24(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv[:24, :, :], axes=(1, 0, 2)), (24 * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores",
"def apply_PCA(data, ncomp):\n import sklearn.decomposition as dc\n \n pca = dc.PCA(n_components=ncomp, whiten=False, svd_solver='full')\n cps = pca.fit_transform(data)\n svl = pca.singular_values_\n return cps,pca,svl",
"def get_word_cluster_pairs(cls, clusters, words):\n\n print \"Getting the associations with clusters\", clusters\n\n associations = db.session.query(cls.cluster_id, cls.word).filter(\n cls.cluster_id.in_(clusters), cls.word.in_(words)).all()\n\n return associations",
"def pca(filename, class_col, sample):\n\n\tX = ml.read_file( filename )\n\n\t# Remove the class label from the dataset so that it doesn't prevent us from training a classifier in the future\n\tif class_col != None:\n\t\ttry:\n\t\t\tclassifier = ml.pd.DataFrame(X.iloc[:, class_col])\n\t\texcept:\n\t\t\tml.sys.exit('Class column out of range.')\n\t\tm = X.shape[1]\n\t\tkeepers = list(range(m))\n\t\tkeepers.pop( class_col )\n\n\t# Determine whether sample is present\n\tX_input = X.iloc[:, keepers]\n\n\t# # Visualize raw data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = X, x = X_input['Petal Length (cm)'], y = X_input['Petal Width (cm)'], color = 'k', alpha = 0.5).set(title = filename + ' raw')\n\n\t# Normalize features by Z-score (so that features' units don't dominate PCs), and apply PCA\n\tX_norm, X_mean, X_std = ml.z_norm(X_input)\n\tY, P, e_scaled = ml.pca_cov( X_norm )\n\n\t# Visualize 2D PC data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = Y, x = Y.iloc[:, 0], y = Y.iloc[:, 1], alpha=0.5, color = 'k').set(title = 'PC 2D Projection')\n\n\t# Visualize PCs with heatmap and cree plot\n\tinfo_retention = ml.scree_plot( e_scaled )\n\tml.pc_heatmap( P, info_retention )\n\n\t# Reconstruct data\n\treconstruct(X_input, X_mean, X_std, Y, P, e_scaled, 2, 3)\n\n\tml.plt.show()",
"def _pca(self):\n mean_beam = np.mean(self.beam_images, axis=1, keepdims=False)\n mask = self.mask\n beam_images = self.beam_images[:, :self.n_beam_images]\n\n # Subtract mean_beam from images and apply the mask. Element-wise\n # multiplication and subtraction using numpy broadcasting (as commented\n # out below) requires 3 large matrices in memory at an intermediate\n # point in the computation, namely right after (beam_images -\n # mean_beam_2d) is evaluated and memory for centered_masked_images is\n # allocated.\n # mask_2d = mask[:,np.newaxis]\n # mean_beam_2d = mean_beam[:,np.newaxis]\n # centered_masked_images = mask_2d * (beam_images - mean_beam_2d)\n\n # Instead of that direct approach, use self._center_and_mask_numba() or\n # self._center_and_mask_in_place(). As of this writing the _in_place\n # version is faster, but this may change in the future since the numba\n # version supports parallelization.\n centered_masked_images = self._center_and_mask_in_place(\n beam_images,\n mask,\n mean_beam,\n )\n # centered_masked_images should be C-contiguous already but it's good to\n # make sure.\n centered_masked_images = np.ascontiguousarray(centered_masked_images)\n\n # Compute the masked principal components\n # -1 since last eigenvector isn't necessarily orthogonal to the others.\n n_eigs = min(self.n_beam_images - 1, self.max_principal_components)\n n_eigs = max(n_eigs, 1) # Need at least one.\n # .T means transpose, @ means matrix multiplication.\n cov_mat = centered_masked_images.T @ centered_masked_images\n del centered_masked_images # Free up memory.\n if self.use_sparse_routines:\n variances, principal_components = eigsh(\n cov_mat, k=n_eigs, which='LM')\n else:\n eigvals_param = (\n self.n_beam_images - n_eigs,\n self.n_beam_images - 1)\n # overwrite_a might reduce memory usage\n variances, principal_components = eigh(\n cov_mat, eigvals=eigvals_param, overwrite_a=True)\n del cov_mat # Free up memory.\n\n # Reverse ordering to put largest eigenvectors/eigenvalues first\n principal_components = np.fliplr(principal_components)\n variances = np.flip(variances)\n\n # principal_components isn't always C-contiguous, and when it's not the\n # matrix multiplication below becomes extremely slow. It's much faster\n # to make it C-contiguous first so that numpy can use faster matrix\n # multiplication routines behind the scenes.\n principal_components = np.ascontiguousarray(principal_components)\n\n # Construct the un-masked basis vectors.\n centered_images = beam_images - mean_beam[:, np.newaxis]\n # centered_images should be C-contiguous already but it's good to make\n # sure.\n centered_images = np.ascontiguousarray(centered_images)\n principal_components = centered_images @ principal_components\n del centered_images # Free up memory.\n\n # As of this writing, self._normalize_vectorized() is faster than using\n # self._normalize_numba() despite the fact that the latter is uses numba\n # and allows for parallelization. That may change in the future though.\n principal_components = self._normalize_vectorized(\n principal_components,\n mask,\n )\n\n return mean_beam, principal_components, variances"
] |
[
"0.6045342",
"0.5942827",
"0.58956623",
"0.58551455",
"0.5813153",
"0.57986903",
"0.5774442",
"0.5707375",
"0.5700203",
"0.5676374",
"0.5656551",
"0.56111175",
"0.56064177",
"0.55749196",
"0.55516756",
"0.5547325",
"0.55451876",
"0.55138457",
"0.5510211",
"0.55065274",
"0.54997855",
"0.5469571",
"0.5447762",
"0.5442962",
"0.5433995",
"0.5387004",
"0.535895",
"0.53571606",
"0.5351158",
"0.5336863"
] |
0.75337654
|
0
|
Argument parser for the Nussinov program
|
def setParser():
parser = argparse.ArgumentParser(
prog="Nussinov Algorithm Solver",
description="A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings."
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--filepath", help="the path to a text file with a sequence")
group.add_argument("-s", "--sequence", help="the RNA sequence to evaluate")
parser.add_argument("-v", "--verbose", action="store_true", help="More verbose output")
parser.add_argument("-u", "--uncommon", action="store_true", help="Use Uncommon RNA matches (G,U)")
return parser
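
A minimal usage sketch for the parser above (added for illustration, not part of the original entry). It assumes argparse is imported in the module that defines setParser, and simulates a hypothetical invocation with a literal sequence:

import argparse  # required by setParser above

if __name__ == "__main__":
    parser = setParser()
    # Simulate a command line: evaluate the strand GCAU with verbose output.
    args = parser.parse_args(["-s", "GCAU", "-v"])
    print(args.sequence, args.verbose, args.uncommon)  # -> GCAU True False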
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Args(parser):",
"def parse_arguments(args):",
"def parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nNenG - Nash Equilibrium Noncooperative games.\nTool for computing Nash equilibria in noncooperative games.\nSpecifically:\nAll pure Nash equilibria in all games (--method=pne).\nAll mixed Nash equilibria in two-players games (--method=support_enumeration).\nOne sample mixed Nash equilibria in n-players games (--method={CMAES,L-BFGS-B,SLSQP}).\n\"\"\")\n pa = parser.add_argument\n pa('-f', '--file', required=True, help=\"File where game in nfg format is saved.\")\n pa('-m', '--method', default='CMAES', choices=game.Game.METHODS,\n help=\"Method to use for computing Nash equlibria.\")\n pa('-e', '--elimination', action='store_true', default=False,\n help=\"Use Iterative Elimination of Strictly Dominated Strategies before computing NE.\")\n pa('-p', '--payoff', action='store_true', default=False,\n help=\"Print also players payoff with each Nash equilibrium.\")\n pa('-c', '--checkNE', action='store_true', default=False,\n help=\"After computation check if found strategy profile is really Nash equilibrium.\")\n pa('-t', '--trim', choices=('normalization', 'penalization'), default='normalization',\n help=\"Method for keeping strategy profile in probability distribution universum.\")\n pa('-l', '--log', default=\"WARNING\",\n choices=(\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"),\n help=\"Level of logs to save/print\")\n pa('--log-file', default=None, help='Log file. If omitted log is printed to stdout.')\n return parser.parse_args()",
"def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument('infile',\n help='path to file containing objects')\n p.add_argument('n1',\n help='night 1')\n p.add_argument('n2',\n help='night 2')\n p.add_argument('observatory',\n help='Astropy name of observatory')\n return p.parse_args()",
"def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()",
"def parse_user_arguments(*args, **kwds):\n\n parser = argparse.ArgumentParser(\n description = \"Comparison of PPI networks\",\n epilog = \"@oliva's lab 2017\")\n parser.add_argument('-n','--network_file',dest='network_file',action = 'store',\n help = \"\"\"\" Input file with a protein-protein interaction network in SIF format. \"\"\")\n parser.add_argument('-nf','--network_format',dest='network_format',action = 'store',default='multi-fields',\n help = '''Format of the edge file (network):\\tsif, netscore, raw, multi-fields (default):\\n\n 'sif': <node1>\\tscore\\t<node2>\\n\n 'netscore': <node1>\\t<node2>\\t<score>\\n\n 'raw': <node1>\\t<node2>\\n\n 'multi-fields' : <node1>\\t<node2>\\t<sources>\\t<method_ids>\\t<method_names>\\t<pmids>\\n''')\n parser.add_argument('-trans','--translation_file',dest='translation_file',action = 'store',\n help = \"\"\"\" Input file with the translation file of biana codes to geneID \"\"\")\n parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),\n help = \"\"\"Define the workspace directory where the results will be created. \"\"\")\n\n options=parser.parse_args()\n\n return options",
"def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Semantic Segmentation')\n # model and dataset\n parser.add_argument('--model', type=str, default='fcn',\n help='model name (default: fcn)')\n parser.add_argument('--model-zoo', type=str, default=None,\n help='evaluating on model zoo model')\n parser.add_argument('--pretrained', action=\"store_true\",\n help='whether to use pretrained params')\n parser.add_argument('--backbone', type=str, default='resnet50',\n help='backbone name (default: resnet50)')\n parser.add_argument('--dataset', type=str, default='pascal',\n help='dataset name (default: pascal)')\n parser.add_argument('--workers', type=int, default=16,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=520,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=480,\n help='crop image size')\n parser.add_argument('--train-split', type=str, default='train',\n help='dataset train split (default: train)')\n # training hyper params\n parser.add_argument('--aux', action='store_true', default=False,\n help='Auxiliary loss')\n parser.add_argument('--aux-weight', type=float, default=0.5,\n help='auxiliary loss weight')\n parser.add_argument('--epochs', type=int, default=50, metavar='N',\n help='number of epochs to train (default: 50)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=16,\n metavar='N', help='input batch size for \\\n training (default: 16)')\n parser.add_argument('--test-batch-size', type=int, default=16,\n metavar='N', help='input batch size for \\\n testing (default: 16)')\n parser.add_argument('--optimizer', type=str, default='sgd',\n help='optimizer (default: sgd)')\n parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',\n help='learning rate (default: 1e-3)')\n parser.add_argument('--warmup-epochs', type=int, default=0,\n help='number of warmup epochs.')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=1e-4,\n metavar='M', help='w-decay (default: 1e-4)')\n parser.add_argument('--no-wd', action='store_true',\n help='whether to remove weight decay on bias, \\\n and beta/gamma for batchnorm layers.')\n parser.add_argument('--mode', type=str, default=None,\n help='whether to turn on model hybridization')\n # cuda and distribute\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--ngpus', type=int,\n default=len(mx.test_utils.list_gpus()),\n help='number of GPUs (default: 4)')\n parser.add_argument('--kvstore', type=str, default='device',\n help='kvstore to use for trainer/module.')\n parser.add_argument('--dtype', type=str, default='float32',\n help='data type for training. 
default is float32')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default='default',\n help='set the checkpoint name')\n parser.add_argument('--save-dir', type=str, default=None,\n help='directory of saved models')\n parser.add_argument('--log-interval', type=int, default=20,\n help='Number of batches to wait before logging.')\n parser.add_argument('--logging-file', type=str, default='train.log',\n help='name of training log file')\n # evaluation only\n parser.add_argument('--eval', action='store_true', default=False,\n help='evaluation only')\n parser.add_argument('--no-val', action='store_true', default=False,\n help='skip validation during training')\n # synchronized Batch Normalization\n parser.add_argument('--syncbn', action='store_true', default=False,\n help='using Synchronized Cross-GPU BatchNorm')\n # the parser\n args = parser.parse_args()\n\n # handle contexts\n if args.no_cuda:\n print('Using CPU')\n args.kvstore = 'local'\n args.ctx = [mx.cpu(0)]\n else:\n print('Number of GPUs:', args.ngpus)\n assert args.ngpus > 0, 'No GPUs found, please enable --no-cuda for CPU mode.'\n args.ctx = [mx.gpu(i) for i in range(args.ngpus)]\n\n if 'psp' in args.model or 'deeplab' in args.model:\n assert args.crop_size % 8 == 0, ('For PSPNet and DeepLabV3 model families, '\n 'we only support input crop size as multiples of 8.')\n\n # logging and checkpoint saving\n if args.save_dir is None:\n args.save_dir = \"runs/%s/%s/%s/\" % (args.dataset, args.model, args.backbone)\n makedirs(args.save_dir)\n\n # Synchronized BatchNorm\n args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn \\\n else mx.gluon.nn.BatchNorm\n args.norm_kwargs = {'num_devices': args.ngpus} if args.syncbn else {}\n return args",
"def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')\n parser.add_argument('--host', type=str, default='xxx',\n help='xxx is a place holder')\n parser.add_argument('--model', type=str, default='ResFPN',\n help='model name: ResNetFPN, ResUNet')\n parser.add_argument('--fuse-mode', type=str, default='AsymBi',\n help='DirectAdd, Concat, SK, BiLocal, BiGlobal, AsymBi, '\n 'TopDownGlobal, TopDownLocal')\n parser.add_argument('--tiny', action='store_true', default=False,\n help='evaluation only')\n parser.add_argument('--blocks', type=int, default=3,\n help='block num in each stage')\n parser.add_argument('--channel-times', type=int, default=1,\n help='times of channel width')\n parser.add_argument('--dataset', type=str, default='DENTIST',\n help='dataset name: DENTIST, Iceberg, StopSign')\n parser.add_argument('--workers', type=int, default=48,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=512,\n help='base image size')\n parser.add_argument('--iou-thresh', type=float, default=0.5,\n help='iou-thresh')\n parser.add_argument('--crop-size', type=int, default=480,\n help='crop image size')\n parser.add_argument('--train-split', type=str, default='trainval',\n help='dataset train split (default: train)')\n parser.add_argument('--val-split', type=str, default='test',\n help='dataset val split (default: val)')\n # training hyper params\n parser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 110)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=8,\n metavar='N', help='input batch size for \\\n training (default: 16)')\n parser.add_argument('--test-batch-size', type=int, default=8,\n metavar='N', help='input batch size for \\\n testing (default: 32)')\n parser.add_argument('--optimizer', type=str, default='adagrad',\n help='sgd, adam, adagrad')\n parser.add_argument('--lr', type=float, default=0.05, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr-decay', type=float, default=0.1,\n help='decay rate of learning rate. default is 0.1.')\n parser.add_argument('--gamma', type=int, default=2,\n help='gamma for Focal Soft IoU Loss')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=1e-4,\n metavar='M', help='w-decay (default: 1e-4)')\n parser.add_argument('--no-wd', action='store_true',\n help='whether to remove weight decay on bias, \\\n and beta/gamma for batchnorm layers.')\n parser.add_argument('--score-thresh', type=float, default=0.5,\n help='score-thresh')\n parser.add_argument('--warmup-epochs', type=int, default=0,\n help='number of warmup epochs.')\n # cuda and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--gpus', type=str, default='0',\n help='Training with GPUs, you can specify 1,3 for example.')\n parser.add_argument('--kvstore', type=str, default='device',\n help='kvstore to use for trainer/module.')\n parser.add_argument('--dtype', type=str, default='float32',\n help='data type for training. default is float32')\n parser.add_argument('--wd', type=float, default=0.0001,\n help='weight decay rate. 
default is 0.0001.')\n parser.add_argument('--log-interval', type=int, default=50,\n help='Number of batches to wait before logging.')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--colab', action='store_true', default=\n False, help='whether using colab')\n parser.add_argument('--save-dir', type=str, default=None,\n help='directory of saved models')\n # evaluation only\n parser.add_argument('--eval', action='store_true', default= False,\n help='evaluation only')\n parser.add_argument('--no-val', action='store_true', default= False,\n help='skip validation during training')\n parser.add_argument('--metric', type=str, default='mAP',\n help='F1, IoU, mAP')\n parser.add_argument('--logging-file', type=str, default='train.log',\n help='name of training log file')\n parser.add_argument('--summary', action='store_true',\n help='print parameters')\n # synchronized Batch Normalization\n parser.add_argument('--syncbn', action='store_true', default= False,\n help='using Synchronized Cross-GPU BatchNorm')\n # the parser\n args = parser.parse_args()\n # handle contexts\n if args.no_cuda or (len(mx.test_utils.list_gpus()) == 0):\n print('Using CPU')\n args.kvstore = 'local'\n args.ctx = [mx.cpu(0)]\n else:\n args.ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]\n print('Number of GPUs:', len(args.ctx))\n\n # logging and checkpoint saving\n if args.save_dir is None:\n args.save_dir = \"runs/%s/%s/\" % (args.dataset, args.model)\n makedirs(args.save_dir)\n\n # Synchronized BatchNorm\n args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn \\\n else mx.gluon.nn.BatchNorm\n args.norm_kwargs = {'num_devices': len(args.ctx)} if args.syncbn else {}\n print(args)\n return args",
"def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')\n\n parser.add_argument('--model', type=str, default='fcn', help='model name (default: fcn)')\n parser.add_argument('--backbone', type=str, default='resnet50', help='backbone name (default: resnet50)')\n parser.add_argument('--dataset', type=str, default='pascalaug', help='dataset name (default: pascal)')\n parser.add_argument('--dataset-dir', type=str, default='../imgclsmob_data/voc', help='dataset path')\n parser.add_argument('--workers', type=int, default=16, metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=520, help='base image size')\n parser.add_argument('--crop-size', type=int, default=480, help='crop image size')\n\n parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for testing')\n\n parser.add_argument('--ngpus', type=int, default=len(mx.test_utils.list_gpus()), help='number of GPUs (default: 4)')\n\n # checking point\n parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default='default', help='set the checkpoint name')\n parser.add_argument('--model-zoo', type=str, default=None, help='evaluating on model zoo model')\n\n # the parser\n args = parser.parse_args()\n # handle contexts\n if args.ngpus == 0:\n args.ctx = [mx.cpu(0)]\n else:\n args.ctx = [mx.gpu(i) for i in range(args.ngpus)]\n args.test_batch_size = args.ngpus if args.ngpus > 0 else 1\n print(args)\n return args",
"def parse_user_arguments(*args, **kwds):\n\n parser = argparse.ArgumentParser(\n description = \"Generates the profiles of the input drug by using a network of expansion created expanding the protein-protein interactions from the targets of the drug\",\n epilog = \"@oliva's lab 2017\")\n parser.add_argument('-d','--drug_name',dest='drug_name',action = 'store',\n help = \"\"\" Name of the drug. If you do not provide targets for this drug or the number of targets is not large enough,\n the program will use this name to search for targets in BIANA database. If targets are provided, this field will be only used\n for naming purposes and will be completely optional.\n If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between \n double quotes. \"\"\")\n parser.add_argument('-t','--targets',dest='targets',action = 'store',\n help = 'Input file with the targets of the drug. Each target must be separated by a newline character.')\n parser.add_argument('-pt','--proteins_type_id',dest='proteins_type_id',action = 'store', default='geneid',\n help = 'Input the type of ID of the targets introduced / proteins of the network. It must be the same! (default is geneid).')\n parser.add_argument('-rad','--radius',dest='radius',action = 'store',default='3',\n help = \"\"\" Define the radius of expansion for the creation of the network from targets (default is 3). \"\"\")\n parser.add_argument('-tax','--taxid',dest='taxid',action = 'store',default='9606',\n help = \"\"\"Define the restriction of species for the creation of the network from targets using a Taxonomy ID (default is '9606' (human))\"\"\")\n parser.add_argument('-res','--restriction',dest='restriction',action = 'store',\n help = \"\"\"Define an experiment restriction for the creation of the network from targets.\\n\n Options:\\n\n - AFF: Use interactions at least described by affinity methods (i.e. Tandem Affinity Purification)\\n\n - Y2H: Use interactions at least described by yeast two hybrid methods (Y2H)\\n\n - eAFF: Use all interactions except those described by affinity methods (i.e. Tandem Affinity Purification)\\n\n - eY2H: Use all interactions except those described by yeast two hybrid methods (Y2H)\\n\n - None: Not use experiment restrictions\n \"\"\")\n parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',\n help = \"\"\"List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:\n - Different numbers that will be the threshold values separated by newline characters. \n For example, a file called \"top_threshold.list\" containing:\n 0.1\n 0.5\n 1\n 5\n 10\n \"\"\")\n parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),\n help = \"\"\"Define the workspace directory where the data directory and the results directory will be created\"\"\")\n\n options=parser.parse_args()\n\n return options",
"def parse_args():\n # TODO default for model, conn, socket\n parser = argparse.ArgumentParser(description='Anomaly-based Network Intrusion Detection')\n parser.add_argument('--model', choices=model_choice.keys(),\n help='Model to train & use', metavar='model', required=True) # default=model_choice['Kitsune']\n # NOTE: args for offline models\n parser.add_argument('--csv', help='csv file to read network flow data from')\n parser.add_argument('--csv-dir', help='directory to read csv network flow data from')\n parser.add_argument('--pcap', help='pcap file to read data from')\n parser.add_argument('--pcap-dir', help='directory to read pcaps from')\n parser.add_argument('--parquet', help='parquet directory to read network flow data from')\n parser.add_argument('--parquet-dir', help='parquet directory to multiple parquet directory network flow data from')\n parser.add_argument('--tune', action='store_true', help='tune model before training to find best hyperparameters')\n # NOTE: args for online models\n parser.add_argument('--tsv', help='tsv file to read data from')\n parser.add_argument('--conn', help='Connection audit record file (netcap)') # default='/tmp/Connection.sock'\n parser.add_argument('--arp', help='ARP audit record file (netcap)')\n parser.add_argument('--credentials', help='Credentials audit record file (netcap)')\n parser.add_argument('--device', help='Device Profile audit record file (netcap)')\n parser.add_argument('--dhcpv4', help='DHCPv4 audit record file (netcap)')\n parser.add_argument('--dhcpv6', help='DHCPv6 audit record file (netcap)')\n parser.add_argument('--dns', help='DNS audit record file (netcap)')\n parser.add_argument('--ethernet', help='Ethernet audit record file (netcap)')\n parser.add_argument('--exploit', help='Exploit audit record file (netcap)')\n parser.add_argument('--http', help='HTTP audit record file (netcap)')\n parser.add_argument('--icmpv4', help='ICMPv4 audit record file (netcap)')\n parser.add_argument('--icmpv6', help='ICMPv6 audit record file (netcap)')\n parser.add_argument('--igmp', help='IGMP audit record file (netcap)')\n parser.add_argument('--ipprofile', help='IPProfile audit record file (netcap)')\n parser.add_argument('--ipv4', help='IPv4 audit record file (netcap)')\n parser.add_argument('--ipv6hop', help='IPv6HopByHop audit record file (netcap)')\n parser.add_argument('--ipv6', help='IPv6 audit record file (netcap)')\n parser.add_argument('--ntp', help='NTP audit record file (netcap)')\n parser.add_argument('--service', help='Servie audit record file (netcap)')\n parser.add_argument('--sip', help='SIP audit record file (netcap)')\n parser.add_argument('--software', help='Software audit record file (netcap)')\n parser.add_argument('--tcp', help='TCP audit record file (netcap)')\n parser.add_argument('--tls-client-hello', help='TLSClientHello audit record file(netcap)')\n parser.add_argument('--tls-server-hello', help='TLSServerHello audit record file(netcap)')\n parser.add_argument('--udp', help='UDP audit record file (netcap)')\n parser.add_argument('--vulnerability', help='vulnerability audit record file (netcap)')\n parser.add_argument('--audit', help='Read the given audit record types from unix sockets',\n choices=audit_records, nargs='*')\n parser.add_argument('--encoded', action='store_true', help='Read encoded audit records')\n parser.add_argument('--labelled', action='store_true', help='Read labelled audit records')\n return parser.parse_args()",
"def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p",
"def parse_args():\n parser = argparse.ArgumentParser(description=_program_description)\n parser.add_argument('input_file', help=_input_file_description)\n #parser.add_argument('-v', '--verbose', action='store_true', \n # default=False, help='show progress')\n args = parser.parse_args()\n return args",
"def parse(self, args):\n pass",
"def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()",
"def parseArgs( argv ):\r\n\r\n parser = OptionParser()\r\n parser.add_option(\"-o\", \"--order\", type=\"choice\", action=\"store\", choices=[\"p\",\"h\",\"g\",\"c\"], default=\"p\", dest=\"orderType\",\r\n help=\"specify a display sorted by (p)lugin, (c)ve id, (h)ost, or just (g)enerate a hostfile\")\r\n parser.add_option(\"-f\", \"--odf\", type=\"string\", action=\"store\", dest=\"odfOutputFilename\",\r\n help=\"output to this file in ODF format\", default=\"\")\r\n parser.add_option(\"-p\", \"--portlist\", type=\"string\", action=\"store\", dest=\"portList\",\r\n help=\"specify specific ports to show\")\r\n parser.add_option(\"-r\", \"--riskfactors\", type=\"string\", action=\"store\", dest=\"riskFactorsList\", default=\"critical,high,moderate,medium,low,none\",\r\n help=\"specify list of allowable risk factors (default is any of critical,high,moderate,medium,low,none\")\r\n parser.add_option(\"-t\", \"--hostlist\", type=\"string\", action=\"store\", dest=\"hostList\",\r\n help=\"specify specific hosts to show\")\r\n parser.add_option(\"-s\", \"--severities\", type=\"string\", action=\"store\", dest=\"severityList\", default=\"critical_hole,hole,warn,note,info,openport\",\r\n help=\"specify specific list of severity codes to show (default is any of critical_hole,hole,warn,note,info,openport\")\r\n parser.add_option(\"-q\", \"--query\", type=\"string\", action=\"store\", dest=\"contentQuery\",\r\n help=\"show all results whose synopses match this regular expression\")\r\n parser.add_option(\"-i\", \"--idlist\", type=\"string\", action=\"store\", dest=\"pluginIDList\",\r\n help=\"display only results that match these Nessus plugin IDs\")\r\n parser.add_option(\"-c\", \"--csv\", type=\"string\", action=\"store\", dest=\"csvOutputFilename\", default=\"\",\r\n help='output CSV-friendly text delimitted by default or overriden delimiter to a given filename (use \"0\" for standard output)')\r\n# parser.add_option(\"-c\", \"--csv\", action=\"store_true\", dest=\"CSV\", default=False,\r\n# help=\"output CSV-friendly text delimitted by |++| (overridable with the 'd' option)\")\r\n parser.add_option(\"-d\", \"--delimiter\", type=\"string\", action=\"store\", dest=\"delimiter\", default=\"|++|\", \r\n help=\"override CSV delimiter default of |++|\")\r\n\r\n (options, args) = parser.parse_args() \r\n \r\n if options.orderType: options.orderType = options.orderType.lower()\r\n\r\n return (options,args)",
"def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")",
"def parse_command_line(argv):\n import argparse\n parser = argparse.ArgumentParser(\n description=\"\"\"\\\nShow information about a list of scales in Neuroglancer \"info\" JSON file format\n\"\"\")\n parser.add_argument(\"info_file\", nargs=\"?\", default=\"./info\",\n help=\"JSON file containing the information\")\n args = parser.parse_args(argv[1:])\n return args",
"def parse_user_arguments(*args, **kwds):\n\n parser = argparse.ArgumentParser(\n description = \"Validate network using reference networks\",\n epilog = \"@oliva's lab 2017\")\n parser.add_argument('-n','--network_file',dest='network_file',action = 'store',\n help = \"\"\" Input file with a protein-protein interaction network. \"\"\")\n parser.add_argument('-t','--type_id',dest='type_id',action = 'store',\n help = \"\"\" Type of ID of the protein-protein interaction network (i.e. geneID, uniprotaccession, genesymbol) \"\"\")\n parser.add_argument('-f','--network_format',dest='network_format',action = 'store',\n help = '''Format file of the network file:\\tsif, netscore, raw, multi-fields:\\n\n 'sif': <node1>\\tscore\\t<node2>\\n\n 'netscore': <node1>\\t<node2>\\t<score>\\n\n 'raw': <node1>\\t<node2>\\n\n 'multi-fields' : <node1>\\t<node2>\\t<sources>\\t<method_ids>\\t<method_names>\\t<pmids>\\n''')\n\n options=parser.parse_args()\n\n return options",
"def parse_inputs():\n\n parser = argparse.ArgumentParser(description=\"Command line argument handler for ugaudio spectral_average program.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # a group of args for verbosity\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-v', '--verbose', action='store_true')\n group.add_argument('-q', '--quiet', action='store_true')\n\n # nfiles\n help_nfiles = 'nfiles to preview (ONLY for testing)'\n parser.add_argument('-g', '--nfiles', default=DEFAULT_NFILES, type=nfiles_int, help=help_nfiles)\n\n # nfft\n help_nfft = 'Nfft'\n parser.add_argument('-n', '--nfft', default=DEFAULT_NFFT, type=nfft_int, help=help_nfft)\n\n # sample rate\n help_rate = 'sample rate (sa/sec)'\n parser.add_argument('-r', '--rate', default=DEFAULT_RATE, type=rate_str, help=help_rate)\n\n # cutoff\n help_cutoff = 'cutoff'\n parser.add_argument('-c', '--cutoff', default=DEFAULT_CUTOFF, type=cutoff_str, help=help_cutoff)\n\n # sensors\n help_sensors = 'sensors'\n parser.add_argument('-s', '--sensors', default=DEFAULT_SENSORS, type=sensors_list, help=help_sensors)\n\n # PAD directory\n help_paddir = 'PAD dir'\n parser.add_argument('-p', '--paddir', default=DEFAULT_PADDIR, type=folder_str, help=help_paddir)\n\n # output directory\n help_outdir = 'output dir'\n parser.add_argument('-o', '--outdir', default=DEFAULT_OUTDIR, type=outdir_str, help=help_outdir)\n\n # start date\n help_start = 'start date'\n parser.add_argument('-d', '--start', default=DEFAULT_START, type=dtm_date, help=help_start)\n\n # end date\n help_end = 'end date'\n parser.add_argument('-e', '--end', default=DEFAULT_END, type=dtm_date, help=help_end)\n\n # parse arguments\n module_logger.debug('calling parse_args')\n args = parser.parse_args()\n\n return args",
"def ParseArgs() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--input', dest='input', help='Japanese phonetic reading file')\n parser.add_argument(\n '--output_token_array',\n dest='output_token_array',\n help='Output token array file.')\n parser.add_argument(\n '--output_string_array',\n dest='output_string_array',\n help='Output string array file.')\n return parser.parse_args()",
"def parse_args():\n parser = argparse.ArgumentParser(description=\"Run NCF..\")\n parser.add_argument(\n \"--config_file\",\n nargs=\"?\",\n type=str,\n default=\"../configs/ncf_default.json\",\n help=\"Specify the config file name. Only accept a file from ../configs/\",\n )\n # If the following settings are specified with command line,\n # These settings will used to update the parameters received from the config file.\n parser.add_argument(\n \"--dataset\",\n nargs=\"?\",\n type=str,\n help=\"Options are: tafeng, dunnhunmby and instacart\",\n )\n parser.add_argument(\n \"--data_split\",\n nargs=\"?\",\n type=str,\n help=\"Options are: leave_one_out and temporal\",\n )\n parser.add_argument(\n \"--root_dir\", nargs=\"?\", type=str, help=\"working directory\",\n )\n parser.add_argument(\n \"--emb_dim\", nargs=\"?\", type=int, help=\"Dimension of the embedding.\"\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, help=\"Intial learning rate.\")\n parser.add_argument(\"--max_epoch\", nargs=\"?\", type=int, help=\"Number of max epoch.\")\n parser.add_argument(\n \"--batch_size\", nargs=\"?\", type=int, help=\"Batch size for training.\"\n )\n parser.add_argument(\"--optimizer\", nargs=\"?\", type=str, help=\"OPTI\")\n parser.add_argument(\"--activator\", nargs=\"?\", type=str, help=\"activator\")\n parser.add_argument(\"--alpha\", nargs=\"?\", type=float, help=\"ALPHA\")\n return parser.parse_args()",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-i1\",\n help=\"\"\"viral alignments\"\"\",\n dest=\"viral\",\n required=True)\n parser.add_argument(\"-i2\",\n help=\"\"\"GTA alignments\"\"\",\n dest=\"gta\",\n required=True)\n parser.add_argument(\"-o\",\n dest=\"output\",\n help=\"output image file\")\n return parser",
"def parse_args():\n parser = argparse.ArgumentParser(\n description=\"pop-nedry Win64 shellcode build script\"\n )\n\n parser.add_argument(\n '--url', type=str, required=True,\n help='URL for web page hosting the Nedry GIF'\n )\n\n return parser.parse_args()",
"def _cli_parser():\n parser = argparse.ArgumentParser()\n # Original flags\n parser.add_argument('-s', type=str, metavar='input_vol',\n help=\"Absolute path to input volume. Input should be \"\n \"in nifti format\")\n parser.add_argument('-o', type=str, metavar='output_dir',\n help=\"Absolute path to output directory\")\n parser.add_argument('-p', type=str, metavar='template_type', default='MNI152_orig',\n help=\"Type of volumetric template used in index files. \"\n \"Use MNI152_orig or Colin27_orig when -r is \"\n \"RF_ANTs. Use MNI152_norm or Colin27_norm when \"\n \"-r is RF_M3Z. Otherwise, an exception is raised. \"\n \"Ensure that the template matches the standard \"\n \"space of -i (i.e., use MNI152_* if -i is \"\n \"in MNI152-space). Default: MNI152_orig\")\n parser.add_argument('-r', type=str, metavar='RF_type', default='RF_ANTs',\n help=\"Type of Registration Fusion approaches used to \"\n \"generate the mappings (RF_M3Z or RF_ANTs). \" \n \"RF_M3Z is recommended if data was registered \" \n \"from subject's space to the volumetric atlas \" \n \"space using FreeSurfer. RF_ANTs is recommended \" \n \"if such registrations were carried out using \" \n \"other tools, especially ANTs. Default: RF_ANTs\")\n parser.add_argument('-i', type=str, metavar='interp', default='linear',\n help=\"Interpolation (linear or nearest). If \"\n \"-g is label.gii, then interpolation is always set \"\n \"to nearest and a warning is raised. Default: \"\n \"linear\")\n # New flags\n parser.add_argument('-t', type=str, metavar='out_type', default='nii.gz',\n help=\"File type of surface files. nii.gz is true to \"\n \"the original Wu et al (2018) implementation. \"\n \"Note that gifti formats, either \"\n \"func.gii or label.gii, are often preferred. \"\n \"Default: nii.gz\")\n return parser.parse_args()",
"def parse_args():\n parser = argparse.ArgumentParser(description='Train and evaluate SESEMI.')\n parser.add_argument('--network', dest='network', type=str, required=True)\n parser.add_argument('--dataset', dest='dataset', type=str, required=True)\n parser.add_argument('--labels', dest='nb_labels', type=int, required=True)\n parser.add_argument('--gpu', dest='gpu_id', type=str, default='0')\n args = parser.parse_args()\n return args",
"def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()",
"def parse_commandline():\n parser = argparse.ArgumentParser(prog=\"passgen\")\n parser.add_argument(\"n\", type=int, nargs=\"?\", default=6,\n help=\"number of words in the pass phrase (default=6)\")\n parser.add_argument(\"--special\", action=\"store_true\",\n help=\"\"\"make pass phrase stronger by randomly inserting\n a special character (default=false)\"\"\")\n parser.add_argument(\"--source\", metavar=\"<file name>\",\n default=\"wordlist.txt\",\n help=\"use alternative word list to generate pass phrase\")\n parser.add_argument(\"--separator\", default=\" \",\n help=\"separator between words (default=\" \")\")\n\n return parser.parse_args()",
"def parse_cmdline(args):\n usage = \"usage: %prog [options] <name> <snpfile> <human asm build No> \" +\\\n \"<database>\"\n parser = OptionParser(usage)\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"Give verbose output\")\n return parser.parse_args()",
"def parse_arg():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, default='stylegan', choices=['vanilla', 'stylegan'])\n parser.add_argument('--mode', type=str, default='sample', choices=['sample', 'project', 'draw', 'interpolate'])\n parser.add_argument('--latent', type=str, default='z', choices=['z', 'w', 'w+'])\n parser.add_argument('--n_iters', type=int, default=1000, help=\"number of optimization steps in the image projection\")\n parser.add_argument('--perc_wgt', type=float, default=0., help=\"perc loss lambda\")\n parser.add_argument('--input', type=str, default='data/cat/*.png', help=\"path to the input image\")\n return parser.parse_args()"
] |
[
"0.6941162",
"0.67057675",
"0.669804",
"0.6569517",
"0.65421695",
"0.648353",
"0.6388414",
"0.6338641",
"0.6330334",
"0.6304197",
"0.63023865",
"0.63015646",
"0.6294502",
"0.627146",
"0.6257962",
"0.62389624",
"0.6235523",
"0.62227726",
"0.62178814",
"0.62130237",
"0.6191569",
"0.6189618",
"0.6186471",
"0.6173706",
"0.6160858",
"0.6155156",
"0.6151275",
"0.614821",
"0.61401576",
"0.6139276"
] |
0.7096899
|
0
|
Takes the passed arguments from the script and loads the sequence from a file or from an input string
|
def getSequence(args):
sequence = args.sequence
    if sequence in [None, ""] and args.filepath not in [None, ""]:
if path.exists(args.filepath):
try:
                with open(args.filepath, "r") as file:
                    sequence = file.readline().strip()
            except Exception as e:
                print("An exception occurred.", e)
return None
return sequence
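
A short usage sketch for getSequence (illustrative only, not part of the original entry). It assumes `from os import path` is in scope, since the function relies on path.exists, and it builds argparse-style namespaces by hand; strand.txt is a hypothetical input file:

from argparse import Namespace
from os import path  # getSequence relies on path.exists

# Sequence given directly on the command line: returned as-is.
print(getSequence(Namespace(sequence="GCAU", filepath=None)))  # -> GCAU

# Sequence read from a file: the first line of the (hypothetical) strand.txt is used.
print(getSequence(Namespace(sequence=None, filepath="strand.txt")))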
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run_real(self):\n\n if len(self.args) == 1:\n slice_file = \"-\"\n seq_file = self.args[0]\n elif len(self.args) == 2:\n slice_file = self.args[0]\n seq_file = self.args[1]\n else:\n self.parser.print_help()\n return 1\n\n self.load_sequences(seq_file)\n self.process_file(slice_file)",
"def load_args(init_regs: Registers, memory: Memory, args: List[str]):\n init_regs[\"$a0\"] = len(args) # argc\n\n argv: List[int] = []\n for arg in args:\n ptr = memory.extend_stack(bytesify(arg))\n argv.append(ptr)\n\n argv.append(0)\n\n for idx, ptr in enumerate(argv[::-1]):\n memory.extend_stack(bytesify(ptr, size=4), align_data=True)\n\n init_regs[\"$a1\"] = memory.ram[\"stack\"][\"stops\"] # argv",
"def __main__():\n\n args = parse_command_line(sys.argv)\n\n identifiers = []\n if args.input and args.column:\n [\n identifiers.append(line.split(\"\\t\")[args.column - 1].strip())\n for line in open(args.input, \"r\")\n ]\n elif args.text:\n identifiers = [a.strip() for a in args.text.split() if a.strip()]\n\n fetch_fasta(identifiers, args)",
"def load(self):\n\n address = 0\n program = []\n\n if len(sys.argv) < 2:\n print(\"Please pass in a second file.\")\n sys.exit()\n\n file_name = sys.argv[1]\n try:\n with open(file_name) as file:\n for line in file:\n split_line = line.split('#')[0]\n command = split_line.strip()\n\n if command == '':\n continue\n\n program.append(int(command, 2))\n\n except FileNotFoundError:\n print(f'{sys.argv[0]}: {sys.argv[1]} file was not found')\n sys.exit()\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n\n parser.add_argument(\"-f\", \"--file\", dest=\"input_filename\", type=str,\n help=\"input filename. If not given, stdin is used.\")\n\n parser.add_argument(\"-i\", \"--input-pattern\", dest=\"input_pattern\", type=str,\n help=\"input pattern. Parses description line in order to extract id.\")\n\n parser.add_argument(\"-o\", \"--output-filename-pattern\", dest=\"output_pattern\", type=str,\n help=\"output pattern. Gives filename for a given sequence.\")\n\n parser.add_argument(\"-n\", \"--num-sequences\", dest=\"num_sequences\", type=int,\n help=\"split by number of sequences (not implemented yet).\")\n\n parser.add_argument(\"-m\", \"--map\", dest=\"map_filename\", type=str,\n help=\"map filename. Map identifiers to filenames\")\n\n parser.add_argument(\"-s\", \"--skip-identifiers\", dest=\"skip_identifiers\", action=\"store_true\",\n help=\"do not write identifiers.\")\n\n parser.add_argument(\"--min-size\", dest=\"min_size\", type=int,\n help=\"minimum cluster size.\")\n\n parser.set_defaults(\n input_filename=None,\n map_filename=None,\n skip_identifiers=False,\n input_pattern=\"^(\\S+)\",\n min_size=0,\n num_sequences=None,\n output_pattern=\"%s\")\n\n (args) = E.start(parser)\n\n if args.input_filename:\n infile = iotools.open_file(args.input_filename, \"r\")\n else:\n infile = sys.stdin\n\n if args.map_filename:\n map_id2filename = iotools.ReadMap(open(args.map_filename, \"r\"))\n else:\n map_id2filename = {}\n\n if args.num_sequences:\n files = FilesChunks(chunk_size=args.num_sequences,\n output_pattern=args.output_pattern,\n skip_identifiers=args.skip_identifiers)\n\n else:\n files = Files(output_pattern=args.output_pattern,\n skip_identifiers=args.skip_identifiers)\n\n if args.input_pattern:\n rx = re.compile(args.input_pattern)\n else:\n rx = None\n\n ninput = 0\n noutput = 0\n identifier = None\n chunk = 0\n\n for seq in FastaIterator.iterate(infile):\n\n ninput += 1\n\n if rx:\n try:\n identifier = rx.search(seq.title).groups()[0]\n except AttributeError:\n print(\"# parsing error in description line %s\" % (seq.title))\n else:\n identifier = seq.title\n\n if map_id2filename:\n if identifier in map_id2filename:\n identifier = map_id2filename[identifier]\n else:\n continue\n\n files.Write(identifier, seq)\n noutput += 1\n\n if args.input_filename:\n infile.close()\n\n # delete all clusters below a minimum size\n # Note: this has to be done at the end, because\n # clusters sizes are only available once both the fasta\n # file and the map has been parsed.\n if args.min_size:\n ndeleted = files.DeleteFiles(min_size=args.min_size)\n else:\n ndeleted = 0\n\n if args.loglevel >= 1:\n print(\"# input=%i, output=%i, ndeleted=%i\" % (ninput, noutput, ndeleted))\n\n E.stop()",
"def iload(self, args):\n content = self.shell.find_user_code(args).splitlines()\n\n # we create a stack so e.g. having an iload inside of an iload\n # will process the inner iload and then resume the outer iload\n orig_readline = self.shell.pre_readline\n\n def pre_readline():\n if self.shell.rl_next_input is None:\n self.shell.rl_next_input = content.pop(0)\n self.shell.rl_do_indent = False\n orig_readline()\n if not content:\n # restore original hook\n self.shell.readline_startup_hook(orig_readline)\n self.shell.pre_readline = orig_readline\n\n self.shell.readline_startup_hook(pre_readline)\n self.shell.pre_readline = pre_readline\n\n print('Interactively loading \"%s\"'%args)",
"def load( self, arguments = None ):\n\n # determine list of arguments to load\n if arguments is None:\n arguments = sys.argv[ 1 : ]\n num_arguments = len( arguments )\n\n # index into positional argument specifications\n pos_index = 0\n\n # scan through each argument in the argument list\n for position in range( num_arguments ):\n\n # flag to indicate if this argument has been captured\n captured = False\n\n # value of current argument\n arg = arguments[ position ]\n\n # check for an option argument\n for key, patt, conf in self._opt_specs:\n\n # test this option's pattern\n match = re.match( patt, arg )\n if match is not None:\n\n # see if this argument captures the next as its value\n if conf[ 'type' ] == 'next':\n if position >= ( num_arguments - 1 ):\n raise ValueError(\n 'No value given for \"{}\" argument.'.format(\n key\n )\n )\n position += 1\n value = arguments[ position ]\n\n # switch-style arguments\n elif conf[ 'type' ] == 'switch':\n value = True\n\n # self-capturing arguments (capture offset in type)\n else:\n value = match.group( conf[ 'type' ] )\n\n # set the value for this argument\n self._set( ( key, patt, conf ), value )\n\n # continue to next argument\n captured = True\n break\n\n # this is a positional argument\n if captured == False:\n\n # current positional specifier\n key, patt, conf = spec = self._pos_specs[ pos_index ]\n\n # set the value for this argument\n self._set( spec, arg )\n\n # check for lists of positional argument lists\n if 'count' in conf:\n\n # fixed-length value list\n if type( conf[ 'count' ] ) is int:\n\n # see if the value list is complete\n if len( self._values[ key ] ) >= conf[ 'count' ]:\n pos_index += 1\n\n # single-value positional argument\n else:\n\n # move to the next positional argument\n pos_index += 1\n\n ### ZIH - validate here\n\n # store the list of arguments we loaded for future reference\n self.arguments = arguments\n\n # return an object container for the arguments\n return DictObject( **self._values )",
"def load(self):\n address = 0\n if len(sys.argv) < 2:\n print(\"Please pass in a second file name: python3 ls8.py second_filename.py\")\n sys.exit()\n file_name = sys.argv[1]\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(f\"{sys.argv[0]}: {sys.argv[1]} file was not found.\")\n sys.exit()\n \n for line in file.readlines():\n instruction = line.split(\"#\")[0]\n instruction = instruction.strip() \n if len(instruction) > 0:\n self.ram_write(address, int(instruction, 2))\n address += 1 \n file.close()",
"def load(args):\n subprocess.check_call([\"/bin/launchctl\", \"load\"] + values.get(args))",
"def load(self, input):",
"def data_sequence(\n calibration_file: Path,\n pedestal_file: Path,\n time_calibration_file: Path,\n systematic_correction_file: Path,\n drive_file: Path,\n run_summary: Path,\n pedestal_ids_file: Path,\n run_str: str,\n):\n history_file = Path(options.directory) / f\"sequence_{options.tel_id}_{run_str}.history\"\n # Set the starting level and corresponding return code from last analysis step\n # registered in the history file.\n level, rc = (4, 0) if options.simulate else historylevel(history_file, \"DATA\")\n log.info(f\"Going to level {level}\")\n\n if level == 4:\n rc = r0_to_dl1(\n calibration_file,\n pedestal_file,\n time_calibration_file,\n systematic_correction_file,\n drive_file,\n run_summary,\n pedestal_ids_file,\n run_str,\n )\n level -= 1\n log.info(f\"Going to level {level}\")\n\n if level == 3:\n rc = dl1ab(run_str)\n if cfg.getboolean(\"lstchain\", \"store_image_dl1ab\"):\n level -= 1\n log.info(f\"Going to level {level}\")\n else:\n level -= 2\n log.info(f\"No images stored in dl1ab. Producing DL2. Going to level {level}\")\n\n if level == 2:\n rc = dl1_datacheck(run_str)\n if options.no_dl2:\n level = 0\n log.info(f\"No DL2 are going to be produced. Going to level {level}\")\n else:\n level -= 1\n log.info(f\"Going to level {level}\")\n\n if level == 1:\n if options.no_dl2:\n level = 0\n log.info(f\"No DL2 are going to be produced. Going to level {level}\")\n else:\n rc = dl1_to_dl2(run_str)\n level -= 1\n log.info(f\"Going to level {level}\")\n\n if level == 0:\n log.info(f\"Job for sequence {run_str} finished without fatal errors\")\n\n return rc",
"def argument_parser_seq2var():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--encoder-hidden', type=int, default=64, help='Number of hidden units.')\n parser.add_argument('--epochs', type=int, default=2000, help='Number of epochs to train.')\n parser.add_argument('--lr', action='store_true', default=5e-4, help='Learning rate.')\n parser.add_argument('--tau', type=int, default=0.5, help='Gumbel softmax temperature.')\n \n args, unknown = parser.parse_known_args()\n \n return args",
"def parse_arguments(args):",
"def load_seq(\n filename: os.PathLike,\n annotation_path: Optional[os.PathLike] = None,\n format: Optional[str] = None,\n moltype: Optional[str] = None,\n label_to_name: Optional[Callable] = None,\n parser_kw: Optional[dict] = None,\n info: Optional[dict] = None,\n **kw,\n) -> Sequence:\n info = info or {}\n info[\"source\"] = str(filename)\n file_format, _ = get_format_suffixes(filename)\n if file_format == \"json\":\n seq = load_from_json(filename, (Sequence,))\n seq.name = label_to_name(seq.name) if label_to_name else seq.name\n return seq\n\n data = _load_seqs(file_format, filename, format, kw, parser_kw)\n name, seq = data[0]\n name = label_to_name(name) if label_to_name else name\n result = make_seq(seq, name, moltype=moltype)\n result.info.update(info)\n\n if getattr(seq, \"annotation_db\", None):\n result.annotation_db = seq.annotation_db\n\n if annotation_path is not None:\n result.annotation_db = load_annotations(path=annotation_path, seqids=[name])\n return result",
"def main(args):",
"def main(args):",
"def get_sequence(self):\n if os.path.isfile(self.input):\n with open(self.input, \"r\") as file:\n self.sequence = file.read()\n else:\n raise oma.SequenceError(\"Cannot open {0}\".format(self.input))",
"def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()",
"def from_command_line(lev_out):\n print(lev_out)\n assert lev_out in ('1a', '2') #('02', '03.2')\n\n ###################################################\n # DEFAULT VALUES #\n ###################################################\n timestamp_beg = dt.datetime.now().strftime('%Y%m%d-%H%M%S')\n plot_path_out = '~/Documents'\n plot_file_out = None # '%s/lev_%s_calibrate_%s.eps' % (\n # plot_path_out, lev_out, timestamp_beg)\n\n defaults = {\n 'scriptname' : 'step_%s_0_calibration.py' % lev_out,\n 'setup_file_in' : '../setup/setup_lev_%s.calibrate.txt' % lev_out,\n 'setup_file_out' : '../setup/setup_lev_%s.calibrated.txt' % lev_out,\n 'plot_file_out' : plot_file_out,\n }\n\n ###################################################\n # COMMAND LINE ARGUMENT #\n ###################################################\n # override defaults\n argv = sys.argv\n args = []\n\n # get un-dashed parameters\n for arg in argv:\n if arg[:1] == '-':\n continue\n args.append(arg)\n\n scriptname = defaults['scriptname']\n setup_file_in = defaults['setup_file_in']\n setup_file_out = defaults['setup_file_out']\n plot_file_out = defaults['plot_file_out']\n\n if len(args) > 1:\n setup_file_in = args[1]\n if len(args) > 2:\n setup_file_out = args[2]\n if len(args) > 3:\n plot_file_out = args[3]\n\n ###################################################\n # CALL MAIN #\n ###################################################\n opt = main(setup_file_in, setup_file_out, plot_file_out, scriptname)",
"def load_and_run(runner):\r\n\r\n #**************************************************************************************************************************\r\n #Cache commands to be executed.\r\n #If using -object spoofs, -enable option needs to be added as either a standalone command or an option in another command.\r\n #Note: -enable is not required for named spoofs (-id).\r\n #Example command: runner.AddCommand('spoof_main \"-enable -object CHwOrders -data acRPM -value 0.33 -enable\"\\n','->',False)\r\n #Templates:\r\n #runner.AddCommand('spoof_main \"-enable\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-id -var1 -var2 \"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-enable -object -data -value \"\\n','->',False)\r\n #runner.AddCommand('<enter any telnet command here>\\n','->',False)\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #Execute cached commands, then clear cached commands to get runner ready for next set of commands.\r\n #Optional pause here with additional details if necessary (i.e. instructions, timing, etc.).\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #raw_input(\"<Prompt to present to operator>\");\r\n #**************************************************************************************************************************\r\n runner.AddCommand(\"Log_Test_Info(\\\"Running spoof script \" + scriptName + \"\\\")\\n\", \"->\", False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n \r\n \r\n runner.AddCommand('spoof_main\"-object CHwStates -data red -value 995\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-object CHwStates -data green -value 950\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-object ProcRun_IntegratedPlateletYield -data data -value 400000000000\"\\n','->',False)\r\n runner.AddCommand('spoof_main\"-enable\"\\n','->',False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n\r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Pause here with additional details if necessary (i.e. 
instructions, timing, etc.).\r\n #time.sleep for no operator prompt, raw_input for prompt.\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #time.sleep(30)\r\n #raw_input(\"<Prompt to present to operator>, press ENTER to continue.\");\r\n #**************************************************************************************************************************\r\n\r\n \r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Next set of commands to be executed.\r\n #If more sets of commands are needed copies of this section and the \"Execute\" section below may be inserted after the \"Execute\" section below.\r\n #If data needs to be unspoofed prior to next spoof use command below.\r\n #runner.AddCommand('spoof_main \"-unspoof -object <Spoofed Object Here> -data <Spoofed Data Here>\"\\n','->',False)\r\n #Example command: runner.AddCommand('spoof_main \"-enable -object CHwOrders -data acRPM -value 0.33 -enable\"\\n','->',False)\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #(Optional) Execute next set of cached commands.\r\n #Optional pause here with additional details if necessary (i.e. instructions, timing, etc.).\r\n #The raw_input method will display the string then the operator will have to press ENTER to proceed.\r\n #raw_input(\"<Prompt to operator>\");\r\n #runner.Run()\r\n #runner.ResetCommands()\r\n #**************************************************************************************************************************\r\n\r\n\r\n\r\n #**************************************************************************************************************************\r\n #If desired, add a -disable -end or -unspoof command to disable the spoofer or simply unspoof spoofed data. \r\n #runner.AddCommand('spoof_main \"-unspoof -object <Spoofed Object Here> -data <Spoofed Data Here>\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-disable\"\\n','->',False)\r\n #runner.AddCommand('spoof_main \"-end\"\\n','->',False)\r\n #**************************************************************************************************************************\r\n runner.AddCommand(\"Log_Test_Info(\\\"Exiting spoof script \" + scriptName + \"\\\")\\n\", \"->\", False)\r\n runner.Run()\r\n runner.ResetCommands()\r\n\r\n #**************************************************************************************************************************\r\n #Optional prompt to notify operator that script and all spoofs have been execute successfully.\r\n #raw_input(\"Script execution complete, press ENTER to close this window.\");\r\n #**************************************************************************************************************************\r",
"def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)",
"def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)",
"def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)",
"def main(args=None):",
"def main(args=None):",
"def __init__(self):\n self.parser = argparse.ArgumentParser(prog='PROG')\n self.parser.add_argument(\"--idir\", action=\"store\",\n dest=\"idir\", default=\"\", help=\"Input data path\")\n self.parser.add_argument(\"--dates\", action=\"store\",\n dest=\"dates\", default=\"\", help=\"dates or dates-rante to read, e.g. YYYYMMDD-YYYYMMDD\")",
"def load_generator_command(path):\n h5dict = H5Dict(path)\n\n arguments_train = json.loads(h5dict['arguments_train'].decode('utf8'))\n \n try:\n arguments_val = json.loads(h5dict['arguments_val'].decode('utf8'))\n h5dict.__exit__()\n return arguments_train, arguments_val\n\n except AttributeError:\n h5dict.__exit__()\n return arguments_train",
"def load(self):\n\n address = 0\n\n program = sys.argv[1]\n\n with open(program) as p:\n for instruction in p:\n if instruction[0] == '#':\n continue\n\n instruction = instruction.strip()\n temp = instruction.split()\n\n if len(temp) == 0:\n continue\n\n self.ram[address] = int(temp[0], 2)\n address += 1\n \n # print(\"======= PROGRAM =========\")\n # for i in self.ram[:35]:\n # print(i)",
"def loadSequence(self, **kwargs):\n\n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n if self.baxter.br.mutex[side].locked():\n return\n self.baxter.br.stopExecution(side,False)\n\n try:\n threaded = kwargs['threaded']\n except Exception,e:\n threaded = False\n if threaded: \n actionSeq = self.baxter.br.post.readSequence(side)\n else:\n actionSeq = self.baxter.br.readSequence(side)\n\n for action in actionSeq:\n if 'block -' in action:\n colour = action.rstrip().split(' - ')[1].rstrip()\n else:\n self.addAction(fname = colour, action = action)\n self.runSequence()",
"def readCommand( argv ): ## argv belongs to the 'sys'-library and can be called through sys.argv. The function reads the console's comand line argument and passes it to a variable like so: args = sys.argv[1:]\n from optparse import OptionParser ## Option Parser is a powerful library for passing command line options (an advanced args) if you like. It allows you to add options by defining attributes. \n usageStr = \"\"\" \n USAGE: python pacman.py <options> \n EXAMPLES: (1) python pacman.py\n - starts an interactive game\n (2) python pacman.py --layout smallClassic --zoom 2\n OR python pacman.py -l smallClassic -z 2\n - starts an interactive game on a smaller board, zoomed in\n \"\"\" \n parser = OptionParser(usageStr) ## This creates the Option Parser instance. It also passes the usageStr which functions as a little help-text for the user.\n\n ### In this section all the option strings are defined. Typically each option has one short option string and one long option string. For example the parser.add_option('-n'... has '-n' as short and '--numGames' as the long option string. Both have the same effect. The option argument will be the same and be saved as the variabme 'numGames'. \n parser.add_option('-n', '--numGames', dest='numGames', type='int', \n help=default('the number of GAMES to play'), metavar='GAMES', default=1) ## the syntax for the options is (based on the example in this line) --n 3. This means that the value '3' would be assigned to the variable numGames.\n parser.add_option('-l', '--layout', dest='layout',\n help=default('the LAYOUT_FILE from which to load the map layout'), #The instance -> 'options.layout' defines the layout_file from which to load the map layout; DEFAULT = medium_classic\n metavar='LAYOUT_FILE', default='mediumClassic')\n parser.add_option('-p', '--pacman', dest='pacman',\n help=default('the agent TYPE in the pacmanAgents module to use'), #The instance -> 'options.pacman' defines which of the agent TYPE in the pacmanAgents moduleto use.\n metavar='TYPE', default='KeyboardAgent')\n parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',\n help='Display output as text only', default=False)\n parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',\n help='Generate minimal output and no graphics', default=False)\n parser.add_option('-g', '--ghosts', dest='ghost',\n help=default('the ghost agent TYPE in the ghostAgents module to use'),\n metavar = 'TYPE', default='RandomGhost')\n parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',\n help=default('The maximum number of ghosts to use'), default=4)\n parser.add_option('-z', '--zoom', type='float', dest='zoom',\n help=default('Zoom the size of the graphics window'), default=1.0)\n parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',\n help='Fixes the random seed to always play the same game', default=False)\n parser.add_option('-r', '--recordActions', action='store_true', dest='record',\n help='Writes game histories to a file (named by the time they were played)', default=False)\n parser.add_option('--replay', dest='gameToReplay',\n help='A recorded game file (pickle) to replay', default=None)\n parser.add_option('-a','--agentArgs',dest='agentArgs',\n help='Comma separated values sent to agent. e.g. 
\"opt1=val1,opt2,opt3=val3\"')\n parser.add_option('-x', '--numTraining', dest='numTraining', type='int',\n help=default('How many episodes are training (suppresses output)'), default=0)\n parser.add_option('--frameTime', dest='frameTime', type='float',\n help=default('Time to delay between frames; <0 means keyboard'), default=0.1)\n parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',\n help='Turns on exception handling and timeouts during games', default=False)\n parser.add_option('--timeout', dest='timeout', type='int',\n help=default('Maximum length of time an agent can spend computing in a single game'), default=30)\n\n #ONCE ALL THE OPTIONS HAVE BEEN DEFINED, optparse is instructed to parse the programm's command line.\n ##> The parser.parse_args() returns two values:\n ### (A) OPTIONS: An object containing values for all of your options e.g.:e.g. if --file takes a single string argument, then options.file will be the filename supplied by the user, or None if the user did not supply that option\n ### (B) ARGS: The list of positional arguments leftover after parsing options (we call this here otherjunk)\n options, otherjunk = parser.parse_args(argv) ## if the user happens to accidentally enter a command other than the specified arguments specified by parser.add_option it is passed to otherjunk\n if len(otherjunk) != 0: ## if there actually ends up to be a value in the otherjunk the program raises an Exception.\n raise Exception('Command line input not understood: ' + str(otherjunk))\n args = dict() # ARGS IS THE VARIABLE THAT IS BEING RETURNED BY THE readCommand function.\n\n # Fix the random seed\n if options.fixRandomSeed: random.seed('cs188') # 'random.seed' is part of the random class. The random.seed([x]) command initialises a standard random number. Optional argument x can be any hashable object. \n\n # Choose a layout\n args['layout'] = layout.getLayout( options.layout ) # REF_LAYOUT111: layout.py --> This function returns the layout object that was created by the layout class via the getlayout function. This contains the height, width, walls, food, captules and agent positions etc.\n if args['layout'] == None: raise Exception(\"The layout \" + options.layout + \" cannot be found\")\n\n # Choose a Pacman agent\n noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics) ## noKeyboard is set to TRUE if the user chooses the --replay and text- or silent graphics option.\n ##print noKeyboard\n pacmanType = loadAgent(options.pacman, noKeyboard) ## [see REFERENCE_001]: the loadAgent function takes the pacman argument the user passed into the command line as the option--pacman option identifies the appropriate agent (which may be the programmed agent or whost agent). \n agentOpts = parseAgentArgs(options.agentArgs) ##Passes the option.agentArgs which was captured by the user's console input into the agentOps variable. agentArgs is: \"Comma separated values sent to agent. e.g. \"opt1=val1,opt2,opt3=val3. The ParseAgentArgs function converts the option - value pairings into a dictionary formatted opts[opt1] = val1. 
\n if options.numTraining > 0: ##numTraining was captured by the user's console input and designates how many games are training games which means that the output remains surpressed.\n args['numTraining'] = options.numTraining ## This takes the user's input as the -x or --numTraining and passes it to the args dictionary with the numTraining key as the args['numTraining'] variable.\n if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining ## This integrates the variable entered into as training rounds in the agentOpts variable.\n pacman = pacmanType(**agentOpts) ## REFERENCE002 ##Instantiate Pacman with agentOpts. ## The variable pacmanType contains a reference to agent module loaded by the load Agent function. This function does not cause the module to be instanciated. This happens when here ## See[REFERENCE_001]: ## The * and ** will 'soak up' any remaining values not otherwise accounted for. In this case these options are basically the agent options the user can input.\n ## agentOpts contains the opts dictionary = {opt1:val1, opt2:val2, opt3:val3}; it also contains the numTraining variable as the ['numTraining'] key. As such it has the following structure. {opt1:val1,opt2:val2,opt3:val3, numTraining:int}.\n args['pacman'] = pacman ## This passes the instanciated object to the agent dictionary containing the pacman key.\n\n # Don't display training games\n if 'numTrain' in agentOpts: ## Checks whether the user has determined a certain number of training games. If they did, the number is passed on as an int to the options.numQuiet and option.numIgnore variables.\n options.numQuiet = int(agentOpts['numTrain']) \n options.numIgnore = int(agentOpts['numTrain'])\n\n # Choose a ghost agent\n ghostType = loadAgent(options.ghost, noKeyboard) ## The options.ghost variable contains the user's ghost type preference as specified in the console.The user can choose between -g RandomGhost which is A ghost that chooses a legal action uniformly at random OR DirectionalGhost, a ghost that prefers to rush Pacman, or flee when scared.\n args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )] #instanciates as many ghost agents as the player requested by entering the desired number as -k', '--numghosts'in the console.\n\n # Choose a display format ##contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n if options.quietGraphics: \n import textDisplay\n args['display'] = textDisplay.NullGraphics()\n elif options.textGraphics:\n import textDisplay\n textDisplay.SLEEP_TIME = options.frameTime\n args['display'] = textDisplay.PacmanGraphics()\n else:\n import graphicsDisplay ## This refers to the module that is responsible for the graphical representation of the game.\n args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime) ## This line instanciates the PacmanGraphics class from the graphicsDisplay module and passes the reference to the args['display'] dictionary.\n args['numGames'] = options.numGames \n args['record'] = options.record\n args['catchExceptions'] = options.catchExceptions\n args['timeout'] = options.timeout\n\n # Special case: recorded games don't use the runGames method or args structure\n if options.gameToReplay != None:\n print 'Replaying recorded game %s.' 
% options.gameToReplay \n import cPickle\n f = open(options.gameToReplay)\n try: recorded = cPickle.load(f)\n finally: f.close()\n recorded['display'] = args['display']\n replayGame(**recorded)\n sys.exit(0)\n\n return args #returns the args-dictionary which contains:\n ##args['pacman'] which contains a dictionary of dictionaries of the agent that was loaded into args['numtraining'] = {agentOpts[opt1]: val1 ; agentOpts[opt2]:val2; agentOpts[opt3]:val3}\n ##args['layout'] - this function returns the layout object that was created by the layout class via the getlayout function.\n ##args['numTraining'] which contains which designates how many games are training games which means that the output remains surpressed\n ##args['ghosts'] - contains the instanciated ghost agents in line with the number the user specified\n ##args['display'] - contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n ##args['numGames'] - the number of GAMES to play\n ##args['record'] - Writes game histories to a file (named by the time they were played)\n ##args['catchExceptions'] = options.catchExceptions - Turns on exception handling and timeouts during games\n ##args['timeout'] = options.timeout -Maximum length of time an agent can spend computing in a single game"
] |
[
"0.60671085",
"0.591243",
"0.5838243",
"0.5584758",
"0.5563122",
"0.5546405",
"0.5540286",
"0.55338585",
"0.55183625",
"0.55168164",
"0.54732597",
"0.54644245",
"0.53941137",
"0.53898495",
"0.5385075",
"0.5385075",
"0.53753227",
"0.53566664",
"0.535505",
"0.5349396",
"0.5347528",
"0.5344904",
"0.53325224",
"0.5327549",
"0.5327549",
"0.5308943",
"0.5308604",
"0.5300972",
"0.5282693",
"0.5281635"
] |
0.657227
|
0
|
Determines the cost associated with a pair: 1 if it is a valid pair, else 0. This function gives a cost of 1 to UG pairs as well.
|
def costFunction(a, b):
    # Canonical Watson-Crick pairs.
    pairs = [('G', 'C'), ('C', 'G'), ('A', 'U'), ('U', 'A')]
    if UNCOMMON:
        # Use extend (not append) so the wobble pairs are added as individual
        # tuples; appending the list would nest it and the membership test
        # below could never match ('G', 'U') or ('U', 'G').
        pairs.extend([('G', 'U'), ('U', 'G')])
    if (a, b) in pairs:
        return 1
    return 0
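
A minimal usage sketch for the corrected function, assuming UNCOMMON is a module-level boolean flag that enables GU wobble pairing (only the flag name comes from the snippet above; the calls are illustrative):

UNCOMMON = True  # assumed module-level flag enabling GU wobble pairs

assert costFunction('G', 'C') == 1  # canonical Watson-Crick pair
assert costFunction('G', 'U') == 1  # wobble pair, counted only because UNCOMMON is set
assert costFunction('A', 'G') == 0  # not a valid pair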
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cost(graph, gates_qubits_pairs):\n for allowed, gate in enumerate(gates_qubits_pairs):\n if gate not in graph.edges():\n break\n return len(gates_qubits_pairs) - allowed",
"def pairing_cost_map(players, cost_functions):\n\n if len(players) == 0:\n return dict()\n\n num_pairings_in_round = math.ceil(len(players)/2)\n all_possible_pairs = list(itertools.combinations(players, 2))\n\n def _init_cost_map():\n cost_map = dict()\n for p in players:\n cost_map[p] = dict()\n for pb in players:\n if pb != p:\n cost_map[p][pb] = 0\n return cost_map\n\n def _set_cost(cost_map, pa, pb, cost):\n cost_map[pa][pb] = cost\n cost_map[pb][pa] = cost\n\n def _max_bits_used_in_function_in_round():\n \"\"\"\n For input players, return a number of bits that can contain the output\n of any of the cost functions multiplied by the number of pairs in a\n round.\n \"\"\"\n word_size = 16\n bits_occupied = [word_size] * len(cost_functions)\n for (pa, pb) in all_possible_pairs:\n for i in range(len(cost_functions)):\n max_sum_of_cost = num_pairings_in_round * \\\n cost_functions[i](pa, pb)\n while (max_sum_of_cost >= 2**bits_occupied[i]):\n bits_occupied[i] *= 2\n bits_occupied = [2*b for b in bits_occupied] # Paranoia\n for b in bits_occupied:\n assert(b % word_size == 0)\n return max(bits_occupied)\n\n def _eval_cost_functions(pa, pb, bits_per_func):\n result = 0\n max_cost = 2**bits_per_func - 1\n for i in range(len(cost_functions)):\n cost = cost_functions[i](pa, pb)\n assert(cost >= 0)\n assert(cost * num_pairings_in_round <= max_cost)\n assert(round(cost) == cost)\n result = (result << bits_per_func)\n result += cost\n return result\n\n def _assert_two_of_each_value(cost_map):\n values = [v for (k,v) in cost_map.items()]\n for v in values:\n assert(values.count(v) == 1)\n\n cost_map = _init_cost_map()\n max_bits = _max_bits_used_in_function_in_round()\n for (pa, pb) in all_possible_pairs:\n _set_cost(cost_map, pa, pb, _eval_cost_functions(pa, pb, max_bits))\n for (pa, pb) in all_possible_pairs:\n assert(cost_map[pa][pb] == cost_map[pb][pa])\n _assert_two_of_each_value(cost_map)\n return cost_map",
"def _check_empty_and_sort_cost_pairs(self, pair_description, pairs):\n\n if pairs is None or len(pairs) == 0:\n raise ValueError(f\"Empty {pair_description} are provided.\")\n\n # sort based on power output\n pairs.sort(key=lambda p: p[0])\n\n return",
"def test_reachable_pair(self):\n G = DiGraph([(0, 1), (1, 2), (2, 0)])\n assert_true(is_reachable(G, 0, 2))",
"def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True",
"def compute_cost(AL, Y):\n pass",
"def test_cost_consideration():\n # input\n net = create_test_net()\n idx = pp.create_sgen(net, 1, 1.3, index=2)\n pp.create_poly_cost(net, idx, \"sgen\", 2.3, index=4)\n pp.runpp(net)\n assert all(net.sgen.index.values == np.array([0, 5, 2]))\n assert all(net.poly_cost.element == np.array([0, 0, 5, 2]))\n\n for cost_type in [\"poly_cost\", \"pwl_cost\"]:\n\n if cost_type == \"pwl_cost\":\n for poly in net.poly_cost.itertuples():\n net.poly_cost.drop(poly.Index, inplace=True)\n pp.create_pwl_cost(net, poly.element, poly.et, [[0, 20, 1]], index=poly.Index)\n\n # eq generation\n boundary_buses = [0, 2]\n internal_buses = [1]\n eq_net1 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses)\n eq_net2 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses,\n return_internal=False)\n\n # check elements\n check_elements_amount(eq_net1, {\"bus\": 6, \"load\": 3, \"sgen\": 3, \"shunt\": 5, \"ext_grid\": 1,\n \"line\": 2, \"impedance\": 10, cost_type: 4},\n check_all_pp_elements=True)\n check_elements_amount(eq_net2, {\"bus\": 5, \"load\": 3, \"sgen\": 2, \"shunt\": 5, \"ext_grid\": 1,\n \"impedance\": 10, cost_type: 3},\n check_all_pp_elements=True)\n assert all(eq_net1.sgen.index.values == np.array([0, 1, 2])) # simple create_sgen()\n # without index=... expected\n assert all(eq_net2.sgen.index.values == np.array([0, 1]))\n\n # --- check poly cost\n # eq_net1\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net1[cost_type].loc[eq_net1[cost_type].et == \"ext_grid\"])\n for i in range(3):\n idx_net = net.sgen.sort_values(\"p_mw\").index[i]\n idx_eq_net = eq_net1.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net1[cost_type].loc[(eq_net1[cost_type].element == idx_eq_net) &\n (eq_net1[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)\n\n # eq_net2\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net2[cost_type].loc[eq_net2[cost_type].et == \"ext_grid\"])\n for i in range(2):\n idx_net = net.sgen.loc[~net.sgen.bus.isin(boundary_buses+internal_buses)].sort_values(\n \"p_mw\").index[i]\n idx_eq_net = eq_net2.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net2[cost_type].loc[(eq_net2[cost_type].element == idx_eq_net) &\n (eq_net2[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)",
"def get_expected_cost(self):",
"def compute_cost(AL, Y):\n pass",
"def fn(i, j):\n if i == len(costs): return 0 # no more houses to paint \n return costs[i][j] + min(fn(i+1, jj) for jj in range(3) if j != jj)",
"def partial_cost(data, indexing, combinations, cf, cf_prime):\n # selects indices that are mapped to by u,v and w vertices respectively\n part_us = indexing[combinations[:,0]]\n part_vs = indexing[combinations[:,1]]\n part_ws = indexing[combinations[:,2]]\n \n result = 0\n # cf_prime is applied to cf_prime_combs and cf to cf_combs respectively\n # the result is the sum over both functions\n if cf_prime is not None:\n # select pairings where u,v and w vertices share the same index (i.e. same partition)\n cf_prime_combs = combinations[(part_us == part_vs) & (part_us == part_ws)]\n if cf_prime_combs.shape[0] > 0:\n result += cf_prime(data, cf_prime_combs).sum()\n if cf is not None:\n # select pairings where u,v and w vertices have \n # all distinct indices (i.e. distinct partitions) \n cf_combs = combinations[(part_us != part_vs) & (part_us != part_ws) & (part_vs != part_ws)]\n if cf_combs.shape[0] > 0:\n result += cf(data, cf_combs).sum()\n \n # divide by the overall amount of selected pairings\n return result/combinations.shape[0]",
"def costSubstitution(el1,el2):\r\n if el1<el2:\r\n return 0\r\n else:\r\n return 5",
"def subst_cost(c0, c1): # Beräknar kostnaden efter att det blivit länkat\r\n return 0 if c0 == c1 else 2 # Om charachter 0 är samma som charachter 1 kostar det 0 annars 2.\r",
"def cost(self) -> float:",
"def test_cost(self):\n self.assertAlmostEqual(m2.cost(params), 57.629, 2)",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current= qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost+each[2])\n if each[0] not in costs:\n costs[each[0]] = cost+each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost+each[2]:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n aStarSearch(problem)",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def uniformCostSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\r\n\tutil.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #the logic is pretty much the same just that, unlike the other two algorithms we take it consideration cost of every node\n #created a priority queue for the frontier nodes\n neighbourNodes = util.PriorityQueue()\n moves = []\n #hence while pushing into the queue, there are three tuples (state,action,cost)\n neighbourNodes.push((problem.getStartState(),moves,0),0)\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n poppedNodeState, poppedNodeAction, popCost = neighbourNodes.pop()\n if(poppedNodeState in seenNodes):\n continue\n if problem.isGoalState(poppedNodeState):\n return poppedNodeAction\n seenNodes.add(poppedNodeState)\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n if(state in seenNodes):\n continue\n #here, when a node is pushed into the piority queue, the actions are appeneded as usual\n #but also the cost is addede, so tht we know what is the cost of visting the node\n neighbourNodes.push((state, poppedNodeAction+[action], popCost+cost),popCost+cost)\n return moves\n #util.raiseNotDefined()",
"def cost_puzzle(U, I, cost_clue):\n litsU = set(abs(l) for l in U) | set(-abs(l) for l in U)\n assert all(i in U or -i in U for i in I), \"Making sure all literals are in user defined variables\"\n\n def cost_lit(lit):\n if lit not in litsU:\n raise CostFunctionError(U, lit)\n elif lit in cost_clue:\n return cost_clue[lit]\n else:\n # lit in\n return 1\n\n return cost_lit",
"def compute_cost(self, del_u : list, u : list):\n print(\"ym: \", self.ym, \"yn: \", self.yn)\n self.cost = 0.0\n\n self.ym = self.d_model.ym\n self.yn = self.d_model.yn\n\n # FIXME : this is supposed to be from N1 to N2\n self.cost+= (self.ym[0] - self.yn[0])\n angle_diff = (self.ym[1] - self.yn[1])\n if angle_diff > np.pi:\n angle_diff -= 2*np.pi\n if angle_diff < -np.pi:\n angle_diff += 2*np.pi\n self.cost += angle_diff\n\n for j in range(self.Nu):\n self.cost += (self.ym[j] - self.yn[j])**2\n\n for j in range(self.Nu):\n self.cost += self.lambd[j]*(del_u[j])**2\n\n for j in range(self.Nu):\n self.cost += self.s / (u[j] + self.r / 2.0 - self.b) + self.s / (self.r/2.0 + self.b - u[j]) - 4.0 / self.r\n\n return self.cost",
"def heuristic_cal(current: list, goal: list) -> int:\n\n current_locations = state_to_locations(current)\n goal_locations = state_to_locations(goal)\n\n h_val = 0 # Tracks the cost of the heuristic function\n for i in range(1, 16):\n h_val += (abs(current_locations[i][0] - goal_locations[i][0]) +\n abs(current_locations[i][1] - goal_locations[i][1]))\n \"\"\" Loops through both lists of locations and adds the Manhattan distance \n of each number to the sum h_val. The range is from 1 to 16 because the \n blank in either state is not taken into account.\"\"\"\n\n return h_val",
"def find_pair(numbers, target_sum):\n for num in numbers:\n partner_num = target_sum - num\n if partner_num in numbers:\n return num * partner_num",
"def pair_is_consistent(graph, u, v):\n relations = get_all_relations(graph, u, v)\n\n if 1 != len(relations):\n return False\n\n return list(relations)[0]",
"def test_check_cost():",
"def costSDT(graph, a):\n hit=0; miss=0; fa=0; cr=0\n check=(graph==a)\n for rnum, r in enumerate(a):\n for cnum, c in enumerate(r[:rnum]):\n if check[rnum,cnum]==True:\n if a[rnum,cnum]==1:\n hit += 1\n else:\n cr += 1\n else:\n if a[rnum,cnum]==1:\n miss += 1\n else:\n fa += 1\n return [hit, miss, fa, cr]"
] |
[
"0.6760898",
"0.59937674",
"0.5882713",
"0.5844929",
"0.5836548",
"0.58140975",
"0.57850605",
"0.5784143",
"0.57834023",
"0.5724555",
"0.56991553",
"0.56976324",
"0.5600499",
"0.559372",
"0.55909175",
"0.55856216",
"0.55798566",
"0.5541015",
"0.55360955",
"0.5508084",
"0.5508084",
"0.5508084",
"0.55060333",
"0.5492819",
"0.5481898",
"0.5449596",
"0.54492384",
"0.54469794",
"0.5438846",
"0.54245037"
] |
0.6949529
|
0
|
Compare a set of input keys to expected keys.
|
def assert_keys_match(keys, expected, allow_missing=True):
if not allow_missing:
missing = expected - keys
assert not missing, 'missing keys: %s' % missing
extra = keys - expected
assert not extra, 'extraneous keys: %s' % extra
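
A short usage sketch, assuming both arguments are passed as sets, since the function relies on set difference (the key names are illustrative):

expected = {'id', 'name', 'email'}

assert_keys_match({'id', 'name', 'email'}, expected)  # exact match passes
assert_keys_match({'id', 'name'}, expected)           # missing keys tolerated by default

try:
    assert_keys_match({'id', 'extra'}, expected)      # 'extra' is not expected
except AssertionError as err:
    print(err)                                        # extraneous keys: {'extra'}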
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_compare_keys(self):\n dict1 = {\"a\":1 , \"b\":2 , \"c\":3}\n dict2 = {\"b\":1 ,\"a\":2 , \"c\":3}\n dict3 = {\"b\":1 ,\"d\":2 , \"c\":3}\n self.assertEqual(True, comparator.compare_keys(dict1, dict2))\n self.assertEqual(False, comparator.compare_keys(dict2, dict3))",
"def compare(this, other, keys):\n for key in keys:\n assert this[key] == other[key]",
"def check_keys(set_name, keys, value, expect_key):\n\trecords = lib.read_all_records(set_name)\n\n\tfor key in keys:\n\t\tdigest = lib.get_key_digest(set_name, key)\n\t\tmeta_key, meta_ttl, record = records[str(digest).encode().hex()]\n\t\tlib.validate_record(key, record, [\"value\"], [value])\n\t\tlib.validate_meta(key, meta_key, meta_ttl, expect_key)",
"def test_keys_eq(self):\n self.assertListEqual(self.result, self.expected)",
"def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))",
"def key_checker(expected_keys):\r\n\r\n def check(actual_dict, raise_error=True):\r\n \"\"\"\r\n Function that checks whether all keys in the expected_keys object is in the given actual_dict object.\r\n \"\"\"\r\n missing = set(expected_keys) - set(actual_dict.keys())\r\n if not missing:\r\n return True\r\n if raise_error:\r\n raise InvalidTabsException(\r\n \"Expected keys '{0}' are not present in the given dict: {1}\".format(expected_keys, actual_dict)\r\n )\r\n else:\r\n return False\r\n\r\n return check",
"def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')",
"def _dict_assert(actual_dict, expected_dict):\n for key in set(actual_dict) & set(expected_dict):\n _value_assert(key, actual_dict[key], expected_dict[key])",
"def check_prior_keys(prior, keylist=None):\n if keylist == None:\n keylist = prior.keys()\n for key1 in keylist:\n for key2 in keylist:\n assert prior[key1][key2] == prior[key2][key1], (key1, key2)",
"def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)",
"def keysWhichMatch(cls, *args):\n if len(cls.keys) < len(args) > 0:\n raise ValueError('Number of keys provided is too long.\\n'\n 'Len Class Keys: %s\\n'\n 'Len Provided Keys: %s\\n' % (len(cls.keys), len(args)))\n\n index = 0\n output = cls.db_key_tuples()\n\n for keyToCheck in args:\n temp = []\n for key in output:\n if key[index] == keyToCheck:\n temp.append(key)\n\n index += 1\n output = temp\n\n return output",
"def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])",
"def _in_keys(self, key, keys):\n # sorting required for comparison\n key.sort()\n return key in keys",
"def checkKeysCorrection(self, input, valid_keys):\n for key in input.keys():\n if key not in valid_keys:\n print(\"[ERROR] Key '%s' does not exist.\" % key)\n return False\n # check whether all result keys are included in valid keys\n if key == \"result\" and not self.checkResultsCorrection(result=input[\"result\"], valid_keys=valid_keys):\n return False\n return True",
"def assert_keys_exist(self, caller, *keys):\n assert keys, (\"*keys parameter must be specified.\")\n for key in keys:\n self.assert_key_exists(key, caller)",
"def check_equivalent(self, a, b):\n assert set(a) == set(b)\n for key in a:\n assert self.is_equal(a[key], b[key])",
"def predicate(keys_0, keys_1):\n num_matching = 0\n for i, (k_0, k_1) in enumerate(zip(keys_0, keys_1)):\n if i != position and k_0 == k_1:\n num_matching += 1\n return num_matching == len(key) - 1",
"def _check_entry_keys(self, entry, n, key, required, xor_sets=None, optional=None):\n xor_sets = set(tuple()) if xor_sets is None else xor_sets\n optional = set() if optional is None else optional\n nth = ordinal(n)\n if not isinstance(entry, dict):\n raise TypeError(f\"Each entry must be a dictionary, error on {nth} {key}\")\n if len(required - entry.keys()) > 0:\n missing_keys = required - entry.keys()\n raise ValueError(\n f\"Each entry of {key} requires keys of: {', '.join(sorted(required))}. \"\n f\"Missing {sorted(missing_keys)} on {nth} entry, possibly others.\"\n )\n allowable_keys = required | optional | set().union(*xor_sets)\n if not set(entry.keys()) <= allowable_keys:\n unknown_keys = set(entry.keys()) - allowable_keys\n err_msg = f\"Got unknown keys in {nth} {key}: {', '.join(unknown_keys)}\"\n raise ValueError(err_msg)\n for xor_set in sorted(xor_sets):\n if len(xor_set & entry.keys()) != 1:\n err_msg = f\"For {key}, must specify one of {xor_set} but not both\"\n err_msg += f\". Error on {nth} entry, possibly others\"\n raise ValueError(err_msg)",
"def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)",
"def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)",
"def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'",
"def _recursively_assert_actual_result_matches_expected_result_keys(\n expected, actual, description_for_error_reporting\n):\n if isinstance(expected, Mapping):\n for expected_key in expected.keys():\n assert expected_key in actual.keys(), description_for_error_reporting\n _recursively_assert_actual_result_matches_expected_result_keys(\n expected[expected_key],\n actual[expected_key],\n description_for_error_reporting + f'[\"{expected_key}\"]',\n )\n else:\n assert expected == actual, description_for_error_reporting",
"def same_keys(a, b):\n for ak in a.keys():\n if ak not in b:\n return False\n for bk in b.keys():\n if bk not in a:\n return False\n return True",
"def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)",
"def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)",
"def assertDictContainsSubset(self, expected, actual, msg=None):\r\n missing = []\r\n mismatched = []\r\n for key, value in expected.iteritems():\r\n if key not in actual:\r\n missing.append(key)\r\n elif value != actual[key]:\r\n mismatched.append('%s, expected: %s, actual: %s' %\r\n (safe_repr(key), safe_repr(value), \r\n safe_repr(actual[key])))\r\n\r\n if not (missing or mismatched):\r\n return\r\n\r\n standardMsg = ''\r\n if missing:\r\n standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in \r\n missing)\r\n if mismatched:\r\n if standardMsg:\r\n standardMsg += '; '\r\n standardMsg += 'Mismatched values: %s' % ','.join(mismatched)\r\n\r\n self.fail(self._formatMessage(msg, standardMsg))",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError('unknown keys in values')",
"def check_all_same_keys(dict_list, name):\n if len(dict_list) == 0:\n return\n keys = dict_list[0].keys()\n for dct in dict_list:\n if keys != dct.keys():\n raise DGLError('Expect all {} to have the same set of keys, but got'\n ' {} and {}.'.format(name, keys, dct.keys()))",
"def test_4():\n input, output = get_input()\n with open(\n os.path.join(os.path.dirname(__file__), \"..\", \"test_data\",\n \"main_test4_keys.txt\")) as f:\n returnkeys = f.read().splitlines()\n os.system(f\" python -m vcflat -i {input} -o {output}\")\n\n with open(output, \"r\") as f:\n reader = csv.DictReader(f)\n assert [\n i for i in returnkeys if i in set(reader.fieldnames[0].split(\"\\t\"))\n ]\n os.remove(output)"
] |
[
"0.72339267",
"0.70807713",
"0.7028946",
"0.6939085",
"0.69050825",
"0.68213785",
"0.68083143",
"0.6603015",
"0.6579404",
"0.6534215",
"0.6483633",
"0.63990706",
"0.6259154",
"0.62108934",
"0.6114493",
"0.6069182",
"0.60690826",
"0.60552496",
"0.60516036",
"0.60516036",
"0.60385317",
"0.6015633",
"0.6010362",
"0.6001011",
"0.5998713",
"0.5998713",
"0.5997558",
"0.5978166",
"0.59733886",
"0.5971141"
] |
0.7215275
|
1
|
Reads a key from dict, ensuring valid bool if present.
|
def read_key_bool(op, key):
if key in op:
assert isinstance(op[key], bool), 'must be bool: %s' % key
return op[key]
return None
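
A brief example of the three cases this helper distinguishes: key present and boolean, key absent, and key present with the wrong type (the dict contents are illustrative):

op = {'force': True, 'retries': 3}

assert read_key_bool(op, 'force') is True    # present and bool
assert read_key_bool(op, 'dry_run') is None  # absent, returns None

try:
    read_key_bool(op, 'retries')             # present but an int, not a bool
except AssertionError as err:
    print(err)                               # must be bool: retries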
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_key_dict(obj, key):\n assert key in obj, 'key `%s` not found' % key\n assert obj[key], 'key `%s` was blank' % key\n assert isinstance(obj[key], dict), 'key `%s` not a dict' % key\n return obj[key]",
"def readKey(self, keyPath):\n\t\ttry:",
"def isValidKey(key):\n return True",
"def _check_key(self, key):\n raise NotImplementedError",
"def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY",
"def read(self, key):\n raise NotImplementedError",
"def contains_key(kv_json, key):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n try:\n res = kv_dict[key]\n return True\n except KeyError:\n return False\n else:\n print(\"Provide A JSON Key Value String\")",
"def func3(key):\n value = my_test_dict.get(key)\n if value is None:\n return False\n else:\n return True",
"def isInDic(dic, key):\n pass",
"def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyError(key)",
"def get_key(dict, key):\n return dict.get(key, None)",
"def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n dictionary = key.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"in_car\": True}",
"def bool(self, key: str, def_: Union[builtins.bool, T] = False) -> Union[builtins.bool, T]:\n try:\n return BOOL_LOOKUP[self.get(key).casefold()]\n except KeyError:\n return def_",
"def load(self, key):\n return self.load_attrs().get(key)",
"async def get(self, key):\n return self.dict.get(key, None)",
"def has_key(self, key):\n return self.__dict__.has_key(key)",
"def _access_dict(self, d, key):\n try:\n # try to get access to the value by using the key\n value = d[key]\n return value\n except:\n # fail to access the value from the key\n # namely, the feature does not exist in the \n # feature dictionary of a specific apartment\n return None",
"def getbool(self, key):\n try:\n return self.parser.getboolean(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read boolean value in config file for key '{}' and string '{}'. Must be Yes/No\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err",
"def fetch_val_for_key(key, delete_key=False):\n\n # first try to find it in the FLAGS\n try:\n if delete_key:\n return MSAF_FLAGS_DICT.pop(key)\n return MSAF_FLAGS_DICT[key]\n except KeyError:\n pass\n\n # next try to find it in the config file\n\n # config file keys can be of form option, or section.option\n key_tokens = key.rsplit('.', 1)\n if len(key_tokens) == 2:\n section, option = key_tokens\n else:\n section, option = 'global', key\n try:\n try:\n return msaf_cfg.get(section, option)\n except InterpolationError:\n return msaf_raw_cfg.get(section, option)\n except (NoOptionError, NoSectionError):\n raise KeyError(key)",
"def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True",
"def key(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"key\")",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def containsKey(self, key):\n return get(key) != None",
"def has_key(self, name):\n return self[name] <> None",
"def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid",
"def load_key(key_name):\n if not p.exists(key_name):\n write_key(key_name)\n\n return open(key_name, \"rb\").read()",
"def handle_key(self, k):\n\t\treturn False",
"async def read(self, key: str) -> ResponseOrKey:",
"def __nonzero__ (self):\n hKey, _, _ = self._from_string (self.moniker, accept_value=False)\n return bool (hKey)",
"def getBoolean(self, key):\n self._check(key)\n return self.__config.value(key).toBool()"
] |
[
"0.6125381",
"0.61197007",
"0.59757054",
"0.5955354",
"0.5898141",
"0.5896521",
"0.58521867",
"0.58086807",
"0.57042295",
"0.567589",
"0.56483555",
"0.5639008",
"0.56366557",
"0.562926",
"0.5597538",
"0.5592852",
"0.5591112",
"0.558603",
"0.5575906",
"0.55707663",
"0.55380774",
"0.550639",
"0.5503048",
"0.54759663",
"0.5461862",
"0.54615074",
"0.5452861",
"0.54370403",
"0.543641",
"0.54272866"
] |
0.69093263
|
0
|
Given a dict, read `key`, ensuring result is a dict.
|
def read_key_dict(obj, key):
assert key in obj, 'key `%s` not found' % key
assert obj[key], 'key `%s` was blank' % key
assert isinstance(obj[key], dict), 'key `%s` not a dict' % key
return obj[key]
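
A small sketch of the happy path and the three failure modes the assertions guard against (the keys and values are illustrative):

obj = {'settings': {'verbose': True}, 'empty': {}, 'flat': 'x'}

assert read_key_dict(obj, 'settings') == {'verbose': True}  # happy path

for bad in ('missing', 'empty', 'flat'):
    try:
        read_key_dict(obj, bad)
    except AssertionError as err:
        print(err)  # not found / was blank / not a dict, respectively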
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mapToDict(dictionary, key):\n return dictionary[key]",
"def get_dict(key):\r\n name = f\"{key}_dict\"\r\n return eval(name)",
"def retrieve_airflow_variable_as_dict(\n key: str) -> Dict[str, Union[str, Dict[str, str]]]:\n value = models.Variable.get(key)\n try:\n value_dict = json.loads(value)\n except json.decoder.JSONDecodeError as error:\n raise Exception('Provided key \"{}\" cannot be decoded. {}'.format(\n key, error))\n return value_dict",
"def get_dict_by_key(pcb_data: List[Dict[str, Any]], key: str) -> Dict[str, Any]:\n for d in pcb_data:\n if isinstance(d, dict) and key in d.keys():\n return d\n return {}",
"def dict_by_key(d, k):\n k = k.split('.')\n while len(k) != 0:\n if isinstance(d, dict):\n d = d[k[0]]\n else:\n d = d[int(k[0])]\n k = k[1:]\n return d",
"def read_serialized_dict(path: str) -> dict:\n data = read_serialized_data(path)\n if isinstance(data, dict):\n return data\n raise TypeError(\n f\"Expected data encoded by {path!r} to be a dictionary at the top-level. \"\n f\"Received {data.__class__.__name__!r} instead.\"\n )",
"def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n dictionary = key.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"in_car\": True}",
"def get_key(dict, key):\n return dict.get(key, None)",
"def get_key_from_data_dict(data: dict, key: str):\n retrieved_key = data.get(key, None)\n if not retrieved_key:\n LOG.info(\n f\"Could not get key {key} from request to the API. Data received: {data}\"\n )\n return retrieved_key",
"def convert_key_info_to_readable(key_info: dict[str, Any]) -> dict[str, Any]:\n key_fields = {'kid': 'key_id',\n 'kty': 'json_web_key_type',\n 'key_ops': 'key_operations',\n 'n': 'RSA_modulus',\n 'e': 'RSA_public_components',\n }\n for key, value in key_fields.items():\n if key in key_info:\n key_info[value] = key_info.pop(key)\n\n return key_info",
"def get_from_dict(d, k):\n try:\n return reduce(dict.get, k, d)\n except TypeError:\n # Value not found.\n return None",
"def _retrieve_dict(self, object_key):\n return json.loads(self._retrieve_blob(object_key).decode('utf-8'))",
"def decode_dict(x: dict):\n assert isinstance(x, dict)\n if \"$type\" in x:\n return decode_typed_value(x)\n else:\n return x",
"def get_from_dictionary(self,dictionary,key):\r\n try:\r\n return dictionary[key]\r\n except KeyError:\r\n raise RuntimeError(\"Dictionary does not contain key '%s'\" %key)",
"def _key_from_dict(d) -> Hashable:\n if isinstance(d, dict):\n return frozenset((k, _key_from_dict(v)) for k, v in d.items())\n elif isinstance(d, (list, tuple)):\n return tuple(map(_key_from_dict, d))\n else:\n return d",
"def visit_Dict(self, node):\n ret = {}\n all_pairs = []\n has_UNRESOLVED_VALUE = False\n has_star_include = False\n for key_node, value_node in zip(node.keys, node.values):\n value_val = self.visit(value_node)\n # This happens in Python 3 for syntax like \"{a: b, **c}\"\n if key_node is None:\n has_star_include = True\n continue\n key_val = self.visit(key_node)\n all_pairs.append((key_val, value_val))\n if not isinstance(key_val, KnownValue) or not isinstance(\n value_val, KnownValue\n ):\n has_UNRESOLVED_VALUE = True\n value = value_val.val if isinstance(value_val, KnownValue) else None\n\n if not isinstance(key_val, KnownValue):\n continue\n\n key = key_val.val\n\n try:\n already_exists = key in ret\n except TypeError as e:\n self._show_error_if_checking(key_node, e, ErrorCode.unhashable_key)\n continue\n\n if (\n already_exists\n and os.path.basename(self.filename)\n not in self.config.IGNORED_FILES_FOR_DUPLICATE_DICT_KEYS\n ):\n self._show_error_if_checking(\n key_node,\n \"Duplicate dictionary key %r\" % (key,),\n ErrorCode.duplicate_dict_key,\n )\n ret[key] = value\n\n if has_star_include:\n # TODO more precise type\n return TypedValue(dict)\n elif has_UNRESOLVED_VALUE:\n return DictIncompleteValue(all_pairs)\n else:\n return KnownValue(ret)",
"def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY",
"def dict_item(dictionary, key):\n try:\n return dictionary.get(key, None)\n except AttributeError:\n # fail silently if something other than a dict is passed\n return None",
"def keyvalue(dict, key):\n try:\n return dict[key]\n except KeyError:\n return ''",
"def loads(kv_data):\n dict_kv = {}\n if isinstance(kv_data, str):\n kvs = json.loads(kv_data)\n for kv in kvs:\n dict_kv[kv['Key']] = kv['Value']\n else:\n print(\"To load Key Value Data it must be String Type\")\n\n return dict_kv",
"def ReadKey(key_type, key):\n try:\n return {\n keyinfo.AES: AesKey.Read,\n keyinfo.HMAC_SHA1: HmacKey.Read,\n keyinfo.DSA_PRIV: DsaPrivateKey.Read,\n keyinfo.RSA_PRIV: RsaPrivateKey.Read,\n keyinfo.DSA_PUB: DsaPublicKey.Read,\n keyinfo.RSA_PUB: RsaPublicKey.Read\n }[key_type](key)\n except KeyError:\n raise errors.KeyczarError(\"Unsupported key key_type: %s\" % key_type)",
"def get(self, key: str):\r\n\r\n if key in self._inner_dict:\r\n return self._inner_dict[key]\r\n else:\r\n raise KeyError(f\"key '{key}' is invalid\")",
"def at_key(a_dict, key):\n\treturn a_dict[key]",
"def get_random_value_from_dict(d: dict):\n return d[get_random_key_from_dict(d)]",
"def dict_pop(d, key):\n return d.pop(key)",
"def sub_dict(d):\n r = {}\n for k in d:\n if type(d[k]) in prims:\n r[k] = d[k]\n elif type(d[k]) is list:\n r[k] = sub_list(d[k])\n elif type(d[k]) is dict:\n r[k] = sub_dict(d[k])\n else:\n print \"Unknown Type: {}\".format(type(d[k]))\n return r",
"def read_key(stub, key):\n try:\n response = stub.Read(keyval_pb2.ReadRequest(key=key))\n print(\"Read result:\")\n print_response(response)\n except grpc.RpcError as exception:\n print_response(exception)",
"def ReadKey(type, key):\n try:\n return {keyinfo.AES: AesKey.Read,\n keyinfo.HMAC_SHA1: HmacKey.Read,\n keyinfo.DSA_PRIV: DsaPrivateKey.Read,\n keyinfo.RSA_PRIV: RsaPrivateKey.Read,\n keyinfo.DSA_PUB: DsaPublicKey.Read,\n keyinfo.RSA_PUB: RsaPublicKey.Read}[type](key)\n except KeyError:\n raise errors.KeyczarError(\"Unsupported key type: %s\" % type)",
"def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyError(key)",
"def dict_from_file(path, key='id', dialect='excel-tab'):\n if not os.path.exists(path):\n raise ValueError(\"File not found: {}\".format(path))\n reader = csv.DictReader(open(path), dialect=dialect)\n return dict([(x[key], x) for x in reader])"
] |
[
"0.64505696",
"0.62807685",
"0.61291075",
"0.6112858",
"0.60777533",
"0.6070061",
"0.6029543",
"0.60195816",
"0.5918118",
"0.5897547",
"0.58628786",
"0.5807329",
"0.57855767",
"0.577967",
"0.5760674",
"0.5735775",
"0.5665205",
"0.5650904",
"0.56203103",
"0.56022245",
"0.55796933",
"0.557871",
"0.5551816",
"0.5543873",
"0.55396074",
"0.55152726",
"0.5492912",
"0.54575104",
"0.54399854",
"0.54144615"
] |
0.7652841
|
0
|
Verify `name` as a candidate and check for record id.
|
def validated_id(cls, name):
if name:
if name in cls._ids:
return cls._ids[name]
if cls.validated_name(name):
if Accounts.exists(name):
return cls.get_id(name)
return None
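
A self-contained sketch of the lookup order this method implements (in-memory cache, then name validation, then an existence check before the id lookup); the Accounts and Users stand-ins below are hypothetical:

class Accounts:
    """Hypothetical stand-in for the real Accounts backend."""
    _registry = {'alice': 42}

    @classmethod
    def exists(cls, name):
        return name in cls._registry

class Users:
    _ids = {'bob': 7}  # hypothetical in-memory cache

    @classmethod
    def validated_name(cls, name):
        return name if name and name.isalnum() else None

    @classmethod
    def get_id(cls, name):
        return Accounts._registry[name]

    @classmethod
    def validated_id(cls, name):
        # same control flow as the method above
        if name:
            if name in cls._ids:
                return cls._ids[name]
            if cls.validated_name(name):
                if Accounts.exists(name):
                    return cls.get_id(name)
        return None

assert Users.validated_id('bob') == 7       # served from the cache
assert Users.validated_id('alice') == 42    # validated, exists, id looked up
assert Users.validated_id('carol') is None  # valid name but no matching account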
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_id(self, id):",
"def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True",
"def _verify_unique_instance_name(self, name):\n existing = self.instances.find_one({'name': name, 'deleted': False})\n if existing:\n raise AXApiInvalidParam(\"Fixture instance with name '{}' already exists\".format(name))",
"def _check_name(self):\n\t\tpass",
"def check_name_uniqueness(cls, user_id, name):\n data_with_same_name = Data.objects.only('id').filter(user_id=user_id, name = name)\n return len(data_with_same_name) == 0",
"def isEditName(id):\n for char in id:\n if re.compile('[0-9]+').match(char[0]) == None:\n print NameError(\"'%s' is not valid name. \\n Id should be numeric\" % (name))\n return -1\n return 0",
"def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')",
"def check_name(self, name):\n status, msg = utils.validate_name(name, \"36\", \"storageview name\")\n if not status:\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n else:\n LOG.info(msg)",
"def assert_known_field(self, name):\n if not (name == self.id_field_name or self.is_known_field(name)):\n raise ChipsError(\"Unknown field in model %s [%s]\", self.__class__.__name__, name)",
"def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])",
"def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None",
"def try_create_uniqe_name(self,name=None,plan_id=None):\n if self.valid_name(name):\n for i in range (1,20):\n new_name=name+\"_\"+str(i)\n if self.unique_name(name=new_name,plan_id=plan_id):\n return new_name\n return False\n else:\n return False",
"def checkValidId(self, id, prep_id = False):\n # RRD docs say that limit on vnames is 255 characters and that\n # A-Za-z0-9_ are the valid characters. Zenoss reserves - for it's own\n # use. Limiting to 200 instead just to leave room for whatever.\n # http://oss.oetiker.ch/rrdtool/doc/rrdgraph_data.en.html\n if len(id) > 200:\n return 'GraphPoint names can not be longer than 200 characters.'\n allowed = set(string.ascii_letters + string.digits + '_')\n attempted = set(id)\n if not attempted.issubset(allowed):\n return 'Only letters, digits and underscores are allowed' + \\\n ' in GraphPoint names.'\n return ZenModelRM.checkValidId(self, id, prep_id)",
"def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))",
"def validate(self, name):\n return name in self.dict",
"def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None",
"def validate_identifier(self, identifier):\n pass",
"def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')",
"def isAddName(name):\t\n if lib.essentials.isAlphanumeric(name) != 0:\n\tprint \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name)\n #output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name))) \n return -1\n \n if lib.essentials.isStartNumeric(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)\n\t#output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)))\n return -1\n\n if lib.essentials.isContainSpecial(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)\n\t#output.completeOutputError(InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)))\n return -1\n\n# if lib.db.db.ifExistsInDatabase(name) == 0:\n#\tprint NameError(\"'%s' is not valid name. \\n Already Exists\" % (name))\n#\treturn -1\n \n return 0",
"def check_name(name, allow_services=False):",
"def identify_id(id: str) -> bool:\n return validate_handle(id)",
"def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def _validate_rule_target_name(name: str) -> None:\n if not name:\n raise common_exceptions.RuleTargetValidationError(\n \"A `name` field must be supplied.\"\n )",
"def isNameUsed(self, name: unicode, startId: long, stopId: long) -> bool:\n ...",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def _solid_name_check(self, solid_name, chk_dict=None):\n return self._name_check(solid_name, 'solids', chk_dict=chk_dict)",
"def the_name_should_not_reflect_in_the_state_of_the_device(name):\n assert (web_app.check_value_in_state(\"name\",name),False)",
"def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)",
"def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)",
"def _validate_snap_name(name, snap_name, strict=True, runas=None):\n snap_name = salt.utils.data.decode(snap_name)\n\n # Try to convert snapshot name to an ID without {}\n if re.match(GUID_REGEX, snap_name):\n return snap_name.strip(\"{}\")\n else:\n return snapshot_name_to_id(name, snap_name, strict=strict, runas=runas)"
] |
[
"0.6676182",
"0.66549516",
"0.65631896",
"0.6561926",
"0.6419692",
"0.64138436",
"0.63998145",
"0.6386987",
"0.62342644",
"0.62223",
"0.62131345",
"0.6199242",
"0.6191196",
"0.6190522",
"0.6190452",
"0.61288655",
"0.6125294",
"0.60896033",
"0.6065993",
"0.60242844",
"0.5990351",
"0.59660745",
"0.5962108",
"0.5944459",
"0.588708",
"0.5871492",
"0.5828084",
"0.5827396",
"0.58128273",
"0.5792331"
] |
0.7007093
|
0
|
Given a community name, get its internal id.
|
def get_id(cls, name):
assert name, 'name is empty'
if name in cls._ids:
return cls._ids[name]
sql = "SELECT id FROM hive_communities WHERE name = :name"
cid = DB.query_one(sql, name=name)
if cid:
cls._ids[name] = cid
cls._names[cid] = name
return cid
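
For reference, a self-contained version of this cached lookup; sqlite3 and the single seeded row stand in for the project's DB wrapper and real table (both are assumptions for the sketch).

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE hive_communities (id INTEGER PRIMARY KEY, name TEXT UNIQUE)")
conn.execute("INSERT INTO hive_communities (id, name) VALUES (7, 'hive-100000')")

_ids, _names = {}, {}  # the two caches the method keeps in sync

def get_id(name):
    assert name, 'name is empty'
    if name in _ids:
        return _ids[name]
    row = conn.execute(
        "SELECT id FROM hive_communities WHERE name = ?", (name,)).fetchone()
    if row:
        cid = row[0]
        _ids[name] = cid     # cache both directions, as in the original
        _names[cid] = name
        return cid
    return None

assert get_id('hive-100000') == 7
assert get_id('hive-999999') is None
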
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _community(G, u, community):\n node_u = G.node[u]\n try:\n return node_u[community]\n except KeyError:\n raise nx.NetworkXAlgorithmError('No community information')",
"def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id",
"def get_network_id_by_name(name: str) -> str:\n networks_info = get_networks()\n\n for network in networks_info[\"networks\"]:\n if network[\"name\"] == name:\n return network[\"id\"]\n\n raise AttributeError(f\"No network named {name}\")",
"def retrieve_node_id(self, wg, node_name):\n\n result = self.retrieve(\"\"\"SELECT id FROM nodes\n WHERE wg = %s AND lower(name) = %s\"\"\",\n (utils.wg_as_int(wg), node_name.strip().lower(), ))\n\n if not result:\n raise UnknownNodeError(\"node does not exist\")\n else:\n return int(result[0][0])",
"def get_id_from_name(slack_client, name):\n api_call = slack_client.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n if 'name' in user and user['name'] == name:\n return user.get('id')\n return None",
"def topic_name_to_id(course, name):\r\n return \"{course}_{run}_{name}\".format(course=course.location.course,\r\n run=course.url_name,\r\n name=name)",
"def get_id(self, name=None, code='default'):\n\t\tairport_id = self.search(Airport._col_tr[code], name)['id'].tolist()\n\t\treturn airport_id if len(airport_id) > 1 else airport_id[0]",
"def get_id(self, name=None):\n\n # Support using integer IDs directly\n if isinstance(name, int):\n return name\n\n self.ensure_loaded()\n if name is not None:\n ems_systems = self.search('name', name.upper(), searchtype=\"match\")\n if ems_systems.empty:\n sys_names = self.list_all()['name'].to_list()\n raise ValueError(\n 'No matching systems found. You have access to: {0}'.format(sys_names))\n id = ems_systems.iloc[0]['id']\n else:\n ems_systems = self.list_all()\n if ems_systems.shape[0] == 1:\n id = ems_systems.iloc[0]['id']\n else:\n raise LookupError(\n 'Multiple ems systems found. Please select one from the available:\\n{0}'\n .format(ems_systems.loc[:, ['id', 'name']])\n )\n return id",
"def get_id_from_project_name(name: str) -> int:\n proj = db.session.query(TProjects).filter(TProjects.name == name).first()\n\n if proj is None:\n return -1\n\n return proj.id_project",
"def id(self): \n if self.cloudnet:\n return self.cloudnet.id\n else:\n return None",
"def get_party_id_by_name(party_name: str) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n use_name = \"\"\n for chars in party_name:\n if chars in [\"'\", '\"']:\n use_name = use_name + \"%\"\n else:\n use_name = use_name + chars\n query = \"select id from party where name LIKE '{}';\".format(use_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]",
"def club_id(self, club_name):\r\n # UTF-8 comparison\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n # Get teamid from the bets\r\n team1 = i.find('a')['title']\r\n team2 = i.find_all('a')[1]['title']\r\n if club_name == team1:\r\n return i.find('a')['href'].split('cid=')[1]\r\n elif club_name == team2:\r\n return i.find_all('a')[1]['href'].split('cid=')[1]\r\n return None",
"def id(self) -> str:\n\n return self._inst.query('*IDN?')",
"def get_transport_id_by_name(transport_name: str) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id from transport where name = '{}';\".format(transport_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def channelNameToID(channelName):\n slackAuth = slackHeader(current_user.slackToken)\n\n # channel list request\n channelListRequest = get(\"https://slack.com/api/conversations.list?\" + parse.urlencode({\"exclude_archived\": True, \"limit\": 1000}), headers=slackAuth)\n channelListRequest = channelListRequest.json()\n\n # get a list of all the channel names and id's\n return next((current['id'] for current in channelListRequest['channels'] if current['name'] == channelName), None)",
"def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")",
"def _ecc_id(self, ecc_type_name):\n return self.ecc_lookup[ecc_type_name]",
"def get_channels_id(channel_name):\n response = slackclient.api_call(\"groups.list\")\n groups = response.get(\"groups\")\n for group in groups:\n if group.get(\"name_normalized\") == channel_name:\n return (group.get(\"id\"))",
"def get_device_id_by_name(self, device_name):\n\n return self.get_device_by_name(device_name).id",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def get_group_id(namespace: str, identifier: str) -> int:\n return Murmur3().get_group_id(namespace, identifier)",
"def grab_external_id(stix_object, source_name):\n for external_reference in stix_object.get(\"external_references\", []):\n if external_reference.get(\"source_name\") == source_name:\n return external_reference[\"external_id\"]",
"def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")",
"def organization_id():\n return os.environ[\"GCLOUD_ORGANIZATION\"]",
"def get_client_id(name, sort_code):\n output = None\n\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL ' \\\n u'WHERE name = %s ' \\\n u'AND sort_code = %s'\n\n data = (name, sort_code)\n c.execute(sql, data)\n value = c.fetchone()\n if value is not None:\n output = value[0]\n\n return output",
"def get_ident():\n return -1",
"async def __board_id(self) -> str:\n url = await self.__url_with_auth(\"1/members/me/boards?fields=name\")\n boards = await (await super()._get_source_responses(url))[0].json()\n return str(first(boards, lambda board: self._parameter(\"board\") in board.values())[\"id\"])",
"def org_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"org_id\")",
"def get_building_id_from_name(building_name):\n building_info_txt = open('building_info.txt')\n building_info_json = json.load(building_info_txt)\n\n building_name_f = building_name.lower().replace('_', ' ')\n parent_name_to_id = dict()\n for building in building_info_json['data']:\n parent_name = building['parent_name']\n if parent_name.lower() == building_name_f:\n return building['parent_id']\n return None"
] |
[
"0.6250433",
"0.6121883",
"0.5975133",
"0.59303844",
"0.59173137",
"0.5826043",
"0.5796814",
"0.57310736",
"0.5728848",
"0.57082736",
"0.570665",
"0.57005477",
"0.569628",
"0.5695367",
"0.56726134",
"0.5668089",
"0.56609166",
"0.565801",
"0.5649222",
"0.56416994",
"0.56351113",
"0.5603983",
"0.5601097",
"0.55766034",
"0.5561491",
"0.55390126",
"0.553591",
"0.55339",
"0.5532439",
"0.5507987"
] |
0.74913245
|
0
|
Return a list of all muted accounts.
|
def get_all_muted(cls, community_id):
return DB.query_col("""SELECT name FROM hive_accounts
WHERE id IN (SELECT account_id FROM hive_roles
WHERE community_id = :community_id
AND role_id < 0)""",
community_id=community_id)
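
The convention here is that a negative role_id marks a muted account. Below is a runnable sketch of the same nested query, with sqlite3 and a few made-up rows standing in for the real tables.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE hive_accounts (id INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE hive_roles (account_id INTEGER, community_id INTEGER, role_id INTEGER);
    INSERT INTO hive_accounts VALUES (1, 'alice'), (2, 'bob'), (3, 'carol');
    INSERT INTO hive_roles VALUES (1, 10, -2), (2, 10, 2), (3, 10, -1);
""")

def get_all_muted(community_id):
    """Names of all accounts holding a negative role in the community."""
    rows = conn.execute(
        """SELECT name FROM hive_accounts
            WHERE id IN (SELECT account_id FROM hive_roles
                          WHERE community_id = ? AND role_id < 0)""",
        (community_id,)).fetchall()
    return [r[0] for r in rows]

assert sorted(get_all_muted(10)) == ['alice', 'carol']
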
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def all_users(self):\n distinct_users = list(self.client.smartsleep.attendees.distinct(\"userId\"))\n return distinct_users",
"def unlocked_accounts(self):\n return [account for account in self if not account.locked]",
"def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers",
"async def mutedlist(self, ctx):\r\n server = ctx.message.guild\r\n msg = \"\"\r\n i = 0\r\n try:\r\n for userid in self.d[str(server.id)]:\r\n if self.d[str(server.id)][userid][\"toggle\"] == True:\r\n i = i + 1\r\n except:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n if i == 0:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n for userid in self.d[str(server.id)]:\r\n if self.d[str(server.id)][userid][\"time\"] == None or self.d[str(server.id)][userid][\"time\"] - ctx.message.created_at.timestamp() + self.d[str(server.id)][userid][\"amount\"] <= 0:\r\n time = \"Infinite\"\r\n else:\r\n m, s = divmod(self.d[str(server.id)][userid][\"time\"] - ctx.message.created_at.timestamp() +\r\n self.d[str(server.id)][userid][\"amount\"], 60)\r\n h, m = divmod(m, 60)\r\n d, h = divmod(h, 24)\r\n if d == 0:\r\n time = \"%d hours %d minutes %d seconds\" % (h, m, s)\r\n if h == 0 and d == 0:\r\n time = \"%d minutes %d seconds\" % (m, s)\r\n elif h == 0 and m == 0:\r\n time = \"%d seconds\" % (s)\r\n else:\r\n time = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\r\n if self.d[str(server.id)][userid][\"toggle\"] == True:\r\n user = discord.utils.get(server.members, id=int(userid))\r\n if user:\r\n msg += \"{} - {} (Till mute ends)\\n\".format(user, time)\r\n if not msg:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n s = discord.Embed(description=msg, colour=0xfff90d, timestamp=datetime.datetime.utcnow())\r\n s.set_author(name=\"Mute List for {}\".format(server), icon_url=server.icon_url)\r\n await ctx.send(embed=s)",
"def list_accounts(self):\n pass",
"def auto_unmute():\n muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs of people you want to remain muted here\n users_keep_muted = set([])\n \n # mute all \n for user_id in muted:\n if user_id not in users_keep_muted:\n t.mutes.users.destroy(user_id=user_id)\n print(\"unmuted %d\" % (user_id))",
"def filter_users_by_mute_subscription(\n self, user_ids: List[UserId], mute_all_dataset_notifications=None\n ) -> List[U]:\n ...",
"def GetAccountList(self):\n\t\treturn self.accounts.keys()",
"def accounts(self):\n return self._accounts.values()",
"def display_accounts(cls):\n return cls.account_list",
"def get_accounts(self):\n\n\t\treturn self.__accounts",
"def muting(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nMuting statistics for @%s (please wait...)\" % a.name)\n muting = a.get_mutings(False)\n muting.print_summarize_table(tag_type=\"Muting\")",
"def get_accounts(self):\n return self.accounts.all()",
"def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()",
"async def get_blacklisted_users() -> list:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n async with db.execute(\n \"SELECT user_id, strftime('%s', created_at) FROM blacklist\"\n ) as cursor:\n result = await cursor.fetchall()\n return result",
"def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts",
"def get_users_list(self, session):\n\n users = session.query(User.chat_id).filter(User.is_admin==False).all()\n return users",
"def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return",
"def get_accounts(self):\r\n return self._accounts",
"async def mute(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=True)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully muted the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"def get_active_users(text_channel) -> List[discord.Member]:\n\n active_users = []\n for m in text_channel.members:\n if m.status.name in [\"online\", \"dnd\"] and m.bot == False:\n active_users.append(m)\n\n return active_users",
"def get_all_users():",
"def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]",
"def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()",
"def get_accounts(self):\n return self.accounts",
"def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))",
"def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users",
"def muter(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nMuters statistics for @%s (please wait...)\" % a.name)\n muters = a.get_muters(False)\n muters.print_summarize_table(tag_type=\"Muters\")",
"def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)",
"def get_fedcm_account_list(self):\n pass"
] |
[
"0.6307945",
"0.6274277",
"0.623905",
"0.6095837",
"0.6094344",
"0.60708183",
"0.6038648",
"0.59835505",
"0.59828734",
"0.57955194",
"0.5775009",
"0.5770793",
"0.5766413",
"0.57480866",
"0.57162577",
"0.57076395",
"0.5706624",
"0.5699043",
"0.56884253",
"0.56756544",
"0.5655423",
"0.5652996",
"0.5644527",
"0.5644268",
"0.5635291",
"0.5633136",
"0.56230485",
"0.5622781",
"0.5586709",
"0.5545969"
] |
0.7069909
|
0
|
Get user role within a specific community.
|
def get_user_role(cls, community_id, account_id):
return DB.query_one("""SELECT role_id FROM hive_roles
WHERE community_id = :community_id
AND account_id = :account_id
LIMIT 1""",
community_id=community_id,
account_id=account_id) or Role.guest.value
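
A sketch of the same lookup-with-default, assuming a hypothetical numeric Role ladder (the exact values are an assumption) and sqlite3 in place of the DB wrapper.

import sqlite3
from enum import IntEnum

class Role(IntEnum):
    # Assumed numeric ladder; only the guest-as-fallback behaviour matters here.
    muted = -2
    guest = 0
    member = 2
    admin = 6

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE hive_roles (community_id INTEGER, account_id INTEGER, role_id INTEGER)")
conn.execute("INSERT INTO hive_roles VALUES (10, 1, 2)")

def get_user_role(community_id, account_id):
    row = conn.execute(
        """SELECT role_id FROM hive_roles
            WHERE community_id = ? AND account_id = ? LIMIT 1""",
        (community_id, account_id)).fetchone()
    # No matching row -> guest, mirroring the `or Role.guest.value` fallback above.
    return row[0] if row else Role.guest.value

assert get_user_role(10, 1) == Role.member.value
assert get_user_role(10, 99) == Role.guest.value
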
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getRole(self, node):\n info = self.getNode(node, includeDevices=False)\n if info is None:\n self.log.error(\"could not get role because '%s' does not exist\", node)\n return None\n return info.role",
"def get_user_role():\n\n if session['user_role'] == 'student':\n return student\n elif session['user_role'] == 'tutor':\n return tutor\n else:\n raise Exception(\"User is not student or tutor. Who is user?\")",
"def get_organisation_with_role(user, rolecode):\n return get_organisations_with_role(user, rolecode).get()",
"def get_role(self):\n return self.role",
"def getRole(self, desired=None):\n strDes = str(desired)\n logging.debug(\"[LaymanAuthLiferay][getRole]: '%s'\"%strDes)\n if not self.authorised:\n logging.error(\"[LaymanAuthLiferay][getRole] The user is not authorised\")\n raise AuthError(401, \"I am sorry, but you are not authorised\")\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"roles\"]:\n roles = self.authJson[\"userInfo\"][\"roles\"]\n if len(roles) < 1:\n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay provided empty list of roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay provided empty list of roles\") \n\n theRole = roles[0]\n for r in roles:\n if desired == r[\"roleName\"]:\n theRole = r\n\n #lower and spaces\n #theRole[\"roleName\"] = theRole[\"roleName\"].lower()\n #theRole[\"roleName\"] = \"_\".join(theRole[\"roleName\"].split(' '))\n roleName = theRole[\"roleName\"]\n logging.debug(\"[LaymanAuthLiferay][getRole] The role: '%s'\"% roleName)\n return theRole\n else: \n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay did not provide user's roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay did not provide user's roles\")",
"def get_role(ssm):\n nodes = ssm[\"nodes\"]\n for node in nodes:\n if node[\"type\"] == \"role\":\n return node[\"name\"]\n return \"no role\"",
"def get_roles(role):",
"def _community(G, u, community):\n node_u = G.node[u]\n try:\n return node_u[community]\n except KeyError:\n raise nx.NetworkXAlgorithmError('No community information')",
"def _get_role(self):\n return self.__role",
"def get_role(self):\n memberships = Membership.objects.filter(person = self, entity__abstract_entity = False, importance_to_person__gte = 2).order_by('-importance_to_person')\n if memberships:\n return memberships[0]\n else: # the poor person had no memberships\n return None",
"async def get_role(request, role_id):\n conn = await create_connection()\n\n head_block = await utils.get_request_block(request)\n role_resource = await roles_query.fetch_role_resource(conn, role_id)\n conn.close()\n return await utils.create_response(conn, request.url, role_resource, head_block)",
"def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')",
"def get(self, role_id):\n return self.client.get_role(role_id)",
"def community(self):\n return self._community",
"def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()",
"def get_role(role_id: int) -> Optional[Role]:\n return db.session.query(Role).get(role_id)",
"def courses_with_role(self):\r\n return CourseAccessRole.objects.filter(role=self.role, user=self.user)",
"def role(self) -> str:\n return pulumi.get(self, \"role\")",
"def get_role(resource_root, service_name, name, cluster_name=\"default\"):\n return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))",
"def token_role(self, role):\n return self.read('auth/token/roles/{0}'.format(role))",
"def get(self):\n return self._roles.get(self._id)",
"def get_user_role(user, course_key):\r\n if is_masquerading_as_student(user):\r\n return 'student'\r\n elif has_access(user, 'instructor', course_key):\r\n return 'instructor'\r\n elif has_access(user, 'staff', course_key):\r\n return 'staff'\r\n else:\r\n return 'student'",
"def get_course_access_role(user, org, course_id, role):\n try:\n course_access_role = _CourseAccessRole.objects.get(\n user=user,\n org=org,\n course_id=course_id,\n role=role,\n )\n except _CourseAccessRole.DoesNotExist:\n log.exception('No CourseAccessRole found for user_id=%(user_id)s, org=%(org)s, '\n 'course_id=%(course_id)s, and role=%(role)s.', {\n 'user': user.id,\n 'org': org,\n 'course_id': course_id,\n 'role': role,\n })\n return None\n return course_access_role",
"def compute_roles(community_vect, sparse_mat, role_type=\"Amaral_roles\"):\n\n dense_mat = sparse_mat.todense()\n undir_dense_mat = dense_mat + np.transpose(dense_mat)\n bin_dense_mat = np.array(undir_dense_mat != 0, dtype=int)\n\n # within community Z-degree\n Z_com_deg = _return_Z_com_deg(community_vect, bin_dense_mat)\n\n # participation_coeff\n parti_coef = _return_parti_coef(community_vect, bin_dense_mat)\n\n if role_type == \"Amaral_roles\":\n node_roles = _return_amaral_roles(Z_com_deg, parti_coef)\n\n elif role_type == \"4roles\":\n node_roles = _return_4roles(Z_com_deg, parti_coef)\n\n return node_roles, Z_com_deg, parti_coef",
"async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)",
"def get_role(self, name):\n role = Role.query.filter_by(name=name).first()\n\n return role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover"
] |
[
"0.6476978",
"0.63823515",
"0.6363005",
"0.6283268",
"0.6265126",
"0.62571126",
"0.62291235",
"0.62223184",
"0.61968863",
"0.60822725",
"0.60033077",
"0.5954628",
"0.593859",
"0.59317774",
"0.59272474",
"0.591147",
"0.5909513",
"0.58935285",
"0.58813465",
"0.5865219",
"0.5852297",
"0.5819865",
"0.5814587",
"0.5810928",
"0.5803299",
"0.579078",
"0.57718444",
"0.57718444",
"0.57718444",
"0.5758297"
] |
0.71126795
|
0
|
Given a new post or comment, check whether it is valid under the community's rules.
|
def is_post_valid(cls, community_id, comment_op: dict):
assert community_id, 'no community_id'
community = cls._get_name(community_id)
account_id = Accounts.get_id(comment_op['author'])
role = cls.get_user_role(community_id, account_id)
type_id = int(community[5])
# TODO: check `nsfw` tag requirement #267
# TODO: (1.5) check that beneficiaries are valid
if type_id == TYPE_JOURNAL:
if not comment_op['parent_author']:
return role >= Role.member
elif type_id == TYPE_COUNCIL:
return role >= Role.member
return role >= Role.guest # or at least not muted
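
The gating rules reduce to a small pure function once the database lookups are taken as inputs. The TYPE_* constants and Role values below are assumptions made for illustration only.

from enum import IntEnum

class Role(IntEnum):
    muted = -2
    guest = 0
    member = 2

TYPE_TOPIC, TYPE_JOURNAL, TYPE_COUNCIL = 1, 2, 3  # assumed values

def is_post_valid(community_type, role, is_top_level):
    """Journals restrict top-level posts to members, councils restrict every
    post to members, and any other community only requires guest (not muted)."""
    if community_type == TYPE_JOURNAL and is_top_level:
        return role >= Role.member
    if community_type == TYPE_COUNCIL:
        return role >= Role.member
    return role >= Role.guest

assert is_post_valid(TYPE_JOURNAL, Role.guest, is_top_level=True) is False
assert is_post_valid(TYPE_JOURNAL, Role.guest, is_top_level=False) is True
assert is_post_valid(TYPE_COUNCIL, Role.member, is_top_level=False) is True
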
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_blog_post(self, req, postname, version, fields):\n for category in _parse_categories(fields['categories']):\n if category in self.draft:\n if req.authname == 'anonymous':\n return [(None, 'You need to be logged in to save as draft.')]\n elif req.authname != fields['author']:\n return [(None, \"Cannot save draft for an author that isn't you.\")]\n return []",
"def check_comments(self, args):\n\n for submission in args.comments:\n if any(char.isalpha() for char in submission[1]) \\\n or self._illegal_chars.search(submission[1]) != None:\n raise ValueError",
"def is_new_post(self, post):\n return self.last_post != post['id']",
"def is_valid(self):\n # Check if required fields are filled.\n if self.post_url == \"\" or self.post_url_hash == \"\" or self.title == \"\":\n return False\n\n # Check if the hash of the URL matches the hash field.\n if self.post_url_hash != url_to_hashkey(self.post_url):\n return False\n\n return True",
"def test_user_can_comment_on_article_data(self):\n token1 = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token1)\n response = self.create_comment(\n token=token1, \n parentId=0,\n slug=response.data['article']['slug']\n )\n\n self.assertEqual(\n response.data['comment']['body'], \n VALID_COMMENT['body']\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_201_CREATED\n )",
"def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)",
"def question_new_validate():",
"def _validate_post(self, source_file, frontmatter):\n required = set([\n 'date',\n 'title',\n ])\n fields = set(frontmatter.keys())\n missing = required - fields\n if missing:\n raise AbortError(_(\n 'The blog post, {filename}, '\n 'is missing required fields: {missing_fields}'.format(\n filename=source_file, missing_fields=', '.join(missing))))",
"def validate_article(article):\r\n\tcollection=create_database_connection()\r\n\tinsert_article(collection,article)",
"def check_can_comment(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if ((self.type != 'O' or self.comment_membership_required) and\n (membership is None or\n membership.is_banned() or\n membership.is_left())):\n return False\n elif (self.comment_condition == 'K' and\n user.profile.karma < self.post_karma_threshold):\n return False\n else:\n return True",
"def test_user_can_reply_to_comment(self):\n token1 = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token1)\n response = self.create_comment(\n token=token1, \n parentId=0,\n slug=response.data['article']['slug']\n )\n response = self.create_comment(\n token=token1, \n parentId=response.data['comment']['id'],\n slug=response.data['comment']['article']['slug']\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_201_CREATED\n )\n self.assertEqual(\n response.data['comment']['body'],\n VALID_COMMENT['body']\n )",
"def form_valid(self, form):\n post = self.get_object()\n #WAS: post.comment_add(form.cleaned_data['text'], self.request.user)\n post.add_comment(form.cleaned_data['text'], \n self.request.user,\n added_at=None, \n by_email=False\n )\n return views_support.response_success(self.request)",
"def test_invalid_user_can_update_comment(self):\n token = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token)\n\n response = self.create_comment(\n token=token,\n parentId=0,\n slug=response.data['article']['slug']\n )\n token = self.create_user(VALID_USER_DATA_2)\n\n get_comment_url = reverse('crud-comment', kwargs={\n 'id': response.data['comment']['id']\n })\n response = self.client.put(\n get_comment_url,\n HTTP_AUTHORIZATION=token,\n data=VALID_COMMENT_2,\n format='json'\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN\n )",
"def test_comments_are_moderated(self):\n self.assertFalse(Article().comments_are_moderated, \"comment should not be moderated yet\")\n self.assertTrue(\n Article(publication_date=datetime.min).comments_are_moderated,\n \"old comment should be moderated\",\n )\n\n # Test ranges\n days = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS\n self.assertFalse(\n Article(publication_date=now() - timedelta(days=days - 1)).comments_are_moderated\n )\n self.assertTrue(\n Article(publication_date=now() - timedelta(days=days)).comments_are_moderated\n )\n self.assertTrue(\n Article(publication_date=now() - timedelta(days=days + 1)).comments_are_moderated\n )",
"def test_comment_not_null(self):\n comments = self.story.get_comments()\n comment = comments[randrange(0, len(comments))]\n self.assertTrue(bool(comment.body))\n self.assertTrue(bool(comment.body_html))",
"def create_post(self: User, content: str, is_public: bool, circles: List[Circle], reshareable: bool,\n reshared_from: Optional[Post], media_list: List[Media], mentioned_users: List[User],\n is_update_avatar: bool) \\\n -> Union[Post, bool]:\n if not content and not media_list:\n # a post has to have either content or media\n return False\n\n new_post = Post()\n new_post.eid = make_uuid()\n new_post.author = self.id\n if content:\n new_post.content = bleach.clean(content)\n new_post.is_public = is_public\n new_post.circles = circles\n new_post.media_list = media_list\n new_post.is_update_avatar = is_update_avatar\n\n if reshared_from and not reshareable:\n # if resharing from a post, this post must also be reshareable, otherwise it's logically wrong\n return False\n\n if reshared_from:\n if media_list:\n # when resharing, only allow content (text), e.g. no media\n return False\n\n if reshared_from.reshared_from:\n # if reshared_from itself is a reshared post, reshare reshared_from's original post\n # reshared_from.reshared_from is LazyReference so need to retrieve the full post\n reshared_from = get_in_post_cache(reshared_from.reshared_from.id)\n\n # same explanation for context_home_or_profile=False\n if not sees_post(self, reshared_from, context_home_or_profile=False):\n return False\n\n if not reshared_from.reshareable:\n return False\n\n new_post.reshared_from = reshared_from\n\n new_post.reshareable = reshareable\n new_post.save()\n\n if reshared_from:\n create_notification(\n self,\n notifying_href=new_post.make_href(),\n notifying_summary=new_post.content,\n notifying_action=NotifyingAction.Reshare,\n notified_href=reshared_from.make_href(),\n notified_summary=reshared_from.content,\n owner=reshared_from.author\n )\n # only cache reshared post\n set_in_post_cache(reshared_from)\n\n mention(\n self,\n notified_href=new_post.make_href(),\n notified_summary=new_post.content,\n mentioned_users=mentioned_users\n )\n\n return new_post",
"def test_user_can_update_their_comment_with_same_data(self):\n token = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token)\n\n response = self.create_comment(\n token=token,\n parentId=0,\n slug=response.data['article']['slug']\n )\n get_comment_url = reverse('crud-comment', kwargs={\n 'id': response.data['comment']['id']\n })\n response = self.client.put(\n get_comment_url,\n HTTP_AUTHORIZATION=token,\n data=VALID_COMMENT,\n format='json'\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST\n )\n self.assertEqual(\n response.data['message'],\n 'No changes made to the comment'\n )",
"def sees_post(self, post, context_home_or_profile):\n if owns_post(self, post):\n return True\n if context_home_or_profile and post.author not in self.followings:\n return False\n if post.is_public:\n return True\n else:\n for circle in post.circles:\n circle = get_in_circle_cache(circle.id)\n if check_member(circle, self):\n return True\n return False",
"def check_can_post(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if ((self.type != 'O' or self.post_membership_required or self.post_admin_required) and\n (membership is None or\n membership.is_banned() or\n membership.is_left())):\n return False\n elif self.post_admin_required and membership.role not in ['O', 'A']:\n return False\n elif (self.post_condition == 'K' and\n user.profile.karma < self.post_karma_threshold):\n return False\n else:\n return True",
"def is_new_for(post, user):\n return NewBlog.objects.filter(user=user, post=post)",
"def process_post(new_post, cfg):\n id_already_handled_in_db = i18n['debug']['id_already_handled_in_db']\n discovered_submit_title = i18n['posts']['discovered_submit_title']\n rules_comment = i18n['posts']['rules_comment']\n yt_already_has_transcripts = i18n['posts']['yt_already_has_transcripts']\n\n if new_post['subreddit'] in cfg.upvote_filter_subs:\n # ignore posts if they don't meet the threshold for karma and the sub\n # is in our list of upvoted filtered ones\n if new_post['ups'] < cfg.upvote_filter_subs[new_post['subreddit']]:\n return\n\n if not is_valid(new_post['name'], cfg):\n logging.debug(id_already_handled_in_db.format(new_post['name']))\n return\n\n if new_post['archived']:\n return\n\n if new_post['author'] is None:\n # we don't want to handle deleted posts, that's just silly\n return\n\n logging.info(\n f'Posting call for transcription on ID {new_post[\"name\"]} posted by '\n f'{new_post[\"author\"]}'\n )\n\n if new_post['domain'] in cfg.image_domains:\n content_type = 'image'\n content_format = cfg.image_formatting\n\n elif new_post['domain'] in cfg.audio_domains:\n content_type = 'audio'\n content_format = cfg.audio_formatting\n\n elif new_post['domain'] in cfg.video_domains:\n if 'youtu' in new_post['domain']:\n if not valid_youtube_video(new_post['url']):\n add_complete_post_id(new_post['name'], cfg)\n return\n if get_yt_transcript(new_post['url']):\n np = cfg.r.submission(id=new_post['name'])\n np.reply(_(\n yt_already_has_transcripts\n ))\n add_complete_post_id(new_post['name'], cfg)\n logging.info(\n f'Found YouTube video, {get_yt_video_id(new_post[\"url\"])},'\n f' with good transcripts.'\n )\n return\n content_type = 'video'\n content_format = cfg.video_formatting\n else:\n # This means we pulled from a subreddit bypassing the filters.\n content_type = 'Other'\n content_format = cfg.other_formatting\n\n # Truncate a post title if it exceeds 250 characters, so the added\n # formatting still fits in Reddit's 300 char limit for post titles\n post_title = new_post['title']\n max_title_length = 250\n if len(post_title) > max_title_length:\n post_title = post_title[:max_title_length - 3] + '...'\n\n # noinspection PyBroadException\n try:\n result = cfg.tor.submit(\n title=discovered_submit_title.format(\n sub=new_post['subreddit'],\n type=content_type.title(),\n title=post_title\n ),\n url=reddit_url.format(new_post['permalink'])\n )\n result.reply(\n _(\n rules_comment.format(\n post_type=content_type,\n formatting=content_format,\n header=cfg.header\n )\n )\n )\n flair_post(result, flair.unclaimed)\n\n add_complete_post_id(new_post['name'], cfg)\n cfg.redis.incr('total_posted', amount=1)\n\n if cfg.OCR and content_type == 'image':\n # hook for OCR bot; in order to avoid race conditions, we add the\n # key / value pair that the bot isn't looking for before adding\n # to the set that it's monitoring.\n cfg.redis.set(new_post['name'], result.fullname)\n cfg.redis.rpush('ocr_ids', new_post['name'])\n\n cfg.redis.incr('total_new', amount=1)\n\n # The only errors that happen here are on Reddit's side -- pretty much\n # exclusively 503s and 403s that arbitrarily resolve themselves. A missed\n # post or two is not the end of the world.\n except Exception as e:\n logging.error(\n f'{e} - unable to post content.\\nID: {new_post[\"name\"]}\\n '\n f'Title: {new_post[\"title\"]}\\n Subreddit: '\n f'{new_post[\"subreddit\"]}'\n )",
"def test_post_comment_user_data_validation(self):\n\n # omit both input\n r1 = self.client.post(reverse('movieapi:comments'))\n self.assertJSONEqual(\n r1.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r1.status_code, 400)\n\n # omit only comment\n r2 = self.client.post(reverse('movieapi:comments'), {'movie_id': 'tt0112573'})\n self.assertJSONEqual(\n r2.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r2.status_code, 400)\n\n r3 = self.client.post(reverse('movieapi:comments'), {'comment': 'test comment'})\n self.assertJSONEqual(\n r3.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r3.status_code, 400)",
"def process_if_valid(cls, actor, op_json, date):\n op = CommunityOp(actor, date)\n if op.validate(op_json):\n op.process()\n return True\n return False",
"def test_model_can_create_a_post(self):\n old_count = Post.objects.count()\n self.post.save()\n new_count = Post.objects.count()\n self.assertNotEqual(old_count, new_count)",
"def test_model_can_create_a_post(self):\n old_count = Post.objects.count()\n self.post.save()\n new_count = Post.objects.count()\n self.assertNotEqual(old_count, new_count)",
"def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()",
"def test_editing_post_comment(self):\n\n form_data = {\"comment\": \"Here's my new comment!\"}\n new_comment = edit_post_comment(1, form_data)\n\n self.assertIn(\"my new comment\", new_comment.comment_body)",
"def test_user_can_get_comments_of_invalid_article(self):\n token = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token)\n\n response = self.create_comment(\n token=token,\n parentId=0,\n slug=response.data['article']['slug']\n )\n token = self.create_user(VALID_USER_DATA_2)\n\n get_comment_url = reverse('comments', kwargs={\n 'slug': 'random-non-existent-article-0x3',\n 'id': 0\n })\n response = self.client.get(\n get_comment_url,\n HTTP_AUTHORIZATION=token,\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_404_NOT_FOUND\n )",
"def test_add_comment(self):\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)",
"def test_issue_post_comment_reaction(self):\n pass"
] |
[
"0.62227374",
"0.61295384",
"0.60019886",
"0.59893394",
"0.5949739",
"0.59365237",
"0.5877104",
"0.5830495",
"0.57753825",
"0.57273066",
"0.5714507",
"0.57066494",
"0.56890285",
"0.56612223",
"0.5660342",
"0.5644787",
"0.56245315",
"0.559673",
"0.5590323",
"0.5586681",
"0.5581335",
"0.55806434",
"0.5566802",
"0.5563535",
"0.5563535",
"0.5547472",
"0.5545426",
"0.5522109",
"0.5499991",
"0.5491119"
] |
0.7543054
|
0
|
Update all pending payout and rank fields.
|
def recalc_pending_payouts(cls):
sql = """SELECT id,
COALESCE(posts, 0),
COALESCE(payouts, 0),
COALESCE(authors, 0)
FROM hive_communities c
LEFT JOIN (
SELECT community_id,
COUNT(*) posts,
ROUND(SUM(payout)) payouts,
COUNT(DISTINCT author) authors
FROM hive_posts_cache
WHERE community_id IS NOT NULL
AND is_paidout = '0'
GROUP BY community_id
) p
ON community_id = id
ORDER BY COALESCE(payouts, 0) DESC,
COALESCE(authors, 0) DESC,
COALESCE(posts, 0) DESC,
subscribers DESC,
(CASE WHEN c.title = '' THEN 1 ELSE 0 END)
"""
for rank, row in enumerate(DB.query_all(sql)):
cid, posts, payouts, authors = row
sql = """UPDATE hive_communities
SET sum_pending = :payouts, num_pending = :posts,
num_authors = :authors, rank = :rank
WHERE id = :id"""
DB.query(sql, id=cid, payouts=payouts, posts=posts,
authors=authors, rank=rank+1)
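
The method's two-step shape, aggregate pending posts per community then order by the same keys and write back 1-based ranks, can be sketched without a database. The lists and dicts below are made-up stand-ins for hive_posts_cache and hive_communities.

# (community_id, author, payout, is_paidout) rows, invented for the sketch
pending_posts = [
    (10, 'alice', 5.0, False), (10, 'bob', 7.0, False),
    (20, 'carol', 2.0, False), (20, 'carol', 1.0, True),
]
communities = {10: {'subscribers': 3, 'title': 'A'},
               20: {'subscribers': 9, 'title': ''}}

totals = {cid: {'posts': 0, 'payouts': 0.0, 'authors': set()} for cid in communities}
for cid, author, payout, paid in pending_posts:
    if paid:
        continue                      # WHERE is_paidout = '0'
    t = totals[cid]
    t['posts'] += 1
    t['payouts'] += payout
    t['authors'].add(author)

order = sorted(
    communities,
    key=lambda cid: (-round(totals[cid]['payouts']),
                     -len(totals[cid]['authors']),
                     -totals[cid]['posts'],
                     -communities[cid]['subscribers'],
                     communities[cid]['title'] == ''))   # untitled sorts last

for rank, cid in enumerate(order, start=1):
    communities[cid].update(sum_pending=round(totals[cid]['payouts']),
                            num_pending=totals[cid]['posts'],
                            num_authors=len(totals[cid]['authors']),
                            rank=rank)

assert communities[10]['rank'] == 1 and communities[20]['rank'] == 2
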
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_budgets(self):\n\n # Init the variables\n self.next_Omega = self.Omega\n self.next_Phi = self.Phi\n self.next_Lambda = self.Lambda\n # Update the one to be updated\n if self.player == 0:\n self.next_Omega = self.Omega - 1\n elif self.player == 1:\n self.next_Phi = self.Phi - 1\n elif self.player == 2:\n self.next_Lambda = self.Lambda - 1",
"def payout(self):\n if not self.is_closable():\n raise ValueError(\"The tournament can't be closed.\")\n if self.is_paid:\n raise ValueError(\"This tournament has already been paid out.\")\n payout_information = self.get_payout_information()\n for info in payout_information:\n players, cut = info[\"players\"], info[\"won\"]\n for player in players:\n user_profile = player.user.get_profile()\n user_profile.credits += cut\n user_profile.save()\n self.is_open = False\n self.is_paid = True\n self.save()",
"def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()",
"def update(self):\r\n debug.write(\"[SourceRPG] Updating all ranked positions\", 1)\r\n database.execute(\"SELECT steamid FROM Player ORDER BY level DESC,xp DESC\")\r\n results = database.cursor.fetchall()\r\n self.ranks = []\r\n for index, steamid in enumerate(results):\r\n debug.write(\"Rank: %s Steamid: %s\" % (index, steamid), 5)\r\n self.ranks.append(steamid[0])\r\n debug.write(\"[SourceRPG] All ranked positions updated\", 1)",
"def payout(self):\n self.close()\n if self.is_paid:\n raise ValueError(\"Already paid out the wager.\")\n self.is_paid = True\n self.paid_on = datetime.datetime.now()\n payouts = self.get_payout_information()\n for info in payouts:\n bet, credits = info[\"bet\"], info[\"won\"]\n player = bet.created_by\n player.credits += credits\n player.save()\n self.save()",
"def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)",
"def update(self):\r\n if self.games and all(game.result for game in self.games):\r\n self.rankings = self.compute_ranking()\r\n self.update_observers()\r\n\r\n if self.finals:\r\n for final in self.finals:\r\n final.update()",
"def update_totals(self, i, no_steps):\n\t\tself.all_rewards.append(self.round_reward)\n\t\tself.round_reward = 0\n\t\tself.all_iterations.append(i)\n\t\tself.step_count.append(no_steps)",
"def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)",
"def updateAll(self):\n \tself.idToUpdate=''\n \tself.newState=''\n \tself.save()",
"def update_rank(self):\n self.__rank += 1",
"def payout(self):\n user = self.completed_by\n user_node = user.get()\n for key, value in self.v_reward:\n user_node[key] += value\n graph.push(user_node)",
"def updateOrderbookFull(self, asks, bids):\n self.asks = asks\n self.bids = bids",
"def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()",
"def update_payment(self):\r\n update_payment_to_db(self.__payment_id__, self.__camper_id__, self.__camp_id__, self.__payment_date__, self.__paid_amount__)",
"def reset_parameters(self): \n self.deposit_intent = 0\n self.contribution_intent = 0\n self.sponsor_intent = 0\n self.teo_exchange_intent = 0\n self.euro_exchange_intent = 0\n self.withdraw_intent = 0\n \n self.hour_wallet = self.monthly_hours\n self.staged_euro = 0\n self.staged_teo = 0\n self.contributed_hours = 0\n self.exchanged_euros = 0\n self.exchanged_teos = 0\n self.withdrawn_euros = 0",
"def paynow_update(request, payment_reference):\r\n\r\n # Get saved paymend details\r\n payment = get_object_or_404(PaynowPayment, reference=payment_reference)\r\n # Init paynow object. The URLS can be blank\r\n paynow = Paynow(settings.PAYNOW_INTEGRATION_ID, settings.PAYNOW_INTEGRATION_KEY, '', '')\r\n # Check the status of the payment with paynow server\r\n payment_result = paynow.check_transaction_status(payment.poll_url)\r\n\r\n save_changes = False\r\n\r\n # check if status has changed\r\n if payment.status != payment_result.status:\r\n payment.status = payment_result.status\r\n save_changes = True\r\n\r\n # Check if paynow reference has changed\r\n if payment.paynow_reference != payment_result.paynow_reference:\r\n payment.paynow_reference = payment_result.paynow_reference\r\n save_changes = True\r\n\r\n # Check if payment is now paid\r\n if payment_result.paid:\r\n if not payment.paid:\r\n payment.paid = True\r\n payment.confirmed_at = timezone.now()\r\n\r\n if save_changes:\r\n payment.save()\r\n\r\n return HttpResponse('ok')",
"async def _update_balances(self):\n local_asset_names = set(self._account_balances.keys())\n remote_asset_names = set()\n resp_json = await self._api_request(\"post\",\n \"terra/balances\",\n {\"address\": self._terra_wallet_address})\n for token, bal in resp_json[\"balances\"].items():\n self._account_available_balances[token] = Decimal(str(bal))\n self._account_balances[token] = Decimal(str(bal))\n remote_asset_names.add(token)\n\n asset_names_to_remove = local_asset_names.difference(remote_asset_names)\n for asset_name in asset_names_to_remove:\n del self._account_available_balances[asset_name]\n del self._account_balances[asset_name]\n\n self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}\n self._in_flight_orders_snapshot_timestamp = self.current_timestamp",
"def payment_post_save(**kwargs):\n payment = kwargs['instance']\n bill = payment.bill\n bill.update_cached_totals()",
"def update_paypal(sender, **kwargs):\n ipn_obj = sender\n try:\n payment = json.loads(ipn_obj.custom)\n\n # try to get payment. if not exist, exception will be catched\n p = Payment.objects.filter(id=payment.get('id'), token=payment.get('token')).get()\n\n # update payment\n p.method = constants.PAYPAL\n p.ipn = ipn_obj\n p.save()\n\n # if payment is completed, so valid\n if ipn_obj.payment_status == ST_PP_COMPLETED:\n # check correct price , currency and mail\n if int(ipn_obj.mc_gross) == int(p.price.price) and \\\n ipn_obj.mc_currency == 'EUR' and \\\n ipn_obj.business == settings.PAYPAL_RECEIVER_EMAIL:\n # all is OK, update state\n p.state = True\n p.save()\n sendmail_payment_success(p)\n else:\n # TODO: send alert / mail\n return\n except Payment.DoesNotExist:\n # TODO: send alert / mail\n pass\n except:\n # TODO: send alert / mail\n pass",
"def UpdateSumaries(vj):\n SumaryPresupuesto(vj)\n SumaryGastos(vj)\n SumaryCompras(vj)\n SumaryPagos(vj)\n SumaryVentas(vj)",
"def _update_ranks(sample_count):\n raise NotImplementedError",
"def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1",
"def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])",
"def __send_update(self):\n\n offset = self.app_id * 10\n\n # TODO set number of leading zero depending on max value\n print(\"Update run {}: {} {}/{} with {}S - {}F - {}B\".format(self.app_id,\n self.test_run.date,\n self.test_run.actual,\n self.test_run.total,\n self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked))\n\n status_dict = {}\n # Test run advance status string\n status_dict[offset + self.PIN_STATUS_TEXT] = \"{}/{}\".format(self.test_run.actual,\n self.test_run.total)\n # Test run advance status percent\n percent = self.test_run.actual / self.test_run.total * 100\n status_dict[offset + self.PIN_STATUS_GRAPH] = percent\n # Test run result type number\n status_dict[offset + self.PIN_TYPES] = \"S{} F{} B{}\".format(self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked)\n # Test run led TODO manage color\n status_dict[offset + self.PIN_LED] = 255\n\n self.post_dict(status_dict)",
"def action_payslip_done(self):\n for recd in self.overtime_ids:\n recd.payslip_paid = True\n return super(PayslipOverTime, self).action_payslip_done()",
"def update_account(row, account):\n if row['LAST_UPDATED_FROM_PAYGOV']:\n updated_at = datetime_from(row['LAST_UPDATED_FROM_PAYGOV'])\n account.donations.filter(time__lte=updated_at).delete()\n if account.category == Account.PROJECT:\n set_balances(row, account)\n account.save()",
"def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))",
"def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()",
"def task_saleorder_update_productskustats_waitingpay_num(sku_id):\n from flashsale.pay.models import SaleOrder\n\n product_id = ProductSku.objects.get(id=sku_id).product.id\n waitingpay_num_res = SaleOrder.objects.filter(item_id=product_id, sku_id=sku_id,\n status=SaleOrder.WAIT_BUYER_PAY).aggregate(\n Sum('num'))\n total = waitingpay_num_res['num__sum'] or 0\n stat = SkuStock.get_by_sku(sku_id)\n if stat.waitingpay_num != total:\n stat.waitingpay_num = total\n stat.save(update_fields=[\"waitingpay_num\"])"
] |
[
"0.5612123",
"0.5498229",
"0.545742",
"0.5438718",
"0.5407351",
"0.54060024",
"0.5373511",
"0.5276792",
"0.52203137",
"0.52070946",
"0.5103955",
"0.5061251",
"0.5003157",
"0.49942267",
"0.49661735",
"0.4962416",
"0.4912601",
"0.48971972",
"0.48836514",
"0.48782393",
"0.48590538",
"0.4853334",
"0.4853",
"0.4846037",
"0.4824853",
"0.48232794",
"0.48142788",
"0.47997978",
"0.47912672",
"0.47793967"
] |
0.6945879
|
0
|
Check an account's subscription status.
|
def _subscribed(self, account_id):
sql = """SELECT 1 FROM hive_subscriptions
WHERE community_id = :community_id
AND account_id = :account_id"""
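    # A truthy result from query_one means a matching subscription row exists.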
return bool(DB.query_one(
sql, community_id=self.community_id, account_id=account_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def verifysubscriptionstatusinaccounttab():\n pass",
"async def status(ctx):\n redis = await RedisDB.create()\n user = ctx.message.author\n try:\n subscription_id = await get_subscription_id(user, redis)\n\n if subscription_id is None:\n subscription_json = await create_subscription(user, redis)\n # There is no active indicator returned on a create user call - add it here to prevent issues.\n subscription_json['active'] = False\n else:\n subscription_json = verify_subscription(subscription_id)\n\n await send_status_message(user, subscription_json)\n\n except Exception as e:\n await user.send(\n \"There was an unexpected error during checking the status of your subscription.\\n\"\n \"Please contact the Nano Center Ambassadors for more information.\"\n )\n raise e\n finally:\n await redis.close()",
"def verifysubscriptioninhomedevicestatus(sub):\n try:\n if \"Subscription Active\" in sub:\n print \" Hi chetan You have Active subscription\"\n else:\n print \" your subscription is not active \"\n except Exception as er:\n print(\"not able to get subscription details\")\n return False",
"def has_active_subscription(self, count=False):\n subs = self.subscriptions.filter(active=True)\n return subs.exists() if count is False else subs.count()",
"def get_is_subscribed(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n return profile in obj.subscribed_users.all()",
"async def check_account(self) -> tuple:\n results = await self._api.call('system', 'check_account')\n\n username = results.header.vars.get('un')\n result = results.header.vars.get('signed_out')\n if not result:\n result = results.header.vars.get('signed_in')\n\n return AccountStatus(result), username",
"def subscription_checker(**kwargs):\n count = 0\n with sentry_sdk.start_transaction(\n op=\"subscription_checker\",\n name=\"subscription_checker\",\n sampled=False,\n ):\n for subscription in QuerySubscription.objects.filter(\n status__in=(\n QuerySubscription.Status.CREATING.value,\n QuerySubscription.Status.UPDATING.value,\n QuerySubscription.Status.DELETING.value,\n ),\n date_updated__lt=timezone.now() - SUBSCRIPTION_STATUS_MAX_AGE,\n ):\n with sentry_sdk.start_span(op=\"repair_subscription\") as span:\n span.set_data(\"subscription_id\", subscription.id)\n span.set_data(\"status\", subscription.status)\n count += 1\n if subscription.status == QuerySubscription.Status.CREATING.value:\n create_subscription_in_snuba.delay(query_subscription_id=subscription.id)\n elif subscription.status == QuerySubscription.Status.UPDATING.value:\n update_subscription_in_snuba.delay(query_subscription_id=subscription.id)\n elif subscription.status == QuerySubscription.Status.DELETING.value:\n delete_subscription_from_snuba.delay(query_subscription_id=subscription.id)\n\n metrics.incr(\"snuba.subscriptions.repair\", amount=count)",
"def check_account_status(request):\n\n user = request.user\n\n if not user.is_authenticated():\n return {\n 'current_user': user,\n 'check_account_status_url': reverse('check_account_status'),\n }\n\n session = request.session\n\n flag = session.get('show_email_confirmation_dialog', True)\n show = not user.has_activated_account and flag\n session['show_email_confirmation_dialog'] = False\n\n # We don't want so show email confirmation when use is trying to buy a ticket.\n if 'payment-details' in request.path:\n show = False\n\n return {\n 'current_user': user,\n 'show_email_confirmation_dialog': False,\n 'check_account_status_url': reverse('check_account_status'),\n }",
"def is_subscribed(user_id, profile_user_id):\n\n subscription = Subscription.query.filter(\n Subscription.user_id == user_id,\n Subscription.subscribe_to_id == profile_user_id\n ).first()\n print(\"IS SUBSCRIBED\")\n print(subscription)\n print(subscription is not None)\n return subscription is not None",
"def helper_subscriptions_exists(\n self, stream: str, expect_success: bool, subscribed: bool\n ) -> None:\n result = self.client_post(\"/json/subscriptions/exists\", {\"stream\": stream})\n if expect_success:\n json = self.assert_json_success(result)\n else:\n self.assertEqual(result.status_code, 404)\n json = result.json()\n if subscribed:\n self.assertIn(\"subscribed\", json)\n self.assertEqual(json[\"subscribed\"], subscribed)",
"def rpc_campaign_alerts_is_subscribed(self, campaign_id):\n\t\tusername = self.basic_auth_user\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(db_models.AlertSubscription)\n\t\tquery = query.filter_by(campaign_id=campaign_id, user_id=username)\n\t\tresult = query.count()\n\t\tsession.close()\n\t\treturn result",
"def is_subscribed(self) -> bool:\n return bool(self._subscriptions)",
"def validate_active(self, in_response_to):\n if not self.status == SS_ACTIVE:\n raise StatusMessageException(in_response_to,\n ST_FAILURE,\n 'The Subscription is not active!')",
"def confirm_subscription(self, sub_code):\n\t\tresult = {}\n\t\tconnection = DbHelper.connect()\n\t\tsub_id = 0\n\t\t# print(\"Subscription Code: \" + sub_code)\n\n\t\ttry:\n\t\t\twith connection.cursor() as cursor:\n\t\t\t\tsql = \"SELECT * FROM mail_list \\\n\t\t\t\t\t WHERE email_hash=%s;\"\n\t\t\t\tcursor.execute(sql, [sub_code])\n\t\t\t\tresult = cursor.fetchone()\n\t\t\t\t\n\t\t\t\tif not result:\n\t\t\t\t\tconnection.close()\n\t\t\t\t\treturn \"CODE_DOES_NOT_EXIST\"\n\t\t\t\t\n\t\t\t\telif result['is_activated']:\n\t\t\t\t\tconnection.close()\n\t\t\t\t\treturn \"CODE_ALREADY_ACTIVATED\"\n\n\t\t\t\tsub_id = result['sub_id']\n\n\t\t\t\tsql = \"UPDATE mail_list \\\n\t\t\t\t\t SET is_activated=is_activated+1 \\\n\t\t\t\t\t WHERE sub_id=%s;\"\n\t\t\t\tcursor.execute(sql, [sub_id])\n\t\t\t\tconnection.commit()\n\t\t\t\tconnection.close()\n\t\t\t\treturn result\n\t\texcept pymysql.MySQLError as e:\n\t\t\tconnection.close()\n\t\t\treturn \"DATABASE_ERROR\"",
"def test_is_subscribed(self):\n manager_root = ISubscriptionManager(self.root)\n manager_root.subscribability = SUBSCRIBABLE\n manager_root.subscribe('[email protected]')\n self.assertEqual(manager_root.is_subscribed('[email protected]'), True)\n self.assertEqual(manager_root.is_subscribed('[email protected]'), False)\n\n manager = ISubscriptionManager(self.root.folder)\n self.assertEqual(manager.is_subscribed('[email protected]'), True)\n self.assertEqual(manager.is_subscribed('[email protected]'), False)\n\n # If you turn off subscription off at the folder level, you\n # are no longer subscribed\n manager.subscribability = NOT_SUBSCRIBABLE\n self.assertEqual(manager.is_subscribed('[email protected]'), False)\n\n # That didn't changed anything on the parent\n self.assertEqual(manager_root.is_subscribed('[email protected]'), True)",
"def check_subscr(self):\n\n subscr_info = self.__get_general_subscr_info()\n\n if not subscr_info:\n # The subscription does not exist:\n self.attrs = deepcopy(self.empty_attrs)\n return False\n\n self.attrs['owner'] = subscr_info.get('rolname')\n self.attrs['enabled'] = subscr_info.get('subenabled')\n self.attrs['synccommit'] = subscr_info.get('subenabled')\n self.attrs['slotname'] = subscr_info.get('subslotname')\n self.attrs['publications'] = subscr_info.get('subpublications')\n if subscr_info.get('subconninfo'):\n for param in subscr_info['subconninfo'].split(' '):\n tmp = param.split('=')\n try:\n self.attrs['conninfo'][tmp[0]] = int(tmp[1])\n except ValueError:\n self.attrs['conninfo'][tmp[0]] = tmp[1]\n\n return True",
"def check_subscription_name_exists(self):\n return check_subscription_name_exists(self.project_id, self.subscription_name)",
"def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')",
"def verify_subscription(subscription_id: str):\n verify_data = {\n \"subscription_id\": subscription_id\n }\n json_data = json.dumps(verify_data)\n r = requests.post(f\"{os.getenv('API_ENDPOINT')}verify?token={os.getenv('NR_TOKEN')}\", json_data)\n return r.json()",
"def test_sub_account_asset():\n\n client = Client(key, secret)\n response = client.sub_account_status(email=\"[email protected]\")\n response.should.equal(mock_item)",
"def subscribed(cls, team):\n return cls.query(\n cls.status == 'subscribe',\n cls.team == team.lower()\n ).fetch(100)",
"def subscriptions(self):\r\n return subs.AccountSubscriptions(self)",
"def test_successful_subscriptions_exists_subbed(self) -> None:\n self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(self.streams[0], True, True)",
"def has_subscribers(cls, topic):\n\t\tif (cls.all().filter('topic_hash =', utils.sha1_hash(topic))\n\t\t\t\t.filter('subscription_state =', cls.STATE_VERIFIED).get() is not None):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def _is_active_subscription(self, topic: str) -> bool:\n return topic in self._simple_subscriptions or any(\n other.topic == topic for other in self._wildcard_subscriptions\n )",
"def do_rxn_status(self, arg):\n\n cmd_call = 'rxn_status'\n return self.run_cmd(arg, cmd_call)",
"def check_status(self):",
"def do_subscription_approval(sender, **kwargs):\r\n req_payment = sender.get_product_class().get_requires_payment_details()\r\n if not req_payment or has_valid_billing_details(sender.billing_account):\r\n status = 'approved'\r\n else:\r\n status = 'declined'\r\n sender.set_current_approval_status(status)\r\n return status",
"async def fetch_account_status(account_id):\n res_object = requests.get(_ACCOUNTS_URL.format(account_id=account_id))\n return res_object.json() if res_object.status_code == 200 else {}",
"async def validate_account(self) -> bool:\n raise NotImplementedError"
] |
[
"0.79412097",
"0.71638125",
"0.69243705",
"0.6228792",
"0.62228334",
"0.60814595",
"0.6052438",
"0.5985828",
"0.59778154",
"0.5947996",
"0.59201866",
"0.5842008",
"0.58302164",
"0.5800907",
"0.57920486",
"0.5789828",
"0.575155",
"0.5726498",
"0.57236356",
"0.56621563",
"0.56459755",
"0.5620504",
"0.56117594",
"0.5539785",
"0.55387264",
"0.55376184",
"0.55371606",
"0.5523266",
"0.5519887",
"0.54838324"
] |
0.7298385
|
1
|
Check post's muted status.
|
def _muted(self):
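    # is_muted is a per-post flag, so a single keyed lookup is enough.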
sql = "SELECT is_muted FROM hive_posts WHERE id = :id"
return bool(DB.query_one(sql, id=self.post_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def muted(self) -> bool:\n return self._muted",
"def is_muted(self):\n return self.muting_handler.is_muted()",
"def isMuted(self):\n return self._isMuted",
"def is_muted(self):\n # type: () -> bool\n return self._is_muted",
"def is_volume_muted(self):\n return self._state.get(\"mute\", None)",
"def is_volume_muted(self):\n return self._isMuted",
"def isTrackMuted(*args, **kwargs):\n pass",
"def is_volume_muted(self) -> bool:\n return int(self._state.get(\"playback_mute\", 0)) == 1",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._mute",
"def _parent_muted(self):\n parent_id = \"SELECT parent_id FROM hive_posts WHERE id = :id\"\n sql = \"SELECT is_muted FROM hive_posts WHERE id = (%s)\" % parent_id\n return bool(DB.query_one(sql, id=self.post_id))",
"def mute(self) -> bool:\n return bool(self.audio_mixer.getmute()[0])",
"async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)",
"def is_volume_muted(self) -> str | None:\n return self.zone.Mute",
"def muted(guild, channel):\n\tif str(guild.id) in Settings.muted_channels:\n\t\treturn str(channel.id) in Settings.muted_channels[str(guild.id)]\n\treturn False",
"async def mutedlist(self, ctx):\r\n server = ctx.message.guild\r\n msg = \"\"\r\n i = 0\r\n try:\r\n for userid in self.d[str(server.id)]:\r\n if self.d[str(server.id)][userid][\"toggle\"] == True:\r\n i = i + 1\r\n except:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n if i == 0:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n for userid in self.d[str(server.id)]:\r\n if self.d[str(server.id)][userid][\"time\"] == None or self.d[str(server.id)][userid][\"time\"] - ctx.message.created_at.timestamp() + self.d[str(server.id)][userid][\"amount\"] <= 0:\r\n time = \"Infinite\"\r\n else:\r\n m, s = divmod(self.d[str(server.id)][userid][\"time\"] - ctx.message.created_at.timestamp() +\r\n self.d[str(server.id)][userid][\"amount\"], 60)\r\n h, m = divmod(m, 60)\r\n d, h = divmod(h, 24)\r\n if d == 0:\r\n time = \"%d hours %d minutes %d seconds\" % (h, m, s)\r\n if h == 0 and d == 0:\r\n time = \"%d minutes %d seconds\" % (m, s)\r\n elif h == 0 and m == 0:\r\n time = \"%d seconds\" % (s)\r\n else:\r\n time = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\r\n if self.d[str(server.id)][userid][\"toggle\"] == True:\r\n user = discord.utils.get(server.members, id=int(userid))\r\n if user:\r\n msg += \"{} - {} (Till mute ends)\\n\".format(user, time)\r\n if not msg:\r\n await ctx.send(\"No one is muted in this server :no_entry:\")\r\n return\r\n s = discord.Embed(description=msg, colour=0xfff90d, timestamp=datetime.datetime.utcnow())\r\n s.set_author(name=\"Mute List for {}\".format(server), icon_url=server.icon_url)\r\n await ctx.send(embed=s)",
"async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)",
"async def mute(self, *args, **kwargs):\n self.muted = not self.muted # toogle\n if self.muted:\n self.just_muted = True\n return \"I've been muted :(\"\n return \"I'm back! :D\"",
"async def get_mute(self) -> bool:\n return await self._pytheos.api.player.get_mute(self.id)",
"def getSafetyMute(self, unitCode=0):\n resp = self.XAPCommand('SFTYMUTE', unitCode=unitCode)\n return bool(int(resp))",
"def mute(self, msg, args):\n if self.mute:\n self.mute=False\n return \"Yay, I can make noise again!\"\n else:\n self.mute=True\n return \"OK, I'll shut up now!\"",
"def get_mute(self):\n return on_off_bool(self.get(COMMAND_UIC, 'GetMute')['mute'])",
"def is_muted(self, is_muted):\n # type: (bool) -> None\n\n if is_muted is not None:\n if not isinstance(is_muted, bool):\n raise TypeError(\"Invalid type for `is_muted`, type has to be `bool`\")\n\n self._is_muted = is_muted",
"def check_can_post(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if ((self.type != 'O' or self.post_membership_required or self.post_admin_required) and\n (membership is None or\n membership.is_banned() or\n membership.is_left())):\n return False\n elif self.post_admin_required and membership.role not in ['O', 'A']:\n return False\n elif (self.post_condition == 'K' and\n user.profile.karma < self.post_karma_threshold):\n return False\n else:\n return True",
"def muted(self, muted):\n if muted is None:\n raise ValueError(\"Invalid value for `muted`, must not be `None`\") # noqa: E501\n\n self._muted = muted",
"def mute_track(self, track, muted):\n pass",
"def get_mute(cls) -> bool:\n raise NotImplementedError",
"def get_posts_awaiting_moderation():\n return Post.query.filter(\n Post.is_active,\n Post.moderation_status == 'NEW',\n Post.moderator_id.is_(None)).count()"
] |
[
"0.7033664",
"0.68607223",
"0.6625169",
"0.6539288",
"0.6469418",
"0.63501257",
"0.63372064",
"0.6249322",
"0.62470967",
"0.62470967",
"0.62470967",
"0.6234355",
"0.6154165",
"0.61492485",
"0.612375",
"0.60978466",
"0.60734046",
"0.5952615",
"0.58471787",
"0.58437407",
"0.5768562",
"0.57142144",
"0.5604966",
"0.5591755",
"0.55669045",
"0.5546205",
"0.55169964",
"0.55000335",
"0.54824793",
"0.54353166"
] |
0.7875454
|
0
|
Check parent post's muted status.
|
def _parent_muted(self):
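    # Resolve the parent row via a nested subquery so only one query is issued.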
parent_id = "SELECT parent_id FROM hive_posts WHERE id = :id"
sql = "SELECT is_muted FROM hive_posts WHERE id = (%s)" % parent_id
return bool(DB.query_one(sql, id=self.post_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _muted(self):\n sql = \"SELECT is_muted FROM hive_posts WHERE id = :id\"\n return bool(DB.query_one(sql, id=self.post_id))",
"def muted(self) -> bool:\n return self._muted",
"def is_muted(self):\n return self.muting_handler.is_muted()",
"def is_volume_muted(self):\n return self._state.get(\"mute\", None)",
"def is_volume_muted(self):\n return self._isMuted",
"def isMuted(self):\n return self._isMuted",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._muted",
"def is_volume_muted(self):\n return self._mute",
"def is_muted(self):\n # type: () -> bool\n return self._is_muted",
"def is_volume_muted(self) -> bool:\n return int(self._state.get(\"playback_mute\", 0)) == 1",
"def mute(self) -> bool:\n return bool(self.audio_mixer.getmute()[0])",
"def is_volume_muted(self) -> str | None:\n return self.zone.Mute",
"def isTrackMuted(*args, **kwargs):\n pass",
"def muted(guild, channel):\n\tif str(guild.id) in Settings.muted_channels:\n\t\treturn str(channel.id) in Settings.muted_channels[str(guild.id)]\n\treturn False",
"async def get_mute(self) -> bool:\n return await self._pytheos.api.player.get_mute(self.id)",
"async def mute(self, *args, **kwargs):\n self.muted = not self.muted # toogle\n if self.muted:\n self.just_muted = True\n return \"I've been muted :(\"\n return \"I'm back! :D\"",
"def isParentControlEnabled(self):\n d, w = self.__stats.playLimits\n return d[0] < self.DAY_DURATION or w[0] < 7 * self.DAY_DURATION",
"def check_can_post(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if ((self.type != 'O' or self.post_membership_required or self.post_admin_required) and\n (membership is None or\n membership.is_banned() or\n membership.is_left())):\n return False\n elif self.post_admin_required and membership.role not in ['O', 'A']:\n return False\n elif (self.post_condition == 'K' and\n user.profile.karma < self.post_karma_threshold):\n return False\n else:\n return True",
"def sees_post(self, post, context_home_or_profile):\n if owns_post(self, post):\n return True\n if context_home_or_profile and post.author not in self.followings:\n return False\n if post.is_public:\n return True\n else:\n for circle in post.circles:\n circle = get_in_circle_cache(circle.id)\n if check_member(circle, self):\n return True\n return False",
"def permissive(self) -> bool:\n return self._permissive",
"def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False",
"def owns_post(self, post):\n return self.id == post.author.id",
"async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)",
"async def async_toggle(self):\n await self.async_mute_volume(not self._muted)",
"def get_mute(cls) -> bool:\n raise NotImplementedError",
"def is_moderator(self):\n return self.user_type == 'M'",
"def paused(self) -> bool:",
"def is_muted(self, is_muted):\n # type: (bool) -> None\n\n if is_muted is not None:\n if not isinstance(is_muted, bool):\n raise TypeError(\"Invalid type for `is_muted`, type has to be `bool`\")\n\n self._is_muted = is_muted"
] |
[
"0.7123572",
"0.6673975",
"0.6323126",
"0.61445504",
"0.613507",
"0.6108852",
"0.6080271",
"0.6080271",
"0.6080271",
"0.6044953",
"0.60223186",
"0.60145766",
"0.5991438",
"0.58561844",
"0.5629622",
"0.5451478",
"0.5331708",
"0.53028053",
"0.5275612",
"0.5273927",
"0.52620155",
"0.5260635",
"0.52341413",
"0.5232027",
"0.51751024",
"0.51398355",
"0.511924",
"0.5105337",
"0.5100829",
"0.5088135"
] |
0.7886913
|
0
|
Check post's pinned status.
|
def _pinned(self):
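    # Same single-lookup pattern as _muted, but against the is_pinned column.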
sql = "SELECT is_pinned FROM hive_posts WHERE id = :id"
return bool(DB.query_one(sql, id=self.post_id))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_pinned_content(self):\n if \"query\" in self.query:\n q = self.query[\"query\"]\n else:\n q = self.query\n if \"pinned_ids\" in q:\n return bool(len(q.get(\"pinned_ids\", [])))\n return False",
"def _pinned():\n result = \"pinned\" if this_thread_is_pinned() else \"not pinned\"\n return HttpResponse(result, content_type=\"text/plain\")",
"def test_pinned_ordering(self):\n # Add test projects.\n add_project(title='Title 1', description='Project 1', pinned=False)\n add_project(title='Title 2', description='Project 2', pinned=True)\n add_project(title='Title 3', description='Project 3', pinned=False)\n add_project(title='Title 4', description='Project 4', pinned=True)\n add_project(title='Title 5', description='Project 5', pinned=False)\n\n # Make sure pinned projects are retrieved before unpinned.\n projects = Project.objects.all()\n for index, project in enumerate(projects):\n if index > 0 and not projects[index-1].pinned and project.pinned:\n self.fail('Unpinned project retrieved before pinned project.')",
"def _flagged(self):\n from hive.indexer.notify import NotifyType\n sql = \"\"\"SELECT 1 FROM hive_notifs\n WHERE community_id = :community_id\n AND post_id = :post_id\n AND type_id = :type_id\n AND src_id = :src_id\"\"\"\n return bool(DB.query_one(sql,\n community_id=self.community_id,\n post_id=self.post_id,\n type_id=NotifyType['flag_post'],\n src_id=self.actor_id))",
"def HasPinButton(self):\r\n \r\n return self.HasFlag(self.buttonPin)",
"def sees_post(self, post, context_home_or_profile):\n if owns_post(self, post):\n return True\n if context_home_or_profile and post.author not in self.followings:\n return False\n if post.is_public:\n return True\n else:\n for circle in post.circles:\n circle = get_in_circle_cache(circle.id)\n if check_member(circle, self):\n return True\n return False",
"def pinboard(auth_token):\n\n\n existing_links = requests.get('https://api.pinboard.in/v1/posts/all', params={\n 'auth_token': auth_token,\n 'format': 'json',\n }).json()\n existing_links = set(link['href'] for link in existing_links)\n\n entries = get_entries()\n unsynced_entries = [e for e in entries if urlify(e) not in existing_links]\n\n print(f\"{len(unsynced_entries)} entries to sync...\")\n\n def show_item(entry):\n if entry == None:\n return \"\"\n return (entry.title or urlify(entry))[:50]\n\n with click.progressbar(unsynced_entries, item_show_func=show_item, label='Exporting to Pinboard...') as bar:\n for entry in bar:\n url = urlify(entry)\n title = entry.title or url\n\n time.sleep(3)\n\n res = requests.get('https://api.pinboard.in/v1/posts/add', params={\n 'auth_token': auth_token,\n 'url': url,\n 'description': title,\n 'extended': entry.summary,\n 'toread': 'no' if entry.read else 'yes',\n 'replace': 'no',\n 'dt': entry.time_added.isoformat(),\n 'tags': [slugify(tag) for tag in entry.tags],\n 'format': 'json',\n })\n\n result_code = res.json()['result_code']\n if result_code != 'done' and result_code != 'item already exists':\n click.echo(f\"Failed to add {title} with result code '{result_code}'.\", err=True)",
"def pinned_tasks(request):\n return Task.objects.filter(pinned=True, user=request.user).exclude(folder='trash')",
"async def get_app_pin(self) -> bool:\n return await self.AD.threading.get_app_pin(self.name)",
"def set_pinned_by_index(self, ids, pinned=True):\n\n if isinstance(ids, (int, long)):\n ids = [ids]\n\n ids = [int(id) for id in ids]\n\n update_result = self.update_many({ '$and': [{ pair_data.SEQUENCE: { '$in': ids } }, { pair_data.PROPERTIES + '.' + pair_data.DISABLED: False }] }, \n { '$set': { pair_data.PROPERTIES + '.' + pair_data.PINNED: pinned }})\n\n return update_result.matched_count == update_result.modified_count and update_result.matched_count > 0",
"def is_pinned_version(version):\n return is_valid_instance_id(version) or is_valid_tag(version)",
"def ping(self) -> bool:\n # consider 200 to be successful\n response = self.shards_response(\"ping\")\n return response.status_code == 200",
"def ison(self):\n return bool(self.pin.state) if self.pinishigh else not bool(self.pin.state)",
"def get_post_status_list(self, blogid=1):\n return self.execute('wp.getPostStatusList', blogid, self.username, self.password)",
"def is_new_post(self, post):\n return self.last_post != post['id']",
"def owns_post(self, post):\n return self.id == post.author.id",
"def ping(self):\n return (200 == self.client.head(self.name).getStatusCode())",
"def is_pin_on(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n return gpio.input(port_num) == gpio.HIGH",
"def pinned(self, pinned):\n if pinned is None:\n raise ValueError(\"Invalid value for `pinned`, must not be `None`\") # noqa: E501\n\n self._pinned = pinned",
"def maybe_repair_post_in_host_listing(post_obj):\n from channels import tasks\n\n post = post_obj._self_post # pylint: disable=protected-access\n\n tasks.maybe_repair_post_in_host_listing.apply_async(\n args=[post.channel.name, post.post_id],\n countdown=settings.OPEN_DISCUSSIONS_HOT_POST_REPAIR_DELAY,\n )",
"def _isconnected(self) -> bool:\n for pin in self.pins:\n if pin._isconnected():\n return True\n\n return False",
"def postfinance_status_good(self):\n return self.postfinance_status in ('5', '9')",
"def pinned_tasks_srp(request):\n return Task.objects.select_related('project').filter(pinned=True, user=request.user).exclude(folder='trash')",
"def pingback_post(response, target_uri, slug):\n post = Post.query.filter_by(slug=slug).first()\n if post is None:\n return False\n\n if post is None or not post.pings_enabled:\n raise PingbackError(33, 'no such post')\n elif not post.can_read():\n raise PingbackError(49, 'access denied')\n title, excerpt = get_excerpt(response, target_uri)\n if not title:\n raise PingbackError(17, 'no title provided')\n elif not excerpt:\n raise PingbackError(17, 'no useable link to target')\n old_pingback = Comment.query.filter(\n (Comment.is_pingback == True) &\n (Comment.www == response.url)\n ).first()\n if old_pingback:\n raise PingbackError(48, 'pingback has already been registered')\n Comment(post, title, excerpt, '', response.url, is_pingback=True,\n submitter_ip=get_request().remote_addr, parser='text')\n db.commit()\n return True",
"def pin(owner_id=None, post_id=None):\n params = {\n 'owner_id': owner_id,\n 'post_id': post_id\n }\n result = call('wall.pin', **params)\n return parse_response(result)",
"def check_if_postponed(wd):\n try:\n result = wd.find_element_by_id(\"js-eventstage\").text\n if result == \"Postponed\":\n print(\"Postponed\")\n return 1\n else:\n return 0\n except:\n return 0",
"def check_status(self):",
"def is_in_board(self):\n return self.is_alive()",
"def ping(self):\n return True",
"def ping(self):\n return True"
] |
[
"0.6700017",
"0.61331105",
"0.596125",
"0.57118297",
"0.56939167",
"0.55961037",
"0.5477324",
"0.54076844",
"0.5321976",
"0.5283378",
"0.52454925",
"0.5242679",
"0.5218903",
"0.5209596",
"0.51835704",
"0.51642054",
"0.5133033",
"0.5130174",
"0.512657",
"0.5092066",
"0.5090023",
"0.5088875",
"0.507685",
"0.50557697",
"0.5053705",
"0.50534445",
"0.50461984",
"0.5028753",
"0.50279176",
"0.50279176"
] |
0.8083206
|
0
|
Sets up a classifier for use
|
def setup_classifier(name):
    global _classifier, _trained
    # Accept only the implemented back-ends; anything else is a caller error.
    if name in ("euclid", "bayes", "rocchio"):
        _classifier = name
        _trained = True
    else:
        raise ValueError("Classifier with name '{0}' does not exist".format(name))
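# Note: _classifier and _trained are module-level state shared with evaluate() below.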
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_classifier(self, classifier):\n self.classifier = classifier\n self.tester = Tester.Test(classifier)\n self.trained_ham_hist = Hist()\n self.trained_spam_hist = Hist()",
"def __init__(self, classifier, X, y, val_method, val_size, k, stratify):\n\t\tModel.counter += 1\n\n\t\tself.classifier = classifier\n\t\tself.X = X\n\t\tself.y = y\n\n\t\t# default name for the classifier\n\t\tself.name_ = 'classifier_' + str(Model.counter)\n\t\tself.type_ = ''\n\t\tself.comment_ = ''\n\t\tself.params_ = classifier.get_params()\t# hyperparameters of classifier\n\n\t\tif val_method == 'holdout':\n\t\t\tself.train_metrics, self.val_metrics = self.__hold_out_validation(\\\n\t\t\t\tclassifier, X, y, val_size, stratify)\n\t\telif val_method == 'cv':\n\t\t\tself.train_metrics, self.val_metrics = self.__cross_validation(\\\n\t\t\t\tclassifier, X, y, k, stratify)\n\t\telif val_method == 'loo':\n\t\t\tself.train_metrics, self.val_metrics = self.__cross_validation(\\\n\t\t\t\tclassifier, X, y, X.shape[0])\n\n\t\tself.test_metrics = None",
"def __init__(self, clf, **kwargs):\n\n # Is done before parents __init__ since we need\n # it for _set_retrainable called during __init__\n self.__clf = clf\n \"\"\"Store the classifier to use.\"\"\"\n\n Classifier.__init__(self, **kwargs)\n\n # adhere to slave classifier capabilities\n # TODO: unittest\n self.__tags__ = self.__tags__[:] + ['meta']\n if clf is not None:\n self.__tags__ += clf.__tags__",
"def initialize(self):\r\n\r\n loader = ModelLoader()\r\n\r\n input = None\r\n testDataPath = None\r\n while True:\r\n input = utils.menue(\"Majority Vote classifier\", [\"add classifier\", \"save\", \"test + finish\", \"finish\"], False, True)\r\n if input == 2:\r\n self.modelSaver(self, -1)\r\n continue\r\n\r\n if input > 2:\r\n break\r\n\r\n # Display loading menu\r\n model = loader.show_loading_screen()\r\n if not model is None:\r\n if testDataPath is None:\r\n testDataPath = model.testData.get_root_path()\r\n self.modelSaver.datasetPath = testDataPath\r\n else:\r\n # check if the test datasets are the same\r\n if testDataPath != model.testData.get_root_path():\r\n print \"Could not load classifier {0} because the classifier was trained on different test data.\".format(model.name)\r\n continue\r\n self.classifiers.append((1, model)) # Tuple[0] = model weight (1 is default weight) | Tuple[1] = model\r\n\r\n # why did we leave the loop?\r\n if input == 5:\r\n print \"Cancel\"\r\n return False # cancel -> back to start\r\n else:\r\n # initialize test data for the majority classifier\r\n\r\n # check if test data path has changed\r\n if not utils.check_if_dir_exists(testDataPath):\r\n testDataPath = utils.value_question(\"[...]\", \"Root path to new Dataset Path\", \"s\")\r\n self.testData = TestData(testDataPath, 1, False)\r\n self.testData.segment_test_data({\"test\": 1})\r\n self.testData.new_segmentation()\r\n\r\n self.tester = ModelTester(self)\r\n\r\n # test classifier if input == 3\r\n if input == 3:\r\n # Be careful: the results might not reflect the actual accuracy of the classifier.\r\n # if not changed the tester will test on the whole test data set. This might include images that the \r\n # classifiers has been trained on. For a real accuracy test the images have to be separated manually.\r\n results = self.tester.test_classifier([\"test\"])\r\n self.tester.save_results(results, exportToCSV=False)\r\n print self.tester.format_results_string(results)\r\n testLoss = results[\"test\"][1]\r\n save = utils.radio_question(\"[?]\", \"Save/Update classifier?\", None, [\"Yes\", \"No\"], [True, False])\r\n if save:\r\n self.modelSaver(self, testLoss)\r\n\r\n\r\n return self.classifiers != [] # finish. Return True if classifiers where loaded, False if not.\r",
"def new_classifier(self):\n self.set_classifier(classifier.Bayes())",
"def __init__(self, classifierType, hyperparams, dimension, trainPath, testPath, addBias=False):\n \n # set classifier type\n if classifierType.upper() == 'SVM':\n self.type = self.ClassifierType.SVM;\n else:\n raise ValueError('Unknown classifier type: ' + classifierType); \n \n # store value ranges for classifier hyperparameters\n self.hpRanges = hyperparams;\n \n # set default learning parameters\n self.k = 5; # k-fold cross validation\n self.cvEpochs = 10; # training epochs for cross validation\n self.epochs = 20; # training epochs for inference\n \n # read in train and test data\n self.D = dimension;\n self.trainSet = self.readFile(trainPath, dimension, addBias);\n self.testSet = self.readFile(testPath, dimension, addBias);",
"def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.multiclass\n self.model = sklearn.multiclass.OneVsRestClassifier",
"def __init__(self, options: ImageClassifierOptions,\n classifier: _CppImageClassifier) -> None:\n # Creates the object of C++ ImageClassifier class.\n self._options = options\n self._classifier = classifier",
"def __init__(self, classification_path):\n # TODO: Rodar novamente o KNN com a particao crisp 'otima' para reavaliar os valores de K\n self.data = list()\n self.class_data = np.loadtxt(classification_path, dtype=int)\n self.mfeat_fac_classifier = self.build_classifier(15, 0)\n self.mfeat_fou_classifier = self.build_classifier(13, 1)\n self.mfeat_kar_classifier = self.build_classifier(13, 2)",
"def define_classifiers(self):\n raise NotImplementedError(\"Classifier options must be defined first.\")",
"def __init__(self, classifier, X, y, val_method, val_size, k, stratify, mode):\n\t\tsuper().__init__(classifier, X, y, val_method, val_size, k, stratify)\n\t\tself.type_ = 'naive bayes'",
"def setup_svm(self, classifier_name=\"SVM\", **kwargs):\n if not classifier_name in self.classifiers:\n clf = svm.SVC(**kwargs)\n clf.fit(self.X_train, self.y_train)\n self.classifiers[classifier_name] = clf",
"def __init__(self):\n self.clf = DummyClassifier(strategy='most_frequent')",
"def __init__(self, tfiles, featurizer):\n\t\t### [ Featurize the classifier ] ###\n\t\t# random.shuffle(tfiles)\n\t\tself.featurizer = featurizer\n\t\tself.tfiles = tfiles\n\n\t\t# Now build a model based on these vectors\n\t\tnum_files = len(tfiles)\n\t\tnum_training_files = int(PERCENT_TRAINING * num_files)\n\t\tnum_test_files = num_files - num_training_files\n\n\t\tself.train_files = self.tfiles[:num_training_files]\n\t\tself.test_files = self.tfiles[num_training_files:]\n\n\t\tself.all_data = [featurizer.get_feature_matrix_and_output_vector(f) for f in self.tfiles]\n\t\tall_data_vectors = [d[0] for d in self.all_data]\n\t\tprint([v.shape for v in all_data_vectors])\n\t\tself.all_features = np.vstack(d[0] for d in self.all_data)\n\t\tself.all_labels = np.hstack(d[1] for d in self.all_data)\n\n\t\tself.train_data = [featurizer.get_feature_matrix_and_output_vector(f) for f in self.train_files]\n\t\tself.train_features = np.vstack([d[0] for d in self.train_data])\n\t\tself.train_labels = np.hstack([d[1] for d in self.train_data])\n\n\t\tself.test_data = [featurizer.get_feature_matrix_and_output_vector(f) for f in self.test_files]\n\t\tself.test_features = np.vstack([d[0] for d in self.test_data])\n\t\tself.test_labels = np.hstack(d[1] for d in self.test_data)\n\n\t\tself.trained_clf = []\n\t\tfor cl in used_classifiers:\n\t\t\tself.trained_clf += [cl(self.train_features, self.train_labels)]",
"def __init__(self, classifier: \"CLASSIFIER_TYPE\", batch_size: int = 128, nb_epochs: int = 10) -> None:\n super().__init__(classifier=classifier)\n self._is_fitted = True\n self.batch_size = batch_size\n self.nb_epochs = nb_epochs\n self._check_params()",
"def __init__(self, label='Unlabeled RF model', preprocessing=[],\n features=[], n_estimators=10, max_depth=None,\n min_samples_split=2, min_samples_leaf=1, max_features='auto',\n bootstrap=True, *args, **kwargs):\n\n print('Initialising model:', label)\n self.classifier = ensemble.RandomForestClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n max_features=max_features,\n bootstrap=bootstrap\n )\n\n self.label = label\n self.preprocessing = preprocessing\n self.features = features\n\n super().__init__(*args, **kwargs)",
"def classifier_set(tuning=False):\n\tif tuning==False:\n\t\tclassifiers = [\n\t\t\tKNeighborsClassifier(50),\n\t\t\tSVC(kernel=\"linear\", C=0.025, probability=True),\n\t\t\tSVC(gamma=1, C=1, probability=True),\n\t\t\tGaussianProcessClassifier(1.0 * RBF(1.0)),\n\t\t\tDecisionTreeClassifier(criterion= 'entropy', min_samples_leaf= 30, min_samples_split= 10, splitter= 'random'),\n\t\t\tRandomForestClassifier(n_estimators=50, min_samples_leaf=30, min_samples_split=2),\n\t\t\tMLPClassifier(early_stopping=True, hidden_layer_sizes=100,learning_rate_init=0.1),\n\t\t\tAdaBoostClassifier(n_estimators= 50),\n\t\t\tGaussianNB(),\n\t\t\tLogisticRegression()\n\t\t\t]\n\t\tnames = [\"KNN\",\n\t\t\t \t\"L SVM\",\n\t\t\t \t\"RBF SVM\", \n\t\t\t \t\"GP\",\n\t\t\t\t\"DT\",\n\t\t\t\t\"RF\",\n\t\t\t\t\"NN\", \n\t\t\t\t\"AB\",\n\t\t\t\t\"NB\",\n\t\t\t\t\"LR\"\n\t\t\t\t]\n\treturn classifiers, names",
"def make_classifiers(NAMES) :\r\n\r\n# if len(data_shape) != 2:\r\n# raise ValueError(\"Only 2-d data allowed (samples by dimension).\")\r\n\r\n classifiers = {\r\n \"Chance\": DummyClassifier(strategy=\"most_frequent\"),\r\n \"Nearest Neighbors\": KNeighborsClassifier(3),\r\n \"Linear SVM\": LinearSVC(penalty='l2', C=1,# probability=True,\r\n class_weight='balanced'),\r\n # sahil changed the configuration from \"probability\" True to False (probability\r\n # based inference doesn't work well in SVM models from part experiences,\r\n # as SVM original algorithm just split the data with no probablistic notion of inference.)\r\n \"RBF SVM\": SVC(gamma=2, C=1, probability=False),\r\n \"Decision Tree\": DecisionTreeClassifier(max_depth=None,\r\n max_features=\"auto\"),\r\n \"Random Forest\": RandomForestClassifier(max_depth=None,\r\n n_estimators=20,\r\n max_features=\"auto\",\r\n n_jobs=PROCESSORS),\r\n \"Logistic Regression\": LogisticRegression(penalty='l1',\r\n class_weight='balanced'),\r\n \"Naive Bayes\": GaussianNB(),\r\n \"LDA\": LDA(),\r\n \"SGD_logL1\": SGDClassifier(random_state=1952,loss='log', average = 3,\r\n penalty='l1',\r\n alpha=1e-3,\r\n class_weight='balanced'),\r\n \"SGD_log_elastic\": SGDClassifier(random_state=1952,loss='log',\r\n class_weight='balanced',\r\n alpha=1e-3,\r\n average = 3,\r\n penalty='elasticnet'),\r\n \"SGD_SVM_elastic\": SGDClassifier(random_state=1952,loss='log',\r\n class_weight='balanced',\r\n average = 3,\r\n alpha=1e-3,\r\n penalty='elasticnet'),\r\n\r\n # Sahil commented the two classiifer below as not able to install the packages\r\n # \"CGC_log_L1\": CDClassifier(penalty=\"l1\",\r\n # loss=\"log\",\r\n # multiclass=False,\r\n # max_iter=200,\r\n # C=1,\r\n # tol=1e-3),\r\n # \"SDCA_SVM_elastic\": SDCAClassifier(\r\n # loss=\"hinge\",\r\n # max_iter=200,\r\n # tol=1e-3)\r\n #\r\n }\r\n\r\n params = {\r\n \"Chance\": {},\r\n # 3, 4, 5\r\n # , 6, 7, 8, 10, 12, 15, 20, 30, 50, 75, 100\r\n #\r\n #\r\n \"Nearest Neighbors\": {\"n_neighbors\": [1, 2, 3, 5, 10, 20, 50, 75, 100, 150, 200, 250]},\r\n \"Linear SVM\": {\"C\": [0.1, 0.2, 0.3, 0.4, 0.5, 1, 1.5, 2],\r\n \"loss\":['hinge', 'squared_hinge']},\r\n \"RBF SVM\": {\"kernel\": [\"rbf\"],\r\n \"gamma\": np.logspace(-2, 0, 6).tolist() + \\\r\n np.logspace(0,1,5)[1:].tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()},\r\n \"Decision Tree\": {},\r\n \"Random Forest\": {\"max_depth\": np.round(np.logspace(np.log10(2), \\\r\n 1.2, 6)).astype(int).tolist()},\r\n \"Logistic Regression\": {\"C\": np.logspace(-2, 3, 6).tolist()},\r\n \"Naive Bayes\": {},\r\n \"LDA\": {},\r\n \"SGD_logL1\": {\"alpha\": np.logspace(-5, 2, 7)},\r\n \"SGD_log_elastic\": {\"alpha\": np.logspace(-5, 2, 6),\r\n \"l1_ratio\": 10**np.array([-2, -1, -.5, -.25,\r\n -.12, -.06, -.01])},\r\n \"SGD_SVM_elastic\": {\"alpha\": np.logspace(-5, 2, 6),\r\n \"l1_ratio\": 10**np.array([-2, -1, -.5, -.25,\r\n -.12, -.06, -.01])},\r\n \"CGC_log_L1\": {\"alpha\": np.logspace(-5, 2, 6)},\r\n \"SDCA_SVM_elastic\": {\"alpha\": np.logspace(-4, 4, 5),\r\n \"l1_ratio\": 10**np.array([-3,-2, -1, np.log10(.5),\r\n np.log10(.9)])}\r\n }\r\n out_classifiers = {cname: classifiers[cname] for cname in NAMES}\r\n out_params = {cname: params[cname] for cname in NAMES}\r\n logging.info(\"Using classifiers %r with params %r\" % (out_classifiers,\r\n out_params))\r\n return classifiers, params",
"def _initialise_classifier(self, comparison_vectors):\n\n # Set the start point of the classifier.\n self.kernel.init = numpy.array(\n [\n [0.05] * len(list(comparison_vectors)),\n [0.95] * len(list(comparison_vectors)),\n ]\n )",
"def __init__(self, tagger):\n self.tagger = tagger\n self.classifier = Perceptron()",
"def __init__(self, classification, extras=[]):\n self.model_list = []\n self._generate_model_list(classification)\n self.model_list.extend(extras)\n self.classification = classification",
"def setup(self):\n (self.X, self.Y) = load_iris(problem=\"label_ranking\")",
"def __init__(self, **config):\n super(Classifier, self).__init__()\n self.input_dim_drug = config['hidden_dim_drug']\n self.input_dim_protein = config['hidden_dim_protein']\n self.hidden_dims = config['cls_hidden_dims']\n self.visual_attention=config['visual_attention']\n dims = [self.input_dim_drug + self.input_dim_protein] + self.hidden_dims + [2]\n if config['attention']:\n if config['concatenation']:\n dims[0]+=config['cnn_target_filters'][-1]\n else:\n dims[0]=self.input_dim_drug+config['cnn_target_filters'][-1]\n self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(len(self.hidden_dims)+1)])\n self.dropout = nn.Dropout(0.25)\n self._initialize()",
"def populate_initial_valid_classifiers(self):\n pass",
"def __init__(self):\n\n constant_extratrees_kwargs = {'bootstrap': True,\n 'random_state': 0,\n 'oob_score': True,\n 'verbose': True}\n\n self.predictor_config(\n 'ExtraTreesClassifier', obj=ExtraTreesClassifier,\n n_features_dependent_kwargs={\n 'max_features': PredictorConfigScalers.max_feature_scaler,\n 'n_estimators': PredictorConfigScalers.n_estimators_scaler,\n 'n_jobs': PredictorConfigScalers.n_jobs_scaler},\n **constant_extratrees_kwargs)\n\n self.predictor_config(\n 'ExtraTreesRegressor', obj=ExtraTreesRegressor,\n n_features_dependent_kwargs={\n 'max_features': PredictorConfigScalers.max_feature_scaler,\n 'n_estimators': PredictorConfigScalers.n_estimators_scaler,\n 'n_jobs': PredictorConfigScalers.n_jobs_scaler},\n **constant_extratrees_kwargs)\n\n constant_boosting_kwargs = {'n_estimators': 80, 'max_features': 1000,\n 'learning_rate': 0.2, 'subsample': 0.6, }\n\n self.predictor_config('GradientBoostingClassifier',\n obj=GradientBoostingClassifier,\n **constant_boosting_kwargs)\n\n self.predictor_config('GradientBoostingRegressor',\n obj=GradientBoostingRegressor,\n **constant_boosting_kwargs)",
"def __init__(self):\n self.image_subscriber = rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, self.imageCallback)\n print 'Waiting for classifier service to come up...'\n rospy.wait_for_service('/classifier_node/classify')\n self.classify_client = rospy.ServiceProxy('/classifier_node/classify', Classify)",
"def _set_classifiers(self, clfs):\n # tuple to guarantee immutability since we are asssigning\n # __tags__ below and rely on having clfs populated already\n self.__clfs = tuple(clfs) if clfs is not None else tuple()\n \"\"\"Classifiers to use\"\"\"\n\n if len(clfs):\n # enable corresponding ca in the slave-classifiers\n if self.__propagate_ca:\n for clf in self.__clfs:\n clf.ca.enable(self.ca.enabled, missingok=True)\n\n # adhere to their capabilities + 'multiclass'\n # XXX do intersection across all classifiers!\n # TODO: this seems to be wrong since it can be regression etc\n self.__tags__ = [ 'binary', 'multiclass', 'meta' ]\n if len(clfs)>0:\n self.__tags__ += self.__clfs[0].__tags__",
"def train(self):\n # 1. Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)",
"def _start(self):\n if self._classifier is None:\n self._classifier = TFSlimClassifier(self.config)\n self._classifier.__enter__()",
"def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)"
] |
[
"0.7436511",
"0.733247",
"0.7201676",
"0.70582503",
"0.6993458",
"0.69929725",
"0.69442517",
"0.69266564",
"0.6838657",
"0.68037814",
"0.67974",
"0.67292416",
"0.671718",
"0.6693354",
"0.6679035",
"0.66750187",
"0.66644925",
"0.6577201",
"0.6555603",
"0.6542693",
"0.6528392",
"0.65007323",
"0.648249",
"0.64718753",
"0.6465841",
"0.6459097",
"0.64523137",
"0.6438646",
"0.64383584",
"0.64382946"
] |
0.77098763
|
0
|
Evaluate a text against the given training set using the configured classifier
|
def evaluate(text, articles, no_preprocess=False):
    if not _trained:
        raise RuntimeError("No classifier initialized; call setup_classifier() first")
if not no_preprocess:
text = body_reader.get_words_in(text)
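    # Dispatch on the back-end selected in setup_classifier().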
if _classifier == "euclid":
return euclidean.evaluate(articles, text)
elif _classifier == "bayes":
return bayes.evaluate(articles, text)
elif _classifier == "rocchio":
return rocchio.evaluate(articles, text)
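
# Illustrative end-to-end sketch (assumes articles is the training set the
# euclidean/bayes/rocchio modules expect):
#   setup_classifier("rocchio")
#   label = evaluate("raw article text", articles)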
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def evaluate(self, featureset):\r\n #sequence, tag = featureset\r\n gs, labels = [], []\r\n for s, t in featureset:\r\n gs.append(t)\r\n label = self.tagger.choose_tag(s)\r\n labels.append(label)\r\n print (t, label)\r\n\r\n assert(len(gs) == len(labels))\r\n self.write_to_file(labels)\r\n words = self.tagger.test(self.r.test_sents, word=True)\r\n print (accuracy_score(gs, labels))",
"def test_text_classifier_train(self):\n pass",
"def classify(self, dataSet):\n\n return nltk.classify.apply_features(self.extrairFrase, dataSet)",
"def test_text_classifier_test(self):\n pass",
"def classify (self, text_test):\n test_features = self.vectorizer.transform(text_test)\n return self.nbc.predict(test_features)",
"def test_text_classifier_vaporise(self):\n pass",
"def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels",
"def test():\r\n source1 = TextModel('source1')\r\n source1.add_string('It is interesting that she is interested.')\r\n\r\n source2 = TextModel('source2')\r\n source2.add_string('I am very, very excited about this!')\r\n\r\n mystery = TextModel('mystery')\r\n mystery.add_string('Is he interested? No, but I am.')\r\n mystery.classify(source1, source2)",
"def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)",
"def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)",
"def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)",
"def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))",
"def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n \n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? No, but I am.')\n mystery.classify(source1, source2)",
"def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)",
"def evaluate_nltk(self, X):\n X_, y_ = separate_labels_from_features(X)\n \n y = []\n n_sent_correct = 0\n num_sent = len(y_)\n \n for i in range(num_sent):\n sentence_correct = True\n for j in range(len(y_[i])):\n y.append(self.clf_nltk.classify(X_[i][j]))\n if y_[i][j] != y[-1]:\n sentence_correct = False\n \n if sentence_correct == True:\n n_sent_correct += 1\n \n y_ = flatten(y_)\n print(\"F1 score:\")\n print(sklearn.metrics.precision_recall_fscore_support(y_, y, average='micro'))\n print()\n print(\"Accuracy:\")\n print(sklearn.metrics.accuracy_score(y_, y))\n print()\n print(\"Sentence level accuracy:\")\n print(n_sent_correct / num_sent)\n print()\n print(\"F1 score per class:\")\n print(sklearn.metrics.precision_recall_fscore_support(y_, y))\n print()\n print(\"Confusion matrix:\")\n cfm = sklearn.metrics.confusion_matrix(y_, y)\n\n plot_confusion_matrix(cfm, np.unique(y_))\n \n print(np.unique(y_))\n print()\n print(print(np.unique(y)))",
"def test_text_classifier_curate(self):\n pass",
"def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)",
"def Classify_Text(self, overview):\n\n # convert text to lower case\n overview = overview.lower()\n\n path = self.path\n\n # start time\n time0 = time.process_time()\n\n # Use ensemble classifier - voting with weights\n\n # model = joblib.load(path + \"MULTINOMIAL NB_TFIDF VECTORIZER\" + \".pkl\")\n model = joblib.load(\n \"/home/do/PycharmProjects/pythonProject/information-retrival-search-engine/informationRetrival/frontend/static/frontend/text/SVM_COUNT VECTORIZER.pkl\")\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n vec = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n\n print(vec)\n # overview=\"An undercover cop and a mole in the police\"\n Y = vec.fit_transform([overview]).toarray()\n print(vec.get_feature_names())\n print(Counter(Y[0]))\n # print(Counter(Y[1]))\n print(model)\n predicted_genre = model.predict(Y)\n print(predicted_genre)\n\n # Return predicted genre and time taken for classification\n return predicted_genre, str(round(time.process_time() - time0, 3)) + \" seconds\"",
"def eval_text(self, text):\n # Pre-process sentence given\n sents = text.split('\\n')\n words = []\n for sent in sents:\n words.extend(list(sent))\n\n for idx, word in enumerate(words):\n if (word, ) not in self.uni_dist:\n words[idx] = TOKENS[\"UNK\"]\n\n # Compute Log-Probablities\n log_prob = 0\n for ngram in nltk.ngrams(words, self.N):\n log_prob += self.eval_ngram(ngram)\n\n # Compute Perplexity\n num_words = len(words)\n perplexity = 2 ** ((-1 / num_words) * log_prob)\n\n return perplexity",
"def Fit(text):\n article_tfidf = TransformData([text])\n global CLASSIFIER\n predicted_probs = CLASSIFIER.predict_proba(article_tfidf)\n # the output shoud be an array with two elements, one corresponding to\n # probability it's a positive sentiment and the other corresponding to\n # probability it's a negative sentiment.\n return list(zip(CLASSIFIER.classes_, predicted_probs[0]))",
"def test_text_classifier_retrieve(self):\n pass",
"def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)",
"def test_text_classifier_get_training_samples(self):\n pass",
"def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))",
"def run_tests():\n source1 = TextModel(\"Barack Obama\")\n source1.add_file('project/source_texts/barackobama_source_text.txt')\n\n source2 = TextModel('Donald Trump')\n source2.add_file('project/source_texts/donaldtrump_source_text.txt')\n\n new1 = TextModel('More Obama')\n new1.add_file('project/source_texts/moreobama_source_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('More Trump')\n new2.add_file('project/source_texts/moretrump_source_text.txt')\n new2.classify(source1, source2)\n\n new1 = TextModel('Gucci Gang by Lil Pump')\n new1.add_file('project/source_texts/guccigang_source_text.txt')\n new1.classify(source1, source2)\n\n new1 = TextModel(\"Spongebob Transcripts\")\n new1.add_file('project/source_texts/spongebobeps_source_text.txt')\n new1.classify(source1, source2)",
"def evaluate(ner_model, examples):\n # The Scorer computes and stores evaluation scores\n scorer = Scorer()\n for text, annotations in examples:\n # Process the text to get entities predicted\n document = ner_model.make_doc(text)\n correct_annotations = GoldParse(document, entities=annotations['entities'])\n predicted_annotations = ner_model(text)\n # Update the evaluation scores from the document\n scorer.score(predicted_annotations, correct_annotations)\n return scorer.scores",
"def predict(self, infile, model_path=None, eval_gold=False, as_text=False):\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tclf, num_labels, cat_labels, multicol_dict, vocab, firsts, lasts = joblib.load(model_path)\n\n\t\tif as_text:\n\t\t\tconllu = infile\n\t\telse:\n\t\t\tconllu = io.open(infile,encoding=\"utf8\").read()\n\n\t\t#tagged = udpipe_tag(conllu,self.udpipe_model)\n\t\ttagged = tt_tag(conllu,self.lang)\n\n\t\ttrain_feats, _, toks, _, _ = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\theaders = sorted(list(train_feats[0].keys()))\n\n\t\tdata = []\n\n\t\tpreds = {}\n\t\tfor e in self.estimators:\n\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\theaders.append(e.name + \"_prob\")\n\n\t\tgenre_warning = False\n\t\tfor i, item in enumerate(train_feats):\n\t\t\titem[\"first\"] = item[\"word\"][0] if item[\"word\"][0] in firsts else \"_\"\n\t\t\titem[\"last\"] = item[\"word\"][-1] if item[\"word\"][-1] in lasts else \"_\"\n\t\t\tif \"genre\" in cat_labels:\n\t\t\t\tif item[\"genre\"] not in multicol_dict[\"encoder_dict\"][\"genre\"].classes_: # New genre not in training data\n\t\t\t\t\tif not genre_warning:\n\t\t\t\t\t\tsys.stderr.write(\"! WARN: Genre not in training data: \" + item[\"genre\"] + \"; suppressing further warnings\\n\")\n\t\t\t\t\t\tgenre_warning = True\n\t\t\t\t\titem[\"genre\"] = \"_\"\n\t\t\tif \"pos\" in cat_labels:\n\t\t\t\tif item[\"pos\"] not in multicol_dict[\"encoder_dict\"][\"pos\"].classes_:\n\t\t\t\t\titem[\"pos\"] = \"_\"\n\t\t\tif \"cpos\" in cat_labels:\n\t\t\t\tif item[\"cpos\"] not in multicol_dict[\"encoder_dict\"][\"cpos\"].classes_:\n\t\t\t\t\titem[\"cpos\"] = \"_\"\n\t\t\tif item[\"word\"] not in vocab and \"word\" in multicol_dict[\"encoder_dict\"]:\n\t\t\t\tif item[\"pos\"] in multicol_dict[\"encoder_dict\"][\"word\"].classes_:\n\t\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\t\telse:\n\t\t\t\t\titem[\"word\"] = \"_\"\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, _, _ = self.n_gram(data,headers,[],[])\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded = self.multicol_transform(data,columns=multicol_dict[\"columns\"],all_encoders_=multicol_dict[\"all_encoders_\"])\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tpred = clf.predict(data_x)\n\n\t\t# Ensure first token in document is always a sentence break\n\t\tfor i, x in enumerate(data_encoded[\"tok_id\"].values):\n\t\t\tif x == 1:\n\t\t\t\tpred[i] = 1\n\n\t\tif eval_gold:\n\t\t\tgold_feats, _,_,_,_ = read_conll(conllu,genre_pat=self.genre_pat,mode=\"sent\",as_text=True)\n\t\t\tgold = [int(t['wid'] == 1) for t in gold_feats]\n\t\t\tconf_mat = confusion_matrix(gold, pred)\n\t\t\tsys.stderr.write(str(conf_mat) + \"\\n\")\n\t\t\ttrue_positive = conf_mat[1][1]\n\t\t\tfalse_positive = conf_mat[0][1]\n\t\t\tfalse_negative = conf_mat[1][0]\n\t\t\tprec = true_positive / (true_positive + false_positive)\n\t\t\trec = true_positive / (true_positive + false_negative)\n\t\t\tf1 = 2*prec*rec/(prec+rec)\n\t\t\tsys.stderr.write(\"P: \" + str(prec) + \"\\n\")\n\t\t\tsys.stderr.write(\"R: \" + str(rec) + \"\\n\")\n\t\t\tsys.stderr.write(\"F1: \" + str(f1) + \"\\n\")\n\t\t\twith 
io.open(\"diff.tab\",'w',encoding=\"utf8\") as f:\n\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\tf.write(\"\\t\".join([toks[i],str(gold[i]),str(pred[i])])+\"\\n\")\n\t\t\treturn conf_mat, prec, rec, f1\n\t\telse:\n\t\t\treturn pred",
"def evaluate(self,text):\n return self.lm.evaluate(text)",
"def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)",
"def test_text_classifier_set_params(self):\n pass"
] |
[
"0.70962906",
"0.6943633",
"0.6822659",
"0.66456556",
"0.6644769",
"0.66341835",
"0.65980095",
"0.65802",
"0.65575683",
"0.6527537",
"0.6527537",
"0.6511052",
"0.6504692",
"0.64602184",
"0.64459735",
"0.64284915",
"0.6425316",
"0.64167213",
"0.640727",
"0.6388599",
"0.63437563",
"0.6323611",
"0.62768996",
"0.62752724",
"0.6270517",
"0.62542945",
"0.6250824",
"0.6230468",
"0.6213674",
"0.6169991"
] |
0.71262985
|
0
|
instead of querying the site twice (first leagues and then matches)
|
def get_all_matches_by_league(self):
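    """Return all matches grouped by league in a single pass, instead of
    querying the site twice (first leagues and then matches).

    Subclasses are expected to implement this.
    """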
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def search_matches() -> Union[WebElement, None]:\n #navigating to soccer matches page\n print(\"Starting to look for matches\")\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \".hm-MainHeaderLogoWide_Bet365LogoImage\"))).click()\n sports = driver.find_elements_by_class_name(\"wn-PreMatchItem \")\n find_item('futebol', sports).click() #go to soccer page\n live = driver.find_elements_by_class_name(\"hm-MainHeaderCentreWide_Link\")\n find_item('ao-vivo', live).click() #go to live games \n WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"ip-ControlBar_BBarItem\"))).click() #click on \"Geral\"\n \n #collecting matches elements\n all_leagues = driver.find_elements_by_class_name(\"ovm-Competition\")\n leagues = find_all_items(all_leagues, PARAMS_LEAGUES)\n matches_unav, leagues_unav = [], []\n while len(leagues) != len(leagues_unav):\n try:\n current_league = find_item_avoiding(leagues_unav, leagues) #gets next non-visited league\n if not current_league:\n print(f'No matches left to search.')\n break #just exists this function if there's no league to explore\n leagues_unav.append(current_league.text.split('\\n')[0].lower())\n league_matches = current_league.find_elements_by_class_name(\"ovm-FixtureDetailsTwoWay_Wrapper\") #gets all matches of that league\n num_matches = len(league_matches)\n while True:\n try:\n assert len(current_league.find_elements_by_class_name(\"ovm-FixtureDetailsTwoWay_Wrapper\")) == num_matches #asserts no match started or ended\n current_match = find_item_avoiding(matches_unav, league_matches, mode = 'match')\n if not current_match:\n break\n matches_unav.append(current_match.text.split('\\n')[0].lower())\n yield current_match\n except (AssertionError, StaleElementReferenceException):\n break\n except StaleElementReferenceException:\n while True:\n try:\n i = 0 \n all_leagues = driver.find_elements_by_class_name(\"ovm-Competition\")\n leagues = find_all_items(all_leagues)\n break\n except StaleElementReferenceException:\n print(f'stale element raised {i}')\n i += 1\n if not current_match:\n break",
"def test_cyclingleagues_get(self):\n pass",
"def find_league(argin, argtype, host='localhost', root='root', password=''):\r\n con = pymysql.connect(host, root, password)\r\n results = []\r\n\r\n with con.cursor() as cur:\r\n cur.execute(f\"\"\"USE {DB_NAME};\"\"\")\r\n\r\n for inp in argin:\r\n if argtype == \"country\" and inp not in get_countries_dict().keys():\r\n logger.info(f\"{inp} is not one of the possible countries.\")\r\n continue\r\n\r\n cur.execute(f\"\"\"SELECT * FROM leagues WHERE {argtype}='{inp}'\"\"\")\r\n result = cur.fetchall()\r\n if result:\r\n results.append(result)\r\n\r\n else:\r\n logger.info(f\"League is not inside the DB - starting to scrape its teams and players.\")\r\n add_league(inp, argtype, con, host, root, password)\r\n cur.execute(f\"\"\"SELECT * FROM leagues WHERE {argtype}='{inp}'\"\"\")\r\n result = cur.fetchall()\r\n if result:\r\n results.append(result)\r\n else:\r\n logger.info(f\"{inp} was not found on site.\")\r\n\r\n con.close()\r\n return results",
"def get_match_features(match, matches):\n\n # Define variables\n date = match.date\n season = match.season\n home_team = match.home_team_api_id\n away_team = match.away_team_api_id\n\n matches_home_ashome = get_last_ashome_matches(matches, date, season, home_team, x=5)\n # print(matches_home_ashome, flush=True)\n matches_home_asaway = get_last_asaway_matches(matches, date, season, home_team, x=5)\n matches_away_ashome = get_last_ashome_matches(matches, date, season, away_team, x=5)\n matches_away_asaway = get_last_asaway_matches(matches, date, season, away_team, x=5)\n # matches_home = pd.concat([matches_home_ashome, matches_home_asaway])\n # matches_away = pd.concat([matches_away_ashome, matches_away_asaway])\n\n # Get last x matches of both teams against each other\n competing_matches_same = get_last_competing_matches(matches, date, home_team, away_team, x=2)\n competing_matches_diff = get_last_reverse_competing_matches(matches, date, home_team, away_team, x=2)\n\n # Define result data frame\n result = pd.Series()\n result['match_api_id'] = match.match_api_id\n result['league_id'] = match.league_id\n result['home_recent_goal_ashome'] = get_goals(matches_home_ashome)\n result['away_recent_goal_asaway'] = get_goals(matches_away_asaway, isAway=True)\n result['home_recent_lost_ashome'] = get_goals_lost(matches_home_ashome)\n result['away_recent_lost_asaway'] = get_goals_lost(matches_away_asaway, isAway=True)\n result['team_ova_diff'] = match.home_ova - match.away_ova\n result['home_recent_score'] \\\n = get_average_league_score(matches_home_ashome) + get_average_league_score(matches_home_asaway, isAway=True)\n result['away_recent_score'] \\\n = get_average_league_score(matches_away_ashome) + get_average_league_score(matches_away_asaway, isAway=True)\n result['home_competing_same_goal'] = get_goals(competing_matches_same)\n result['away_competing_same_goal'] = get_goals(competing_matches_same, isAway=True)\n result['home_competing_diff_goal'] = get_goals(competing_matches_diff, isAway=True)\n result['away_competing_diff_goal'] = get_goals(competing_matches_diff)\n result['home_recent_shoton'] = get_shoton(matches_home_ashome) + 0.8 * get_shoton(matches_home_asaway, isAway=True)\n result['away_recent_shoton'] = get_shoton(matches_away_asaway, isAway=True) + 0.8 * get_shoton(matches_away_ashome)\n result['home_recent_shotoff'] = get_shotoff(matches_home_ashome) + 0.8 * get_shotoff(matches_home_asaway, isAway=True)\n result['away_recent_shotoff'] = get_shotoff(matches_away_asaway, isAway=True) + 0.8 * get_shotoff(matches_away_ashome)\n\n # print(result)\n return result",
"def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")",
"def test_recent_querys(self):\n CreateMatch()\n\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n self.assertIn(\"TESTURL1\", res_txt)\n self.assertIn(\"TESTURL2\", res_txt)",
"def add_league(inp_to_add, type_to_add, con, host, root, password):\r\n with con.cursor() as cur:\r\n if type_to_add == \"url\":\r\n league_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n league_site = inp_to_add\r\n elif type_to_add == \"country\":\r\n midterm_url = get_countries_dict()[inp_to_add]\r\n league_soup = BeautifulSoup(requests.get(midterm_url).text, 'html.parser')\r\n league_site = SOCCER_URL + league_soup.find('ul', class_=\"left-tree\").li.a[\"href\"]\r\n else:\r\n league_soup, league_site = get_first_search_result(\r\n SOCCER_URL + \"/search/competitions/?q=\" + inp_to_add)\r\n\r\n if league_soup:\r\n cur.execute(\"SELECT MAX(id) FROM leagues\")\r\n league_id = cur.fetchall()[0][0]\r\n\r\n addition = (league_soup.body.h1.text, league_soup.body.h2.text, league_site)\r\n cur.execute(\"\"\"INSERT INTO leagues (name, country, url) VALUES (%s, %s, %s)\"\"\", addition)\r\n con.commit()\r\n\r\n league_dict = {league_id: {'name': addition[0], 'url': addition[2]}}\r\n add_all_teams_and_players_in_league(league_dict, con, host, root, password)",
"def query(self, string):\n rows, wordids = self.get_match_rows(string)\n scores = self.get_scored_list(rows, wordids)\n rankedscores = [(score, url) for (url, score) in scores.items()]\n rankedscores.sort()\n rankedscores.reverse()\n for (score, urlid) in rankedscores[0:10]:\n print('%f\\t%s' % (score, self.get_url_name(urlid)))\n return wordids, [r[1] for r in rankedscores[0:10]]",
"def scraping_league_stats():\n url=\"https://www.basketball-reference.com/teams/\"\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n teams = bs.find(\"div\", {'id':'div_teams_active'}).findAll(\"a\")\n teams_url = [team.get(\"href\") for team in teams]\n team_players_list = []\n for team in teams_url:\n time.sleep(3)\n teamurl = domain + team\n print(teamurl) #fafaf\n html = urlopen(teamurl)\n bs = BeautifulSoup(html, 'html.parser')\n div_team = \"div_\" + teamurl[-4:-1]\n season = bs.find(\"div\", {'id':div_team}).find(\"a\").get(\"href\") #tengo que cambiarlo para que este guay\n #esto tiene la URL de la ultima season\n seasonurl = domain + season\n print(seasonurl)\n html = urlopen(seasonurl)\n bs = BeautifulSoup(html, 'html.parser')\n players = bs.find(\"table\", {'id':'roster'}).findAll(\"td\", {\"data-stat\":\"player\"})\n player_url_list = [player.find(\"a\").get(\"href\") for player in players]\n for player in player_url_list:\n player_url = domain + player\n time.sleep(3)\n print(player_url)\n html = urlopen(player_url)\n bs = BeautifulSoup(html, 'html.parser')\n try:\n tabla = pd.read_html(str(bs.find(\"div\", {'id':'all_per_poss'})).replace(\"<!--\", \"\"))[0] \n tabla[\"Player\"] = bs.find(\"h1\", {\"itemprop\" : \"name\"}).text.strip()\n indice = tabla[tabla[\"Season\"]==\"Career\"].index[0]\n tabla = tabla[0:indice]\n tabla = tabla.drop(axis= 1,columns = \"Unnamed: 29\")\n #no me encuentra tablas para uno del college que es el darlina01\n print(player_url)\n team_players_list.append(tabla)\n except:\n pass\n return pd.concat(team_players_list)",
"def get_people(team):",
"async def wiki(self, ctx, *, parse: str):\n parse = parse.split(' ', 1)\n anti = [\"antibirth\", \"anti\"]\n rev = [\"revelations\", \"rev\"]\n subdomain = \"antibirth\" if parse[0] in anti else \"tboirevelations\" if parse[0] in rev \\\n else \"bindingofisaacrebirth\"\n parse = ' '.join(parse) if subdomain == \"bindingofisaacrebirth\" else parse[1]\n page = requests.get(f\"https://{subdomain}.gamepedia.com/index.php?search={parse}\")\n if \"search\" in page.url:\n soup = BeautifulSoup(page.content, 'html.parser')\n if re.sub(r'\\W+', '', parse.lower()) == \\\n re.sub(r'\\W+', '', soup.find(class_=\"unified-search__result__title\").get(\"data-title\").lower()):\n await ctx.send(soup.find(class_=\"unified-search__result__title\").get(\"href\"))\n else:\n await ctx.send(f\"I couldn't find an exact match. Here is a link to this query's search page. {page.url}\")\n else: await ctx.send(page.url)",
"def test_find_phrase_matches2(self):\n\t\ttest = sentiment.LibraryRun(self.text2, self.lib)\n\t\tobj_ut = test.find_phrase_matches(self.tokens_generator2)[0]\n\t\tself.assertEqual(dict(obj_ut),\n\t\t\t{'not good': [[2, -1, 0], [4, -1, 0]]})",
"def search_articles(query, case_sensitive, all_articles):\n articles_with_matches = 0\n total_matches = 0\n for title in all_articles:\n article_content = all_articles[title][\"content\"]\n matches = list(get_matches(query, case_sensitive, article_content))\n\n if matches:\n print(f\"{title}:\")\n for snippet in matches:\n print(snippet)\n print()\n articles_with_matches += 1\n total_matches += len(matches)\n\n print(f\"Found {total_matches} mentions of '{query}' in {articles_with_matches} articles.\")",
"def matchmaking():\n from app import db, models\n # Split by character as they are stored as a list in the database.\n # Could improve by normalising skills & model already produced -- models::skills\n # Query would change below, but matchmaking algorithm would remain the same.\n uskills = [{'name': str(i.uid), 'skills':\n [str(i) for i in i.skills.split(\",\")]}\n for i in db.session.query(models.UserSkills).all()]\n # Used to select matches that have not been previously paired.\n pairs = [(str(i.uid), str(i.mid))\n for i in db.session.query(models.Pair).all()]\n\n # Find all the users who are not already matched, but should be!\n # IFF at least once skill overlaps then there is a match. We are too kind.\n matches = []\n for user in uskills:\n for other_user in uskills:\n # Do not add already existing pairs and matches are transitive;\n # Jay matched with Aare is the same as Aare matching with Jay.\n cu, ou = (user['name'], other_user['name'])\n if ((cu, ou) in pairs or (ou, cu) in pairs or\n (cu, ou) in matches or (ou, cu) in matches):\n break\n # TODO: skill comparison based on NLTK use (synonyms, etc...)\n # TODO: improve the algorithm to rank and sort pairs by skills.\n if (set(user['skills']).intersection(other_user['skills']) and\n user['name'] != other_user['name']):\n matches.append((user['name'], other_user['name']))\n return matches",
"def calculate_matches(teams: Dict[int, Team]) -> Dict[int, Match]:\r\n match_urls = TCS_Scraper.scrape_matches(end_round=CURRENT_ROUND)\r\n matches = {}\r\n for match in match_urls:\r\n print(\"Scraping\", match)\r\n team_1id, results, team_2id \\\r\n = TCS_Scraper.scrape_match(match, teams)\r\n # If nothing happened on this match page, skip it\r\n if not results:\r\n continue\r\n team_1 = teams[team_1id]\r\n team_2 = teams[team_2id]\r\n\r\n team_1elos = [team_1.elo]\r\n team_2elos = [team_2.elo]\r\n for result in results:\r\n # Calculate new elo for each team\r\n e1p, e2p = Team.calculate_elo(team_1.elo, team_2.elo, result[0])\r\n\r\n # Print elo changes for each team\r\n print(team_1.name, str(e1p - team_1.elo))\r\n print(team_2.name, str(e2p - team_2.elo))\r\n\r\n # Store the elo changes\r\n team_1elos.append(e1p)\r\n team_2elos.append(e2p)\r\n\r\n # Set new elo values\r\n team_1.elo = e1p\r\n team_2.elo = e2p\r\n\r\n # Create a new match object and append it to the list of matches\r\n new_match = Match(\r\n match,\r\n team_1id,\r\n team_2id,\r\n team_1elos,\r\n team_2elos,\r\n results\r\n )\r\n matches[new_match.id] = new_match\r\n\r\n # Add match id to each team object\r\n team_1.matches.append(new_match.id)\r\n team_2.matches.append(new_match.id)\r\n\r\n return matches",
"def get_matches(self):\n return self.rhyming_words",
"def _get_matches_from_one_page(self, items):\n response = []\n if items:\n for item in items:\n item_id = item.get('id')\n item_desc = item.get('description')\n url = item.get('html_url')\n\n desc_matches = self._get_matches_in_text(item_desc)\n page_matches = self._get_matches_on_page(item_id)\n self._add_data_to_response(desc_matches, page_matches, url, response)\n return response",
"def test_find_phrase_matches1(self):\n\t\ttest = sentiment.LibraryRun(self.text1, self.lib)\n\t\tobj_ut = test.find_phrase_matches(self.tokens_generator1)[0]\n\t\tself.assertEqual(dict(obj_ut),\n\t\t\t{'not good': [[2, -1, 0]]})",
"def calculate_match_at_backend():\n\n # Calculate lovermatch list for each user\n user_set = UserInfo.objects\n for u1 in user_set:\n matchlist = {}\n for u2 in user_set:\n if u1 != u2:\n features_to_match = u1.features\n weights = u1.percentage\n sim = get_similarity(u1, u2, features_to_match, weights)\n matchlist[u2.name] = sim\n u1.loverMatch = matchlist\n u1.save()\n\n # Calculate lovermatched list for each user\n user_set = UserInfo.objects\n for u1 in user_set:\n matchedlist = {}\n for u2 in user_set:\n if u1 != u2:\n if u1.name in u2.loverMatch.keys():\n matchedlist[u2.name] = u2.loverMatch[u1.name]\n u1.loverMatched = matchedlist\n u1.save()",
"def match_common_combinations(player_name, battle_tag):\r\n responce, url = \"\", \"\"\r\n # #Case 1: the default for overbuff.com titlecase playername-battletag\r\n # print(\" 1. Trying Title Case: \", end = \" \")\r\n responce, url = get_responce(player_name.title(), battle_tag)\r\n if responce.status_code == 200:\r\n return responce, url\r\n else:\r\n # print(\" 2. Trying Lower Case: \", end = \" \")\r\n responce, url = get_responce(player_name.lower(), battle_tag)\r\n if responce.status_code == 200:\r\n return responce, url\r\n else:\r\n # #Case 3: some players with uppercase playername-battletag\r\n # print(\" 3. Trying Upper Case: \", end = \" \")\r\n responce, url = get_responce(player_name.upper(), battle_tag)\r\n if responce.status_code == 200:\r\n return responce, url\r\n else:\r\n return responce, url\r\n\r\n return responce, url",
"def _get_correct_page(self, options, team):\n best_candidate = None\n best_yob = None\n for option in options:\n if 'disambiguation' not in option:\n try:\n wiki_player = wikipedia.page(option)\n except:\n continue\n self.soup = BeautifulSoup(wiki_player.html())\n if team not in str(self.soup):\n continue\n self._gen_table()\n yob = int(self.born[1:5])\n if best_yob is None or self.birth > best_yob:\n best_yob = yob\n best_candidate = self.soup\n self.soup = best_candidate",
"def generate_matching_courses(self,goal):\n\n searchstring = self.preprocess(goal.goal)\n wordlist = nltk.word_tokenize(searchstring)\n relevant_words = []\n mystopwords = stopwords.words(\"english\") + stopwords.words(\"german\")\n for word in wordlist:\n if word not in mystopwords:\n relevant_words.append(word)\n # TODO: Activate in production\n # TODO: For testing find workaround to make local courses available for local test systems\n #user_origin = goal.user.origin\n # TODO: The following two lines have to be exchanged to filter courses according to origin\n #courses = models.Course.objects.filter(origin=user_origin)\n courses = models.Course.objects.all()\n\n matches = {}\n for course in courses:\n if course == None:\n print(\"Course is None\")\n if course.TF_IDF_scores == {}:\n continue\n score = 0.0\n for word in relevant_words:\n if word in course.TF_IDF_scores:\n if word in course.TF_IDF_scores:\n score += course.TF_IDF_scores[word]\n if score > 0.0:\n if score in matches.keys():\n matches[score].append(course)\n else:\n matches[score] = []\n matches[score].append(course)\n scores = list(matches.keys())\n scores.sort()\n\n bestcourses = []\n\n i = 0\n for score in scores:\n for course in matches[score]:\n bestcourses.append(course)\n i += 1\n if i >= COURSES_TO_DISPLAY:\n break\n\n if len(bestcourses) == 0:\n a = models.Activity.objects.get_or_create(\n title=\"Keine passenden Lehrveranstaltungen\",\n description=\"Aktuell gibt es zu Ihrem Interesse keine passenden Lehrveranstaltungen. \" \\\n \"Siddata wird regelmäßig nach neuen passenden Kursen suchen und diese ggf. hier anzeigen. \",\n type=\"todo\",\n goal=goal,\n image=\"idea.png\",\n status=\"new\"\n )[0]\n a.save()\n i = 0\n for course in bestcourses:\n\n a = models.Activity.objects.get_or_create(\n title=course.title,\n description=course.description,\n type=\"course\",\n goal=goal,\n image=\"%s.png\" %random.choice([\"world\",\"idea\",\"library\",\"cat\",\"brainbulb\",\"braincloud\",\"friends\"]),\n course=course,\n status=\"new\"\n )[0]\n a.save()\n i += 1\n if i == COURSE_MAX:\n break",
"def mlbleagueleaders(self, irc, msg, args, optleague, optcategory):\n\n league = {'mlb': '9', 'al':'7', 'nl':'8'} # do our own translation here for league/category.\n category = {'avg':'avg', 'hr':'homeRuns', 'rbi':'RBIs', 'ra':'runs', 'sb':'stolenBases', 'era':'ERA', 'whip':'whip', 'k':'strikeoutsPerNineInnings'}\n\n optleague = optleague.lower()\n optcategory = optcategory.lower()\n\n if optleague not in league:\n irc.reply(\"League must be one of: %s\" % league.keys())\n return\n\n if optcategory not in category:\n irc.reply(\"Category must be one of: %s\" % category.keys())\n return\n\n url = self._b64decode('aHR0cDovL20uZXNwbi5nby5jb20vbWxiL2FnZ3JlZ2F0ZXM=') + '?category=%s&groupId=%s&y=1&wjb=' % (category[optcategory], league[optleague])\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n \n html = html.replace('class=\"ind alt nw\"', 'class=\"ind nw\"')\n\n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class':'table'})\n rows = table.findAll('tr')\n \n append_list = []\n \n for row in rows[1:6]:\n rank = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'10%'}).renderContents()\n team = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'70%'}).find('a').text\n num = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'20%'}).renderContents()\n append_list.append(rank + \". \" + team + \" \" + num)\n\n thelist = string.join([item for item in append_list], \" | \")\n\n irc.reply(\"Leaders in %s for %s: %s\" % (ircutils.bold(optleague.upper()), ircutils.bold(optcategory.upper()), thelist))",
"def get_leagues_and_countries(source=utils.get_native_source):\n if not isinstance(source, games.models.Source):\n # If I used source=native_source() or if native_source was a global variable then\n # during db initialization (running command initialize) you would get an error since\n # it gets its value when the database is empty.\n source = source()\n logger.info(\"getting leagues and countries from source %s...\", source)\n if not source:\n return [], []\n data, meta, status_code = sportmonks.countries.all(include='leagues.seasons')\n if not data:\n # if the status code is not 200 data and meta are None\n return [], []\n # with open('sportmonks/response_texts/aws_01.txt', 'w') as outfile:\n # json.dump(meta, outfile, indent=4)\n # json.dump(data, outfile, indent=4)\n\n pre_countries, pre_competitions = [], []\n\n try:\n # Notice that only the first supported sport will be processed (currently this is is acceptable since we only\n # support football and so the first supported sport will always be football)\n sport_sids = parse_sport(meta)\n sports = []\n for sport_sid in sport_sids:\n sport = games.models.Sport.by_sid(sid=sport_sid, source=source)\n if not sport:\n logger.info(\"Sport contained in the response with sid {} is not supported\".format(sport_sid))\n continue\n sports.append(sport)\n if not sports:\n logger.error(\"No supported sport in the response\")\n return [], []\n football_gname = games.naming.sport_names.get('football', None)\n football = games.models.Sport.objects.get(name=football_gname)\n if football not in sports:\n logger.info(\"Football is not in response\")\n return [], []\n # logger.debug(\"Trying to get sport from source: %s and sid: %s\", source, sport_sid)\n sport_gname = football_gname\n for item in data:\n try:\n country_sid = item.get('id')\n # logger.debug('country_sid: %s', country_sid)\n country_sname = item.get('name')\n # logger.debug('country_sname: %s', country_sname)\n extra = item.get('extra')\n # logger.debug('extra: %s', extra)\n leagues = item.get('leagues').get('data')\n # logger.debug('leagues: %s', leagues)\n try:\n fifa_code = extra.get('fifa') # some countries might lack extra information\n except AttributeError:\n fifa_code = None\n except Exception as e:\n logger.data_error('%s', e)\n continue\n pre_country = pre_models.PreCountry(source=source, sname=country_sname, sid=country_sid, fifa_code=fifa_code)\n pre_countries.append(pre_country)\n for league in leagues:\n try:\n # sportmonks uses sgname for leagues. 
I use this sgname as an sname (comp_season_specific name)\n competition_sname = league.get('name')\n # logger.debug('competition_sname: %s', competition_sname)\n sid = league.get('id')\n # logger.debug('sid: %s', sid)\n seasons = league.get('seasons').get('data')\n # logger.debug('seasons: %s', seasons)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n competition_season_utils = []\n # comp_seas_sids = []\n for season in seasons:\n try:\n season_name = season.get('name')\n # logger.debug('season_name: %s', season_name)\n # season_name = seasons_special_treatment(season_name)\n competition_season_sid = season.get('id')\n # logger.debug('competition_season_sid: %s', competition_season_sid)\n is_current_season = season.get('is_current_season', False)\n # logger.debug('is_current_season: %s', is_current_season)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n # comp_seas_sids.append(competition_season_sid)\n zak_season_name = games.models.Season.zakandify_season_string(season_name)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n competition_season_type = get_competition_season_type(season)\n competition_season_util = pre_models.CompetitionSeasonUtil(season, competition_season_sid, competition_sname, competition_season_type)\n competition_season_utils.append(competition_season_util)\n # logger.debug(\"competition season sids: %s\", comp_seas_sids)\n pre_competition = pre_models.PreCompetition(\n source=source, sname=competition_sname, sid=sid, sport_name=sport_gname,\n competition_season_utils=competition_season_utils, pre_country=pre_country)\n pre_competitions.append(pre_competition)\n\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.countries.all from source %s', e, source)\n logger.info(\"%s pre countries and %s pre competitions were created\", len(pre_countries), len(pre_competitions))\n return pre_countries, pre_competitions",
"def query(url):",
"def match_test_cases(player_name, battle_tag, cmdLargs = False):\r\n responce, url = get_responce(player_name, battle_tag)\r\n\r\n # Here I am Using Recurson to try to match several test Cases for user Friendlieness\r\n if cmdLargs:\r\n if responce.status_code != 200:\r\n print(\"\\nError ! Invalid Battle Tag! Please Fix => ({})\".format(player_name+'#'+battle_tag))\r\n # exit()\r\n responce , url = match_common_combinations(player_name, battle_tag)\r\n if responce.status_code != 200:\r\n responce , url = match_all_possible_combinations(player_name, battle_tag)\r\n if responce.status_code != 200:\r\n print(\"\\nError ! BattleTag Does Not Exist! Please Fix => ({})\".format(player_name+'#'+battle_tag))\r\n exit()\r\n else:\r\n return responce, url\r\n\r\n return responce, url\r\n else:\r\n if responce.status_code != 200:\r\n responce , url = match_common_combinations(player_name, battle_tag)\r\n if responce.status_code != 200:\r\n responce , url = match_all_possible_combinations(player_name, battle_tag)\r\n else:\r\n return responce, url\r\n\r\n #else return the default responce\r\n print(\"Done Checking !\")\r\n return responce, url",
"def scraping_actual_team_players(team_abbreviation):\n starting_point = domain + \"/teams/\"\n teamurl = starting_point + team_abbreviation + \"/\"\n team_id = \"div_\" + team_abbreviation\n html = urlopen(teamurl)\n bs = BeautifulSoup(html, 'html.parser')\n actual_team_url = domain + str(bs.find(\"div\", {'id': team_id}).find(\"a\").get(\"href\"))\n html = urlopen(actual_team_url)\n bs = BeautifulSoup(html, 'html.parser')\n players = bs.find(\"table\", {'id':'roster'}).findAll(\"td\", {\"data-stat\":\"player\"})\n players_url = [player.find(\"a\").get(\"href\") for player in players]\n team_players_list = []\n for player_url in players_url:\n time.sleep(3)\n url = domain + player_url\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n print(player_url)\n try:\n tabla = pd.read_html(str(bs.find(\"div\", {'id':'all_per_poss'})).replace(\"<!--\", \"\"))[0] \n tabla[\"Player\"] = bs.find(\"h1\", {\"itemprop\" : \"name\"}).text.strip()\n indice = tabla[tabla[\"Season\"]==\"Career\"].index[0]\n tabla = tabla[0:indice]\n tabla = tabla.drop(axis= 1,columns = \"Unnamed: 29\")\n #no me encuentra tablas para uno del college que es el darlina01\n print(player_url)\n team_players_list.append(tabla)\n except:\n pass\n return pd.concat(team_players_list)",
"def _find_matches(self, query, min_match):\n\t\tresult_list = []\n\t\tl_query = query.lower()\n\n\t\t#The card dictionary main keys are the sets card belongs to\n\t\tfor exp in self.card_dict:\n\t\t\tfor card in self.card_dict[exp]:\n\t\t\t\t#Change all uppercase letters to lowercase in preparation for string comparisons\n\t\t\t\tl_cardname = card['name'].lower()\n\n\t\t\t\tpercent_match = 0\n\n\t\t\t\tsearch_words = {}\n\n\t\t\t\t#Create a sub dictionary for each search word in the query\n\t\t\t\tfor word in l_query.split(' '):\n\t\t\t\t\tsearch_words.update({word : {}})\n\n\t\t\t\tcard_words = l_cardname.split(' ')\n\n\t\t\t\t#Calculate the match percentage between every search word and every card word\n\t\t\t\tfor search_word in search_words:\n\t\t\t\t\tfor card_word in card_words:\n\t\t\t\t\t\tmatch = 1 - (Searcher.levenshtein_distance(search_word, card_word) / \n\t\t\t\t\t\t\tmax(len(search_word), len(card_word)))\n\n\t\t\t\t\t\tif search_word not in search_words.keys():\n\t\t\t\t\t\t\tsearch_words[search_word] = {card_word: { 'match' : match } }\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsearch_words[search_word].update( {card_word: { 'match' : match } } )\n\n\t\t\t\t#Calculates the total match mercentage for the entire query and the card name\n\t\t\t\tfor search_word in search_words:\n\n\t\t\t\t\tmax_value_key = list(search_words[search_word].keys())[0]\n\t\t\t\t\tmax_value = search_words[search_word][max_value_key]\n\n\t\t\t\t\tfor card_word in search_words[search_word]:\n\t\t\t\t\t\tif search_words[search_word][card_word]['match'] > max_value['match']:\n\t\t\t\t\t\t\tmax_value_key = card_word\n\t\t\t\t\t\t\tmax_value = search_words[search_word][card_word]\n\n\t\t\t\t\tpercent_card_match = len(max_value_key) / len(l_cardname.replace(\" \", \"\"))\n\t\t\t\t\tpercent_query_match = len(search_word) / len(l_query.replace(\" \", \"\"))\n\n\t\t\t\t\t#These weights emphasizes matching the query more than the entire card\n\t\t\t\t\tcard_match_weight = 0.25\n\t\t\t\t\tquery_match_weight = 1 - card_match_weight\n\n\t\t\t\t\tpercent_match += (percent_query_match * max_value['match'] * query_match_weight + \n\t\t\t\t\t\tpercent_card_match * max_value['match'] * card_match_weight)\n\n\t\t\t\tif percent_match >= min_match:\n\t\t\t\t\tresult_list.append([card, percent_match])\n\n\t\treturn result_list",
"def get_teams():",
"def get_competitions_choice():\n\n cursor = mc.get_db_cursor(mc.DB_NAME)\n select_query = \"SELECT DISTINCT(league) FROM recom_formation;\"\n cursor.execute(select_query)\n leagues = cursor.fetchall()\n\n # initialize a dictionary for storing the different leagues\n result_dict = {\n \"values\": []\n }\n # save all the different leagues in the dictionary\n for league in leagues:\n result_dict[\"values\"].append(league[0])\n return result_dict"
] |
[
"0.6230952",
"0.58164877",
"0.57194513",
"0.5475062",
"0.5414087",
"0.540916",
"0.5403245",
"0.5382146",
"0.53518337",
"0.5312016",
"0.5311977",
"0.52966696",
"0.5252232",
"0.5247202",
"0.5239561",
"0.5224129",
"0.52188694",
"0.5210715",
"0.5197156",
"0.51748246",
"0.51668096",
"0.5161201",
"0.51592565",
"0.5154765",
"0.5153986",
"0.5152277",
"0.5147273",
"0.51316303",
"0.51216406",
"0.51138365"
] |
0.6190075
|
1
|
Get game name for user and set its proper id
|
def set_game_id(self, game_name):
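    """Get the game name for the user and set its proper id (and start time)."""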
    # Normalise the league keys by stripping non-alphabetic characters.
    dic = {''.join(filter(str.isalpha, key)): v for key, v in self.games_map.items()}
    # Keep only the games for this league, then normalise the game names the same way.
    dic = dic[self.league]
    dic = {''.join(filter(str.isalpha, key)): v for key, v in dic.items()}
    self.game_id = dic[game_name][0]
    self.game_time = dic[game_name][1]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_game_id(self) -> str:\n return self.game_name_entry.get()",
"def set_game_id(self, value: str) -> None:\n self.game_name_entry.delete(0, len(self.game_name_entry.get()))\n self.game_name_entry.insert(0, value)",
"def new_game(blank_game, user_id=None):\n if user_id:\n g.db.remove({'_id': user_id}, justOne=True)\n new_id = g.db.insert({'game': blank_game.serialise()})\n flash('New user successfully created')\n return new_id",
"def _get_user_id(self, name):\n try:\n apiResponse = twitchAPI.twitchAPIGet(\"/users\", {\"login\": name}) #Try to get user id from API\n userID = apiResponse[\"data\"][0][\"id\"]\n except (KeyError, APIUnavailable):\n userID = input(\"Please enter the user id of the user: \")\n except IndexError: #If Twitch API does not return user id\n print(\"That user does not exist on Twitch.\")\n userID = False\n return(userID)",
"def get_id(self):\r\n return self.username",
"def __assign_name_id(self):\n if not self.name_id:\n self.name_id = str(BaseTicketing.objects.create())",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_player_name(self):\n if self.name_entered is False:\n self.name = self.input_name(\"Please enter your name:\")\n self.name_entered = True\n self.score_file()",
"def Network_sendNameOfGame(self, data):\n self.Send({\"action\": \"defineGame\", \"ruleset\": self._server.ruleset})",
"def create(self, user, game):\n _id = rest.database.db.games.insert_one(game).inserted_id\n\n LOGGER.debug(\"Creating meta game reference.\")\n meta_game_id = self.meta_game_dao.create({\n 'game_id': _id,\n 'turn': game['turn'],\n 'game_name': game['name'],\n 'num_hints': game['num_hints'],\n 'num_errors': game['num_errors'],\n 'owner': user,\n 'num_players': len(game['players']),\n 'players': [user]\n })\n\n LOGGER.debug(\"Adding game to users list of owned games.\")\n\n try:\n self.user_dao.update(\n _id=user, as_model=True).owns(\n own_data={\n 'game': ObjectId(_id),\n 'player_id': 0,\n 'meta_game': ObjectId(meta_game_id)})\n except exceptions.UserNotFound as unf:\n LOGGER.debug(\"User could not be found. Deleting the game.\")\n self.delete(user, _id=_id)\n raise unf\n\n return str(_id)",
"def generate_username():\n while True:\n new_user_id = \"user-\" + str(random.randrange(10000000, 99999999))\n try:\n user = DjangoDev.objects.get(username=new_user_id)\n except DjangoDev.DoesNotExist:\n return new_user_id",
"def getId(self):\n return self.getUserName()",
"def getId(self):\n return self.getUserName()",
"def get_user_id(self, friend_name: str = \"\", force: bool = False) -> str:\n raise NotImplementedError",
"def get_id_from_name(slack_client, name):\n api_call = slack_client.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n if 'name' in user and user['name'] == name:\n return user.get('id')\n return None",
"def describe_game(name):\n #meaning ,if we do not already have this user's name,\n #then they are a new player and we need to get their name\n if name !=\"\":\n print(\"\\n tahnk you for playing again, {} !\".format(name))\n else:\n stop=True\n while stop:\n if name==\"\":\n name=input(\"\\n What is your name? \\n >>>\").capitalize()\n if name!=\"\":\n print(\"\\n Welcome, {}!\".format(name))\n print(\"\\n In this game, you will be greeted \\n by several different people. \\n You can choose to be nice or mean\")\n print(\"but at the end of the game your fate \\n will be sealed by your actions\")\n stop=False\n return name",
"def get_username_and_id(self, obj):\n return \"%s - %s\" % (obj.user.username, obj.user.id)",
"def __updatePlayerName(db, player):\n c = db.cursor()\n id = player['email-hash']\n\n if player['name'] is not None:\n playerTournamentName = player['name']\n else:\n playerTournamentName = player['challonge-username']\n\n c.execute(\"SELECT id FROM players WHERE id='%s'\" % id)\n row = c.fetchone()\n if row is None:\n newPlayerRecord = (player['email-hash'],\n playerTournamentName,\n _newPlayerRating)\n c.execute(\"INSERT INTO players VALUES('%s','%s','%s')\" %\n newPlayerRecord)\n else:\n c.execute(\"SELECT nick FROM players WHERE id='%s'\" % id)\n storedName = c.fetchone()[0]\n if storedName != playerTournamentName:\n c.execute(\"SELECT alias FROM aliases WHERE player_id='%s'\" % id)\n if c.fetchone() is None:\n c.execute(\"INSERT INTO aliases VALUES('%s','%s')\" %\n (playerTournamentName, id))",
"def _getGameName(self):\n className = self.__class__.__name__\n gameName = className[0].lower() + className[1:]\n return gameName",
"def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None",
"def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name",
"def get_game(username):\n user = User.objects.get(username=username)\n if user.two_player_game_id != None:\n return TwoPlayerGame.objects.get(\n game_id=user.two_player_game_id), \"two\"\n if user.four_player_game_id != None:\n return FourPlayerGame.objects.get(\n game_id=user.four_player_game_id), \"four\"\n return None, None",
"def start_the_game(self):\n print(f'{self.user_name.get_value()}, Do the job here!')\n return ('jeu')",
"def get_identifier(self, request):\r\n return request.user.username",
"def get_identifier(self, request):\r\n if hasattr(request, 'user'):\r\n try:\r\n return request.user.get_username()\r\n except AttributeError:\r\n pass\r\n return 'nouser'",
"def _username_to_uuid(self, username: str) -> str:\n http_conn = http.client.HTTPSConnection(\"api.mojang.com\")\n header = {'User-Agent': 'Minecraft Username -> UUID',\n 'Content-Type': 'application/json'}\n http_conn.request(\n \"GET\", \"/users/profiles/minecraft/\" + username, headers=header)\n response = http_conn.getresponse().read().decode(\"utf-8\")\n\n if not response:\n raise KeyError(\"player probably doesn't exist\")\n\n json_data = json.loads(response)\n return json_data['id']",
"def _get_user_name_from_user_id(self, user_id):\n if user_id in self._user_id_to_user_name.keys():\n return self._user_id_to_user_name[user_id]\n return self._get_user_name_from_user_id_by_slack_client(user_id)",
"def name(who):\r\n if who == 0:\r\n return 'Player 0'\r\n elif who == 1:\r\n return 'Player 1'\r\n else:\r\n return 'An unknown player'"
] |
[
"0.73327935",
"0.65675193",
"0.63035643",
"0.62938285",
"0.6293378",
"0.6210712",
"0.6188612",
"0.6188612",
"0.6188612",
"0.6105323",
"0.6088365",
"0.6041224",
"0.60354435",
"0.60188615",
"0.60188615",
"0.60095835",
"0.59916943",
"0.59532917",
"0.595209",
"0.5932363",
"0.5928909",
"0.5926422",
"0.5923837",
"0.59180546",
"0.5917362",
"0.5887365",
"0.5883424",
"0.58731675",
"0.58445644",
"0.5809114"
] |
0.7534376
|
0
|
Check if a path is inside of a source Rez package's list of variants. This function's purpose is hard to describe.
|
def _get_variant_less_path(root, path, variants):
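    """Check if `path` is inside one of the source Rez package's variants and
    points at an importable Python module. Returns True on a match, False otherwise.
    """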
for variant_less_path in _iter_variant_extracted_paths(root, path, variants):
if not imports.has_importable_module(variant_less_path, ignore={"__init__.py"}):
            # This condition only happens when a Rez package defines a
            # Python package that is empty, which is probably user error
            # and shouldn't happen often.
_LOGGER.warning(
'Path "%s" is inside "%s" but no Python module could be found.',
path,
root,
)
continue
return True
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __contains__(self, obj):\n if isinstance(obj, str):\n path = Path(obj)\n try:\n repo_path = Path(getattr(self, \"location\")).resolve()\n except AttributeError:\n return False\n\n # existing relative path\n if not path.is_absolute() and (repo_path / path).exists():\n return True\n\n # existing full path\n try:\n fullpath = path.resolve()\n if fullpath.relative_to(repo_path) and fullpath.exists():\n return True\n except ValueError:\n pass\n\n return False\n else:\n for pkg in self.itermatch(obj):\n return True\n return False",
"def has_path_source(self) -> bool:\n\n return any(self.is_path_type(x) for x in self.parameters)",
"def has_source(self):\n return any(map(utils.assert_package_is_source, self.pkg_arguments))",
"def _ShouldCopyFile(self, path):\n\n # check for C runtime, if desired\n path = os.path.normcase(path)\n dirName, fileName = os.path.split(path)\n if fileName.startswith(\"msvcr\") and fileName.endswith(\".dll\"):\n self.msvcRuntimeDir = dirName\n return self.includeMSVCR\n\n # check the full path\n if path in self.binIncludes:\n return True\n if path in self.binExcludes:\n return False\n\n # check the file name by itself (with any included version numbers)\n if fileName in self.binIncludes:\n return True\n if fileName in self.binExcludes:\n return False\n\n # check the file name by itself (version numbers removed)\n name = self._RemoveVersionNumbers(fileName)\n if name in self.binIncludes:\n return True\n if name in self.binExcludes:\n return False\n\n # check the path for inclusion/exclusion\n for path in self.binPathIncludes:\n if dirName.startswith(path):\n return True\n for path in self.binPathExcludes:\n if dirName.startswith(path):\n return False\n\n return True",
"def checkSourceLocations(packageKey):\n directoryPath = dotfilePath + \\\n configDict['options'][packageKey]['directoryName'] + \"/\"\n\n for link in configDict['options'][packageKey]['links']:\n for key, value in link.items():\n sourcePath = directoryPath + key\n\n if symMod.symlinkLocationExists(sourcePath):\n return False\n\n return True",
"def validate_venv_path(path: Path, check: bool = False) -> bool:\n win32 = sys.platform == 'win32'\n standard_struct = {\n 'bin': 'Scripts' if win32 else 'bin',\n 'include': 'Include' if win32 else 'include',\n 'lib': os.path.join('Lib', 'site-packages') if win32 else os.path.join('lib', '*', 'site-packages'),\n }\n standard_struct['python'] = f'{standard_struct[\"bin\"]}/python'\n standard_struct['site-packages'] = f'{standard_struct[\"lib\"]}/*/site-packages'\n valid = False\n if path and path.exists():\n checked = False\n subchecked = False\n for globbed_path in standard_struct.values():\n checked = True\n for resolved_path in path.glob(globbed_path):\n if not resolved_path.exists():\n if check:\n raise InvalidEnvironmentError(f'Could not find {globbed_path} under {path}.')\n\n return valid\n subchecked = True\n valid = checked and subchecked\n if not valid and check:\n raise InvalidEnvironmentError(f'Invalid virtual environment path: {path}.')\n return valid",
"def are_package_sources_available(self) -> bool:\n ok = True\n for name, path in self.update_sources(self.stub_sources):\n if (CONFIG.stub_path / path).exists():\n continue\n if name == StubSource.FROZEN:\n # not a blocking issue if there are no frozen stubs, perhaps this port/board does not have any\n continue\n # todo: below is a workaround for different types, but where is the source of this difference coming from?\n msg = (\n f\"{self.package_name}: source '{name.value}' not found: {CONFIG.stub_path / path}\"\n if isinstance(name, StubSource) # type: ignore\n else f\"{self.package_name}: source '{name}' not found: {CONFIG.stub_path / path}\"\n )\n self.status[\"error\"] = msg\n log.debug(msg)\n ok = False\n return ok",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)",
"def contains_venv(_dir, **kargs):\n kargs.update(max_venvs=1)\n venvs = find_venvs(_dir, **kargs)\n return venvs and venvs[0]",
"def resolve_variants(self):\n\n def evaluate_clause(clause):\n if 'or' in clause or 'and' in clause:\n raise Exception(\"Reserved keyword 'and || or' used.\")\n v = dict_contains(self.traits, clause)\n return v\n \n def process_effects(variant_name, variant_details):\n \"\"\"\n This nested function handles the effects of a \n given clause.\n \n Right now, the only relevant effect is 'replace',\n which causes a variant to replace an existing variant\n \n \"\"\"\n if 'replaces' in variant_details:\n enabled_variants.remove(variant_details['replaces'])\n enabled_variants.add(variant_name)\n\n if 'cflags' in variant_details:\n if type(variant_details['cflags']) == dict:\n self.config['cflags'] += variant_details['cflags']['gcc']\n else:\n self.config['cflags'] += \" \" + variant_details['cflags']\n # Beginning of main function\n if 'filtered_variants' in self.__dict__:\n return self.filtered_variants\n \n enabled_variants = set(['src'])\n variants = self.get_variants()\n \n for variant in variants:\n assert len(variant) == 1\n for name, details in variant.items():\n if 'when' in details:\n enabled = evaluate_clause(details['when'])\n if enabled:\n process_effects(name, details)\n self.variant_dirs = {}\n for variant_name in enabled_variants:\n self.variant_dirs[variant_name] = join(self.path, variant_name)\n\n self.filtered_variants = [a for a in self.get_variants() if list(a.keys())[0] in enabled_variants]\n return self.filtered_variants",
"def has_sources(self, extension=None):\r\n return (self.has_label('sources') and\r\n (not extension or\r\n (hasattr(self, 'sources') and\r\n any(source.endswith(extension) for source in self.sources))))",
"def exists(self, path):",
"def is_package(path: str) -> bool:\n return os.path.isdir(path) and \"__init__.py\" in os.listdir(path)",
"def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)",
"def matches(self, tgt_residence_dir: str) -> bool:",
"def _iter_variant_extracted_paths(root, path, variants):\n for variant in sorted(variants, key=len, reverse=True):\n inner_path = os.path.join(*[str(request) for request in variant])\n resolved_path = os.path.join(root, inner_path)\n\n if filer.in_directory(path, resolved_path, follow=False):\n yield path.replace(inner_path + os.sep, \"\")",
"def process(self, source_path: pathlib.Path) -> bool:",
"def is_bundle_file(conf, path):\n \n if path[0] == '/':\n path = path[1:]\n \n # walk the config, checking for a match\n for asset_type in ['js','css']:\n for bundle_name in conf[asset_type].iterkeys():\n for f in conf[asset_type][bundle_name]:\n if os.path.join(asset_type, f) == path:\n return True\n \n return False",
"def exists_in_path(self):\n return os.path.isfile(self.IN_PATH)",
"def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return",
"def matches_glob_list(path, glob_list):\n for glob in glob_list:\n try:\n if PurePath(path).match(glob):\n return True\n except TypeError:\n pass\n return False",
"def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0",
"def __contains__(self, cvs_path):\n\n return cvs_path in self._entries",
"def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def do_check(path):\n found_source_content = False\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n expected_paths = [\"file://\" + os.path.join(get_buildroot(), _path) for _path in [\n 'examples/src/java/org/pantsbuild/example/hello',\n 'examples/src/java/org/pantsbuild/example/hello/greet',\n 'examples/src/java/org/pantsbuild/example/hello/main',\n 'examples/src/java/org/pantsbuild/example/hello/simple',\n 'examples/src/resources/org/pantsbuild/example/hello',\n ]]\n expected_java_resource = [\"file://\" + os.path.join(get_buildroot(), _path) for _path in [\n 'examples/src/resources/org/pantsbuild/example/hello',\n ]]\n remaining = set(expected_paths)\n for sourceFolder in self._get_sourceFolders(dom):\n found_source_content = True\n self.assertEquals(\"False\", sourceFolder.getAttribute('isTestSource'))\n url = sourceFolder.getAttribute('url')\n # Check is resource attribute is set correctly\n if url in expected_java_resource:\n self.assertEquals(sourceFolder.getAttribute('type'), IdeaIntegrationTest.RESOURCE,\n msg=\"Type {c_type} does not match expected type {a_type} \"\n \"for {url}\".format(c_type=IdeaIntegrationTest.RESOURCE, url=url,\n a_type=sourceFolder.getAttribute('type')))\n self.assertIn(url, remaining,\n msg=\"Couldn't find url={url} in {expected}\".format(url=url,\n expected=expected_paths))\n remaining.remove(url)\n self.assertTrue(found_source_content)",
"def matches_path(self) -> bool:\n if not self._primitives:\n raise NotImplementedError\n\n if self.is_remote:\n matches = self._matches_remote()\n else:\n matches = self._matches_local()\n\n if matches:\n self._update_version()\n return matches\n\n return matches",
"def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')",
"def exists_path(self, start, end):\n return end in self.paths(start)",
"def check_release_dir(options):\n if not options.release_path:\n error = ('--release_path cannot be empty.'\n ' Either specify a --release or a --release_path.')\n raise ValueError(error)\n\n if os.path.exists(options.release_path):\n return\n\n if options.release_path.startswith('gs://'):\n check_google_path(options.release_path)\n elif options.release_path.startswith('s3://'):\n check_s3_path(options.release_path)\n else:\n error = 'Unknown path --release_path={dir}\\n'.format(\n dir=options.release_path)\n raise ValueError(error)"
] |
[
"0.6436872",
"0.61978734",
"0.611825",
"0.59328175",
"0.5863272",
"0.57668215",
"0.56686926",
"0.5650742",
"0.5596057",
"0.5559053",
"0.5547557",
"0.5546915",
"0.5546251",
"0.5532427",
"0.55249786",
"0.54744756",
"0.5423943",
"0.5386734",
"0.5384271",
"0.5383693",
"0.53809667",
"0.5379169",
"0.5366744",
"0.53510505",
"0.532538",
"0.53047997",
"0.530069",
"0.5300398",
"0.52907985",
"0.52865595"
] |
0.7300656
|
0
|
Check if the given Rez package is a source directory or a built Rez package.
|
def is_built_package(package):
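    """Check if the given Rez package is a built package rather than a source
    directory. A built package is installed into a folder named after its version.
    """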
try:
parent_folder = finder.get_package_root(package)
except (AttributeError, TypeError):
raise ValueError(
'Input "{package}" is not a valid Rez package.'.format(package=package)
)
version = str(package.version)
if not version:
return False
return version == os.path.basename(parent_folder)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def IsPackage(path):\n init_base_path = os.path.join(path, '__init__.py')\n return (os.path.isfile(init_base_path) or\n os.path.isfile(init_base_path + 'c') or\n os.path.isfile(init_base_path + 'o'))",
"def _is_package(path):\n if not os.path.isdir(path):\n return False\n return os.path.isfile(os.path.join(path, '__init__.py'))",
"def _is_package(path):\n return (\n os.path.isdir(path)\n and os.path.exists(os.path.join(path, '__init__.py'))\n )",
"def _is_package(path):\n return (\n os.path.isdir(path)\n and os.path.exists(os.path.join(path, '__init__.py'))\n )",
"def has_source(self):\n return any(map(utils.assert_package_is_source, self.pkg_arguments))",
"def is_package(path: str) -> bool:\n return os.path.isdir(path) and \"__init__.py\" in os.listdir(path)",
"def has_python_package( # pylint: disable=too-many-branches,too-many-locals\n package, paths=None, allow_build=True, allow_current_context=False\n):\n from . import creator # Avoiding a cyclic import\n\n if not hasattr(package, \"name\") or not hasattr(package, \"version\"):\n raise ValueError(\n 'Object \"{package}\" is not a valid Rez package.'.format(package=package)\n )\n\n if not paths:\n paths = config.packages_path # pylint: disable=no-member\n\n version = \"\"\n is_built = is_built_package(package)\n\n if is_built:\n version = package.version\n\n if allow_current_context and in_valid_context(package):\n environment = os.environ.get(\"PYTHONPATH\", \"\").split(os.pathsep)\n else:\n context = resolved_context.ResolvedContext(\n [\"{package.name}=={version}\".format(package=package, version=version)],\n package_paths=[get_packages_path_from_package(package)] + paths,\n )\n\n environment = context.get_environ().get(\"PYTHONPATH\", \"\").split(os.pathsep)\n\n paths = get_package_python_paths(package, environment)\n\n # All zipped .egg files as valid Python \"packages\"\n for path in paths:\n if path.endswith(\".egg\") and os.path.isfile(path):\n return True\n\n for root_path in paths:\n for _, _, files in os.walk(root_path):\n for file_path in files:\n if file_path == \"__init__.py\":\n continue\n\n if file_path.endswith(\".py\"):\n return True\n\n if is_built or not allow_build:\n return False\n\n # If the package is a source package and PYTHONPATH is defined but\n # no path was found, it may actually be that the Python files are\n # generated on-build (such as C++ files with Python bindings). To\n # find out, we need to run this function again, but with the built\n # package.\n #\n build_directory = tempfile.mkdtemp(suffix=\"_some_temporary_rez_build_package\")\n build_package = creator.build(package, build_directory, quiet=True)\n\n # Reference: https://stackoverflow.com/questions/3850261/doing-something-before-program-exit\n atexit.register(functools.partial(shutil.rmtree, build_directory))\n\n return has_python_package(build_package)",
"def is_package_dir(path: Path) -> bool:\n if not path.is_dir():\n return False\n if path.name.endswith(\".egg-info\"):\n return False\n if (path / \"__init__.pyi\").exists():\n return True\n return False",
"def is_package(self, fullname):\n return hasattr(self.__get_module(fullname), \"__path__\")",
"def is_module_or_package(path):\r\n is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')\r\n is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))\r\n return is_module or is_package",
"def test_IsPackage_directory(tempdir: pathlib.Path):\n assert dpack._IsPackage(tempdir)",
"def is_package(self, fullmodname):\n submodname, is_package, relpath = self._get_info(fullmodname)\n return is_package",
"def check_module_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking module path\")\n check_module_name = os.system('find %s -mindepth 2 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_module_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4module(pkg, ROOT_SOURCES)\n if src_dir_root != None:\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n else:\n print(\"Package not present in rootbase.\")\n print(\"Please provide manifest file path, else enter 'NA'\")\n p_manifest = raw_input()\n if p_manifest != 'NA':\n value = yaml_validator(p_manifest)\n if value == 1:\n print(\"Not a valid yml. Please provide valid yml. Exiting now.\")\n else:\n print(\"Downloading package using url.\")\n dn_path = downloader(p_manifest)\n #get path for downloaded directory\n filepath = Path(dn_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dn_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n else:\n print(\"Can you provide package path..(if available)\")\n dir_path = raw_input()\n filepath = Path(dir_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dir_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n return src_dir_root",
"def check_cleaned(path):\n bool_1 = isdir(join(path, 'Main'))\n bool_2 = isdir(join(path, 'Finantial'))\n bool_3 = bool_1 and bool_2\n return bool_3",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def is_valid(project: Project) -> bool:\n return bool(compileall.compile_dir(project.root, quiet=1))",
"def needs_rebuild(source, target):\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))",
"def check_local_package(installed_title):\n user_app_dir = path.Path(pm.internalVar(uad = True))\n installed_dir = user_app_dir / installed_title\n\n if installed_dir.isfile():\n return False\n elif installed_dir.isdir():\n return True\n else:\n return False",
"def _check_orig(self):\n if self.is_dir():\n self._orig = False\n return\n\n parts = self._path.split('.')\n try:\n if parts[-1] == 'tgz':\n self._orig = True\n elif parts[-2] == 'tar':\n if (parts[-1] in Compressor.Opts or\n parts[-1] in Compressor.Aliases):\n self._orig = True\n except IndexError:\n self._orig = False",
"def has_package(self, doc):\n return doc.package is not None",
"def is_repo_root(path: str) -> bool:\n return os.path.isdir(os.path.join(path, \".repo\"))",
"def are_package_sources_available(self) -> bool:\n ok = True\n for name, path in self.update_sources(self.stub_sources):\n if (CONFIG.stub_path / path).exists():\n continue\n if name == StubSource.FROZEN:\n # not a blocking issue if there are no frozen stubs, perhaps this port/board does not have any\n continue\n # todo: below is a workaround for different types, but where is the source of this difference coming from?\n msg = (\n f\"{self.package_name}: source '{name.value}' not found: {CONFIG.stub_path / path}\"\n if isinstance(name, StubSource) # type: ignore\n else f\"{self.package_name}: source '{name}' not found: {CONFIG.stub_path / path}\"\n )\n self.status[\"error\"] = msg\n log.debug(msg)\n ok = False\n return ok",
"def is_built(args, task_name: str, artifact_name: str) -> bool:\n if task_name not in args._artifacts:\n return False\n\n for a in args._artifacts[task_name]:\n if a.name == artifact_name and a.built:\n return True\n elif a.name == artifact_name and not a.built:\n return False\n return False",
"def check_source(self,source): \n\n kind = None\n if os.path.exists(source):\n if os.path.isfile(source):\n kind = \"file\"\n elif os.path.isdir(source):\n kind = \"dir\"\n else:\n print(\" Source path : \\n{}\\n Does not exist...\\n\".format(source))\n #print(\" Sys.exit() called by : {}\".format())\n sys.exit()\n\n return kind",
"def check_release_dir(options):\n if not options.release_path:\n error = ('--release_path cannot be empty.'\n ' Either specify a --release or a --release_path.')\n raise ValueError(error)\n\n if os.path.exists(options.release_path):\n return\n\n if options.release_path.startswith('gs://'):\n check_google_path(options.release_path)\n elif options.release_path.startswith('s3://'):\n check_s3_path(options.release_path)\n else:\n error = 'Unknown path --release_path={dir}\\n'.format(\n dir=options.release_path)\n raise ValueError(error)",
"def _package_available(package_name: str) -> bool:\n try:\n return find_spec(package_name) is not None\n except ModuleNotFoundError:\n return False",
"def is_python_package(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n file_name = os.path.basename(file_path)\n init_file_path = os.path.join(file_path, \"__init__.py\")\n\n if os.path.isdir(file_path) and os.path.isfile(init_file_path):\n # Package\n return (True, file_name)\n\n return (False, None)",
"def is_directory(self):\n return all(isinstance(child, PyModule) for child in self._children())",
"def update_package(self) -> bool:\n log.info(f\"- Update {self.package_path.name}\")\n log.trace(f\"{self.package_path.as_posix()}\")\n\n # check if the sources exist\n ok = self.are_package_sources_available()\n if not ok:\n log.debug(f\"{self.package_name}: skipping as one or more source stub folders are missing\")\n self.status[\"error\"] = \"Skipped, stub folder(s) missing\"\n shutil.rmtree(self.package_path.as_posix())\n self._publish = False # type: ignore\n return False\n try:\n self.update_package_files()\n self.update_included_stubs()\n self.check()\n except Exception as e: # pragma: no cover\n log.error(f\"{self.package_name}: {e}\")\n self.status[\"error\"] = str(e)\n return False\n return True",
"def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False"
] |
[
"0.6811845",
"0.6775147",
"0.67093",
"0.67093",
"0.6703259",
"0.66499954",
"0.6600665",
"0.6493551",
"0.63353056",
"0.6322277",
"0.6100381",
"0.6078929",
"0.60449576",
"0.5986843",
"0.59790695",
"0.5961668",
"0.5952385",
"0.59494877",
"0.5930723",
"0.5904908",
"0.5888437",
"0.585664",
"0.58055705",
"0.57728434",
"0.57666016",
"0.5765523",
"0.5764267",
"0.5760584",
"0.5754972",
"0.57377946"
] |
0.7505242
|
0
|
Get the Python files that a Rez package adds to the user's PYTHONPATH. If the Rez package is an installed Rez package and it contains variants, each variant will have its paths returned.
|
def get_package_python_paths(package, paths):
# Note: Here we're trying to get `package`'s specific changes to PYTHONPATH (if any)
#
# Unfortunately, the Rez API doesn't really support this yet.
# There's 2 GitHub links that may one-day implement it though:
# - https://github.com/nerdvegas/rez/issues/737
# - https://github.com/nerdvegas/rez/pull/739
#
# Reference: https://rez-talk.slack.com/archives/CHELFCTFB/p1578604659006100
#
# Once that work is merged, replace `get_package_python_paths` with it.
#
root = finder.get_package_root(package)
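    # Built (installed) packages already include their variant folder on disk,
    # so any PYTHONPATH entry under the install root can be returned as-is.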
if is_built_package(package):
return {path for path in paths if filer.in_directory(path, root, follow=False)}
output = set()
for path in paths:
# If the Rez package is a source Rez package + has variants
# we need to strip the "variants" out of `path`, before
# returning it.
#
try:
variant_less_path = next(
_iter_variant_extracted_paths(root, path, package.variants or [])
)
except StopIteration:
pass
else:
output.add(variant_less_path)
continue
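        # Not part of a variant. Keep the path only if it is located under
        # the package root.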
if filer.in_directory(path, root, follow=False) or filer.in_directory(
path, root, follow=True
):
output.add(path)
continue
return output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_rpaths(pkg):\n rpaths = [pkg.prefix.lib, pkg.prefix.lib64]\n deps = get_rpath_deps(pkg)\n rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))\n rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))\n # Second module is our compiler mod name. We use that to get rpaths from\n # module show output.\n if pkg.compiler.modules and len(pkg.compiler.modules) > 1:\n rpaths.append(path_from_modules([pkg.compiler.modules[1]]))\n return list(dedupe(filter_system_paths(rpaths)))",
"def get_pythonpath(working_set, buildout, prefixes):\n\n # get all paths available in the current working set\n paths = list(working_set.entries)\n\n if hasattr(zc.buildout.easy_install, 'distribute_loc'):\n prepend_path(zc.buildout.easy_install.distribute_loc, paths)\n elif hasattr(zc.buildout.easy_install, 'setuptools_loc'):\n prepend_path(zc.buildout.easy_install.setuptools_loc, paths)\n else:\n prepend_path(zc.buildout.easy_install.setuptools_path, paths)\n\n return [k for k in working_set.entries \\\n if os.path.realpath(k) not in site_paths(buildout, prefixes)]",
"def _generatePythonPath(self, pkg):\r\n if pkg in self._packages:\r\n return []\r\n\r\n # short-circuit if this is a catkin-ized package\r\n m = self._rp.get_manifest(pkg)\r\n\r\n if m.is_catkin:\r\n self._packages.add(pkg)\r\n return []\r\n\r\n packages = self._getDepends(pkg)\r\n packages.append(pkg)\r\n\r\n paths = []\r\n\r\n try:\r\n for p in packages:\r\n m = self._rp.get_manifest(p)\r\n d = self._rp.get_path(p)\r\n self._appendPackagePaths(m, paths, d)\r\n self._packages.add(p)\r\n except:\r\n self._packages.discard(pkg)\r\n raise\r\n\r\n return paths",
"def get_sitepackage_dirs():\n if 'getsitepackages' in dir(site):\n return site.getsitepackages()\n else:\n # workaround for https://github.com/pypa/virtualenv/issues/355\n return sys.path",
"def get_python_filepaths(*, roots=None, python_paths=None):\n if python_paths is None:\n python_paths = ['setup.py']\n if roots is None:\n roots = ['charmcraft', 'tests']\n for root in roots:\n for dirpath, dirnames, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".py\"):\n python_paths.append(os.path.join(dirpath, filename))\n return python_paths",
"def pypkgpath(self):\n pkgpath = None\n for parent in self.parts(reverse=True):\n if parent.isdir():\n if not parent.join(\"__init__.py\").exists():\n break\n if not isimportable(parent.basename):\n break\n pkgpath = parent\n return pkgpath",
"def library_search_path(self, pedantic=False):\n return []",
"def find_eggs(path):\n eggs, errors = pkg_resources.working_set.find_plugins(\n pkg_resources.Environment([path])\n )\n return eggs",
"def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()",
"def find_all_pythons():\n \n allpys = []\n \n # split PATH according to platform rules\n pathlist = string.split( os.environ['PATH'], os.pathsep )\n\n # search PATH, excluding nonexistant dirs\n for path in filter( os.path.isdir, pathlist ):\n allpys.extend( find_pythons_in_dir( path ) )\n\n # check the win32 registry, as appropriate\n allpys.extend( get_pythons_from_registry() )\n\n # and of course I'm running under a Python, in case\n # no others were found\n allpys.append( os.path.abspath(sys.executable) )\n \n return allpys",
"def find_modules(x):\n return Path(x).rglob('*.py')",
"def get_packages(package):\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]",
"def get_installation_paths(versions=None):\n\n pass",
"def getsitepackages():\n\n _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32\n _is_pypy = hasattr(sys, 'pypy_version_info')\n _is_jython = sys.platform[:4] == 'java'\n\n prefixes = [sys.prefix, sys.exec_prefix]\n\n sitepackages = []\n seen = set()\n\n for prefix in prefixes:\n if not prefix or prefix in seen:\n continue\n seen.add(prefix)\n\n if sys.platform in ('os2emx', 'riscos') or _is_jython:\n sitedirs = [os.path.join(prefix, \"Lib\", \"site-packages\")]\n elif _is_pypy:\n sitedirs = [os.path.join(prefix, 'site-packages')]\n elif sys.platform == 'darwin' and prefix == sys.prefix:\n if prefix.startswith(\"/System/Library/Frameworks/\"): # Apple's Python\n sitedirs = [os.path.join(\"/Library/Python\", sys.version[:3], \"site-packages\"),\n os.path.join(prefix, \"Extras\", \"lib\", \"python\")]\n\n else: # any other Python distros on OSX work this way\n sitedirs = [os.path.join(prefix, \"lib\",\n \"python\" + sys.version[:3], \"site-packages\")]\n\n elif os.sep == '/':\n sitedirs = [os.path.join(prefix,\n \"lib\",\n \"python\" + sys.version[:3],\n \"site-packages\"),\n os.path.join(prefix, \"lib\", \"site-python\"),\n ]\n lib64_dir = os.path.join(prefix, \"lib64\", \"python\" + sys.version[:3], \"site-packages\")\n if (os.path.exists(lib64_dir) and\n os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):\n if _is_64bit:\n sitedirs.insert(0, lib64_dir)\n else:\n sitedirs.append(lib64_dir)\n try:\n # sys.getobjects only available in --with-pydebug build\n sys.getobjects\n sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))\n except AttributeError:\n pass\n # Debian-specific dist-packages directories:\n sitedirs.append(os.path.join(prefix, \"local/lib\",\n \"python\" + sys.version[:3],\n \"dist-packages\"))\n sitedirs.append(os.path.join(prefix, \"lib\",\n \"python\" + sys.version[:3],\n \"dist-packages\"))\n if sys.version_info[0] >= 3:\n sitedirs.append(os.path.join(prefix, \"lib\",\n \"python\" + sys.version[0],\n \"dist-packages\"))\n sitedirs.append(os.path.join(prefix, \"lib\", \"dist-python\"))\n else:\n sitedirs = [prefix, os.path.join(prefix, \"lib\", \"site-packages\")]\n if sys.platform == 'darwin':\n # for framework builds *only* we add the standard Apple\n # locations. Currently only per-user, but /Library and\n # /Network/Library could be added too\n if 'Python.framework' in prefix:\n home = os.environ.get('HOME')\n if home:\n sitedirs.append(\n os.path.join(home,\n 'Library',\n 'Python',\n sys.version[:3],\n 'site-packages'))\n for sitedir in sitedirs:\n sitepackages.append(os.path.abspath(sitedir))\n\n sitepackages = [p for p in sitepackages if os.path.isdir(p)]\n return sitepackages",
"def _get_site_packages():\n paths_to_try = [\n # typically win32\n os.path.join(base, \"Lib\", \"site-packages\"),\n # standard\n os.path.join(base, \"lib\", \"python%s\" % sys.version[:3], \"site-packages\"),\n # typically pypy\n os.path.join(base, \"site-packages\"),\n ]\n for p in paths_to_try:\n if os.path.isdir(p):\n return p\n return os.path.join(base, \"lib\", \"python%s\" % sys.version[:3], \"site-packages\")",
"def find_package_data(package):\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return filepaths",
"def get_packages_with_prefixes():\n return get_resources('packages')",
"def getsitepackages():\n\tpass",
"def get_packages(root):\n root = os.path.realpath(root)\n proot = parent(root) + \"/\"\n py_files = [file.rsplit(proot)[1] for file in listfiles(root)]\n packages = list(np.unique([parent(file).replace(\"/\", \".\") for file in py_files]))\n # return list(np.unique([parent(file).replace(\"/\", \".\").split(\".{name_root}.\".format(name_root=name(root)))[1]\n # for file in py_files]))\n return packages",
"def get_installed_files(packagename, venv_pip, temp_dir):\n result = check_output(venv_pip + ['show', '-f', packagename])\n result = (result.decode()).split('\\n')\n files = []\n\n for line in result:\n # this line contains path to venv directory\n if line.startswith('Location:'):\n line = line[len('Location: '):]\n prefix = '/' + line.replace(temp_dir, 'usr') + '/'\n if line.startswith(' '*2):\n path = os.path.abspath(prefix + line.strip())\n if os.path.isdir(path):\n path += \"/\"\n files.append(path)\n return files",
"def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list",
"def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]",
"def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]",
"def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]",
"def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))",
"def find_site_packages(prefixes):\n\n from distutils.sysconfig import get_python_lib\n\n # Standard prefixes to check\n PYTHONDIR = 'python%d.%d' % sys.version_info[0:2]\n SUFFIXES = uniq([\n get_python_lib(prefix=''),\n os.path.join('lib', PYTHONDIR, 'site-packages'),\n os.path.join('lib32', PYTHONDIR, 'site-packages'),\n os.path.join('lib64', PYTHONDIR, 'site-packages'),\n ])\n\n retval = []\n\n for k in prefixes:\n for suffix in SUFFIXES:\n candidate = os.path.realpath(os.path.join(k, suffix))\n if os.path.exists(candidate) and candidate not in retval:\n retval.append(candidate)\n\n return retval",
"def get_installed_versions(cls) -> list[str]:\n\n pyenv_root = os.getenv(\"PYENV_ROOT\")\n if pyenv_root is None:\n raise Failure(\"PYENV_ROOT is not configured\")\n\n root_dir = Path(pyenv_root)\n version_dir = root_dir / \"versions\"\n\n return [i.name for i in version_dir.iterdir() if i.is_dir()]",
"def get_packages(package):\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))\n ]",
"def getsitepackages():\n # For now used only on Windows. Raise Exception for other platforms.\n if is_win:\n pths = [os.path.join(sys.prefix, 'Lib', 'site-packages')]\n # Include Real sys.prefix for virtualenv.\n if is_virtualenv:\n pths.append(os.path.join(base_prefix, 'Lib', 'site-packages'))\n return pths\n else:\n # TODO Implement for Python 2.6 on other platforms.\n raise NotImplementedError()",
"def _LoadPackages():\n return {module.__name__.split('.')[-1]: module for module in\n import_util.LoadModulesForPath(__path__, __name__)}"
] |
[
"0.68803835",
"0.6792354",
"0.6747557",
"0.67192644",
"0.66945404",
"0.6643895",
"0.6533894",
"0.65237635",
"0.6505269",
"0.6490075",
"0.6470572",
"0.64601886",
"0.64022046",
"0.6363765",
"0.63257396",
"0.62893164",
"0.62637025",
"0.6257684",
"0.62080556",
"0.61816895",
"0.61463135",
"0.6139574",
"0.6139574",
"0.6139574",
"0.6137927",
"0.613504",
"0.61335444",
"0.61256015",
"0.6116573",
"0.6092217"
] |
0.72466767
|
0
|