query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4 to 10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Optimization method to find the best size for the hidden node according to a training set. | def find_optimal_model(
self, training_set, use_aic=False, min_node=10,
max_node=90, start_size=20, end_size=5, node_samples=4,
check_decreasing_ll=False, missing_residues=None):
if not self.show_warnings:
warning_list = warnings.filters[:]
warnings.filterwarnings('ignore', category=TorusDBNWarning)
if missing_residues is not None:
missing_residues = read_missing_residues(missing_residues)
self.seq_list, self.mismask_list = self._create_sequence_and_mismask(
training_set, missing_residues)
max_position = 0
start_res = start_size
avg_full_LL = []
IC_array = [[]*n for n in xrange(node_samples + 2)]
# Decrease size resolution until threshold (end_size)
while start_size >= end_size:
# Loop over node sizes
for i in xrange(min_node, max_node + 1, start_size):
# Continues if at the maximum node size from the previous resolution
if (len(IC_array[0]) > 0 and i == IC_array[0][max_position]) or i <= 0:
continue
# Add node-size value to header
IC_array[0].append(i)
IC_cum = 0
if start_res == start_size:
avg_full_LL.append(0)
for j in xrange(1, node_samples + 1):
self.info("Training with node size = %d (sample %d)" % (i, j))
self.model.create_dbn(hidden_node_size=i)
IC = self._train(use_aic)
IC_array[j].append(IC)
IC_cum += IC
if (check_decreasing_ll):
# Save forward likelihoods in order to infer if it is decreasing
hmm_ll_calculator = LikelihoodInfEngineHMM(
dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)
ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list)
avg_full_LL[-1] = avg_full_LL[-1] + ll_full/self._get_observation_count()
# Calculate mean IC for each node-size and add to array
IC_array[node_samples + 1].append(IC_cum / node_samples)
# Check if log-likelihood is decreasing
if (len(avg_full_LL) > 1) and (avg_full_LL[-1] < avg_full_LL[-2]) and \
(start_res == start_size) and check_decreasing_ll:
self.info("Log-likelihood is decreasing. There is no reason to test higher node sizes.")
break
# Column number for maximum IC value
max_position = IC_array[node_samples + 1].index(max(IC_array[node_samples + 1]))
self.info("Optimal node size: %s\n" % (IC_array[0][max_position]))
# Update resolution
start_size = start_size / 2
# Update node limits
min_node = IC_array[0][max_position] - start_size
max_node = IC_array[0][max_position] + start_size
IC_max_node = IC_array[0][max_position]
# Final train to the optimal model
dbn_list = []
IC_list = []
for j in xrange(node_samples):
self.model.create_dbn(hidden_node_size=IC_max_node)
IC = self._train(use_aic)
IC_list.append(IC)
dbn_list.append(self.model.dbn)
IC_max = max(IC_list)
self.model.dbn = dbn_list[IC_list.index(IC_max)]
self.info("Optimal Model:\nHidden node size = %s\nIC = %s\n" % (IC_max_node, IC_max))
if not self.show_warnings:
warnings.filters = warning_list
return IC_max_node, IC_max | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hidden_layer_size(self):\r\n return self.hidden_layer_size",
"def get_num_hidden(self) -> int:\n return self.config.model_size",
"def get_n_best(self):\n pass",
"def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size",
"def v_size(self) -> int:\n return self.nodes_on_graph",
"def hidden_size(self):\n return self._internal.get_hidden_size()",
"def top_dimensionality(self):\n return self._vocab_size",
"def estimate_cudnn_parameter_size(input_size, hidden_size, direction):\n single_rnn_size = 8 * hidden_size + 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)\n return direction * single_rnn_size",
"def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)",
"def get_num_hidden(self) -> int:\n return self.output_dim",
"def size(self) -> tf.Tensor:",
"def estimate_size(self, datasets):\n datasets = Datasets(datasets)\n \n# self.fit.run(datasets)\n\n if self.size_values:\n self.size_parameter.scan_values = self.size_values.to_value(self.size_parameter.unit)\n self.size_parameter.scan_min = self.size_min.to_value(self.size_parameter.unit)\n self.size_parameter.scan_max = self.size_max.to_value(self.size_parameter.unit)\n self.size_parameter.scan_n_values = self.size_n_values\n \n result = super().run(datasets, self.size_parameter)\n return result",
"def mut_space_size(graph: nx.MultiGraph, estimate=True) -> int:\n space = mut_space(graph)\n sizes = (len(gg) for g, gg in space)\n if estimate:\n return sum(map(log, sizes))\n return reduce(op.mul, sizes)",
"def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out",
"def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out",
"def node_size(graph):\n adj = nx.betweenness_centrality(graph)\n return np.array([x * 1e3 for x in adj.values()])",
"def hidden_size(self) ->int:\n return self._cell.hidden_size",
"def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]",
"def dim_per_head(self):\n p = self.params\n return p.dim_per_head or p.hidden_dim // p.num_heads",
"def optimal_neighbors(self, graph, controllers : list) -> (list, int):\n\t\t# This isn't efficient and does not take advantage of other variables in the class\n\t\t# TODO: Optimize to use cluster_info\n\t\tclusters = nx.get_node_attributes(graph, 'cluster')\n\t\tneighbors_list = []\n\t\tfor i in controllers:\n\t\t\tcluster = []\n\t\t\tcluster.append(i)\n\t\t\tneighbors = graph.neighbors(i)\n\t\t\tfor neighbor in neighbors:\n\t\t\t\tif(clusters[neighbor] == clusters[i]):\n\t\t\t\t\tcluster.append(neighbor)\n\t\t\tneighbors_list.append(cluster)\n\t\tprint(neighbors_list)\n\t\t# Find best controller set from neighbors\n\t\tcombinations = list(itertools.product(*neighbors_list))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)",
"def _SD_optimal(t):",
"def findApproxDepth(train, valid, mD=0, mS=0):\n print(\n \"Building a random set of small trees to geuss the max depth and min set size values\"\n )\n res = []\n tree = DecisionTree(train.randSubSet(120, True))\n r = 10\n s = 3\n if mD != 0:\n s = mD - 1\n r = 1\n for i in range(\n s,\n r + s,\n ):\n depth = i + 1 # depth = randint(2,(len(train[0])-1)*3)\n a = 2\n b = 15\n if mS != 0:\n a = mS\n b = mS + 1\n for min_size in range(a, b, 2):\n # min_size = randint(2,(len(train[0])-1)*2)\n tree.buildTree(depth, min_size, True)\n acc = testTreeF(tree, valid)\n res.append([depth, min_size, acc])\n print(\"%.2f\" % (100 * (i - s + 1) / r), \"percent done\")\n best = max(res, key=lambda r: r[-1])\n # res.sort(key=lambda r: r[-1])\n # for r in res:\n # print(r)\n print(\"found a depth of\", best[0], \"and min size of\", best[1])\n return best",
"def n_train(self):\n return self.factors[0].shape[0]",
"def __init__(self, hidden_size, eps=1e-6):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps",
"def __init__(self, hidden_size, eps=1e-6):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps",
"def _input_size(self):\n return self.embedding_size + self.hidden_size",
"def get_num_heads_and_hidden_size(self, reshape_q: NodeProto) -> Tuple[int, int]:\n\n # we assume that reshape fusion has done, so the shape is a tensor like [0, 0, num_heads, head_size]\n q_shape = self.model.get_initializer(reshape_q.input[1])\n if q_shape is None:\n logger.debug(f\"{reshape_q.input[1]} is not initializer.\")\n return self.num_heads, self.hidden_size # Fall back to user specified value\n\n q_shape_value = NumpyHelper.to_array(q_shape)\n if len(q_shape_value) != 4 or (q_shape_value[2] <= 0 or q_shape_value[3] <= 0):\n logger.debug(f\"q_shape_value={q_shape_value}. Expected value are like [0, 0, num_heads, head_size].\")\n return self.num_heads, self.hidden_size # Fall back to user specified value\n\n num_heads = q_shape_value[2]\n head_size = q_shape_value[3]\n hidden_size = num_heads * head_size\n\n if self.num_heads > 0 and num_heads != self.num_heads:\n logger.warn(\"--num_heads is {self.num_heads}. Detected value is {num_heads}. Using detected value.\")\n\n if self.hidden_size > 0 and hidden_size != self.hidden_size:\n logger.warn(\"--hidden_size is {self.hidden_size}. Detected value is {hidden_size}. Using detected value.\")\n\n return num_heads, hidden_size",
"def __len__(self):\n if self.settype == \"train\":\n return 64000\n else:\n return len(self.list_ids)",
"def estimateSize(self):\n if 'szMean' in self.tags:\n self.size = self.tags[\"szMean\"]\n else:\n s,e = self.fetchbounds()\n self.size = e-s",
"def _get_node_size(self, index):\n\n pass"
]
| [
"0.5985635",
"0.5964452",
"0.593218",
"0.58241916",
"0.58158845",
"0.58039904",
"0.5761179",
"0.5725219",
"0.5707045",
"0.5702115",
"0.5667222",
"0.56550604",
"0.5606332",
"0.5548744",
"0.5548744",
"0.5546417",
"0.55433387",
"0.5542178",
"0.5492614",
"0.5483099",
"0.5469575",
"0.545779",
"0.5453084",
"0.54462963",
"0.54462963",
"0.5442502",
"0.5413444",
"0.54111457",
"0.5411117",
"0.54019535"
]
| 0.6054712 | 0 |
Print a message to the standard output, in case show_info is enabled. | def info(self, message):
if self.show_info:
print(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_info(msg):\n print(msg)",
"def show_info(title, message):\n\n pass",
"def print_info(message: str):\n global verbose\n if verbose:\n print(\"%s%s%s\" % (KYEL, message, KNRM))",
"def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)",
"def info(cls, msg, debug=True):\n if debug:\n Console.info(msg)",
"def info(msg):\n sys.stdout.write('%s[ INFO ]%s %s\\n' % (colors.GREEN, colors.RESET , msg))",
"def info(msg):\n print(colored.green(\"[INFO]: {0}\".format(msg)))",
"def info(cls, message):\n print('[INFO] {0}'.format(message))",
"def info(msg):\n click.secho(msg, fg='blue')",
"def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()",
"def showMessage(self, message):\r\n print message",
"def info(cls, message, level=0):\n\n if cls.print_level == 3 or (cls.print_level == 2 and level >= 1) or \\\n (cls.print_level == 1 and level >= 2):\n print(cls.marker_theme.info(level) + cls.time() + cls.parse(message))",
"def output_info(text):\n if conf.eval_output:\n info_dict = {'type':'info', 'text' : text}\n output_message_eval(info_dict)\n else:\n output_message('[INFO] ' + text)",
"def log_info(msg):\n msg = '{0}\\n'.format(msg)\n sys.stdout.write(msg)",
"def display_detail(msg, *args):\n msg = _concat_message(msg, *args)\n if verbose > 1:\n print \" %s\" % msg.encode(\"UTF-8\")\n sys.stdout.flush()\n if prefs.pref(\"LoggingLevel\") > 0:\n munkilog.log(u\" \" + msg)",
"def show_warning(title, message, print_message=False):\n\n pass",
"def check_and_print_debug_message(self, msg):\n if self._params.debug:\n print(\"Info: {}\".format(msg))",
"def print_info(self, message: str=\"\", src_file: str=\"\") -> None:\n if self._verbosity_level >= int(VerbosityLevel.VERBOSITY_LEVEL2):\n _mes = src_file + \": \" + message\n if self._print_statements_enabled:\n print(\"INFO \\t\\t- \", src_file + \": \\t\" + message)\n logging.info(_mes)",
"def info(self, msg, *args, **kwargs):\n pass",
"def showme(message):\n print(message)",
"def print_info(msg, level=1):\n if config.cfg.verbosity >= level:\n if config.cfg.excessive_verbosity:\n # Get caller info\n fn, lineno, funcnm = inspect.stack()[1][1:4]\n colour.cprint(\"INFO (level: %d) [%s:%d - %s(...)]:\" %\n (level, os.path.split(fn)[-1], lineno, funcnm),\n 'infohdr')\n msg = msg.replace('\\n', '\\n ')\n colour.cprint(\" %s\" % msg, 'info')\n else:\n colour.cprint(msg, 'info')\n sys.stdout.flush()",
"def info(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[INF] {0}'.format(message), 'white', file=sys.stdout)",
"def msg(*args):\n if messages_on:\n print(*args)",
"def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)",
"def printInfo(message):\n try:\n message = str(message)\n except Exception as e:\n print(f\"{Fore.RED}{str(ptime())}: [ERROR]{Style.RESET_ALL} {Fore.WHITE}\" + str(e) + Style.RESET_ALL)\n print(f\"{Fore.CYAN}{str(ptime())}: [INFO]{Style.RESET_ALL} {Fore.WHITE}\" + message + Style.RESET_ALL)",
"def info(msg):\n if logger.level <= logging.INFO:\n print('\\n~ ' + msg)\n logger.info(msg)",
"def info(message, exits=None): # pylint: disable=unused-argument\n print(crayons.cyan(fmt(message, \"[✓]\"), bold=True))\n sys.stdout.flush()",
"def verbose_print(msg: str = '') -> None:\n assert isinstance(msg, str)\n if __verbose:\n print(msg)",
"def show_message(message):\n print(message) # noqa: WPS421",
"def info(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['info']:\n self.print_lines(self.colored(('green', 'bold'), lines))"
]
| [
"0.78605205",
"0.7734865",
"0.76229465",
"0.7563187",
"0.7400993",
"0.73952097",
"0.7378441",
"0.7370511",
"0.7139444",
"0.7125532",
"0.7115362",
"0.71005535",
"0.7092673",
"0.70315975",
"0.70026535",
"0.6999533",
"0.69383913",
"0.69325316",
"0.6913429",
"0.6901743",
"0.6872121",
"0.6871858",
"0.68596536",
"0.6858818",
"0.68541735",
"0.68028104",
"0.67886007",
"0.678475",
"0.6779728",
"0.67763674"
]
| 0.83142126 | 0 |
Does the url contain a downloadable resource | def is_downloadable(url):
h = requests.head(url, allow_redirects=True)
header = h.headers
content_type = header.get('content-type')
if 'text' in content_type.lower():
return False
if 'html' in content_type.lower():
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_downloadable(url) -> bool:\n content_type = requests.head(url, allow_redirects=True).headers.get('content-type')\n if 'text' in content_type.lower() or 'html' in content_type.lower():\n return False\n return True",
"def is_downloadable(self):\n return True",
"def is_downloadable(self):\n return False",
"def is_downloadable(self,imgurl):\n h = requests.head(imgurl, allow_redirects=True)\n header = h.headers\n content_type = header.get('content-type')\n if 'text' in content_type.lower():\n return None\n if 'html' in content_type.lower():\n return None\n return True",
"def is_file_downloading(self, data_url):\n\n # Sometimes it takes too long to load the list\n self.parent.wait_for_element_displayed(DOM.DownloadManager.download_list[0],\n DOM.DownloadManager.download_list[1], 60)\n return self.get_download_status(data_url) == \"downloading\"",
"def is_downloadable(self):\n raise NotImplementedError('Implement this method.')",
"def allow_download(self, url, config):\n\n url = url.lstrip('/')\n asset_url = url.split('asset/')[-1]\n id, filename = asset_url.split('/')\n\n oai_server = OAIServer(self._db, config)\n try:\n header, metadata, description = oai_server.getRecord(\n 'oai_dc', config.oai_id_prefix + id)\n except oaipmh.error.IdDoesNotExistError:\n # record is not in the oai feed, don't download\n return False\n if header.isDeleted():\n # record has deleted status, don't download\n return False\n\n return True",
"def check_if_downloaded( url, debug_print = True ):\n\t# Get pdf filename\n\tfilename = basename( url )\n\tfileno, ext_pdf = splitext( filename )\n\tfor file in listdir( getcwd() ):\n\t\tif fileno in file:\n\t\t\tif debug_print:\n\t\t\t\tprint 'Skipping %s' % ( filename )\n\t\t\treturn True\n\treturn False",
"def download_allowed(self, url, scheme, netloc):\n robot = urllib.robotparser.RobotFileParser('%s://%s/%s' % (scheme, netloc, config.ROBOTS))\n try:\n robot.read()\n except ValueError:\n raise urllib.error.URLError('<urlopen error no protocol given>')\n\n return robot.can_fetch(config.USER_AGENT, url)",
"def is_downloaded(self) -> bool:\n if not self.download_path:\n return False\n return Path(self.download_path).exists()",
"def _is_downloaded(self):\n return self._system.file_exists(self._tar_name)",
"def file_downloaded(filename):\n fc = pathlib.Path(filename)\n if fc.is_file():\n return True\n else:\n return False",
"def is_valid_for_downloading(base_url, asset_url):\n if not asset_url:\n return False\n base_netloc = urlsplit(base_url).netloc\n asset_netloc = urlsplit(asset_url).netloc\n return base_netloc == asset_netloc",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def _get_file(cls, url: str, ende: str) -> bool:\n resposta = requests.get(url)\n if resposta.status_code == requests.codes.OK:\n with open(ende, 'wb') as novo_arquivo:\n novo_arquivo.write(resposta.content)\n return True\n else:\n resposta.raise_for_status()\n return False",
"def check_url(url):\n return 'products.json' in url",
"def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)",
"def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False",
"def _is_package_downloadable(self):\n raise NotImplementedError('Implement this method.')",
"def download(self,fn):\n\t\treturn False #TODO: implement meme download",
"def is_restricted_download(self):\n return self.has_label(RESTRICTEDDOWNLOAD_LABEL)",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def test_link(link):\n r = requests.get(link)\n if (r.status_code != 200):\n return False\n else:\n return True",
"def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False",
"def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False",
"def download_if_needed(url, filename):\n if os.path.exists(filename):\n print \"already exists\"\n else:\n wget.download(url)",
"def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']"
]
| [
"0.7863911",
"0.76044476",
"0.75145316",
"0.74376166",
"0.736647",
"0.7229701",
"0.71826774",
"0.7152465",
"0.71362215",
"0.70722294",
"0.7068388",
"0.70574546",
"0.7050864",
"0.6838795",
"0.6838795",
"0.6756243",
"0.67460054",
"0.66819596",
"0.66469514",
"0.6644246",
"0.6609554",
"0.6562532",
"0.65464735",
"0.6505228",
"0.6493656",
"0.64753443",
"0.6472829",
"0.64660984",
"0.64418703",
"0.6433086"
]
| 0.7924052 | 0 |
Go to the given workspace number. | def go_to(i3: i3ipc.Connection, workspace: int):
i3.command(f"workspace number {workspace}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_workspace(num):\n LOGGER.debug('change_workspace: requested workspace {}'.format(num))\n output = get_focused_output()\n prefix = get_workspace_prefix(output)\n ws_num = get_workspace_num(prefix, num)\n workspace = get_workspace(ws_num, prefix, num)\n result = switch_workspace(f'{workspace}')\n if not result.success:\n LOGGER.debug(f'change_workspace: faild to run command: {workspace}')\n LOGGER.debug(result.error)",
"def _open_workspace(self, name, workspace):\n workspace_url = self.environment_driver.extract_workspace_url(\n name, workspace)\n result = webbrowser.open(workspace_url, new=2)\n\n return result",
"def goto(n):\n n = int('{}'.format(n))\n get_controller().step_to(n)",
"def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n _C.SwitchWorkspace(workspace_name, create_if_missing)",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n SwitchWorkspaceCC(workspace_name, create_if_missing)",
"def open(self, index):\n\n index = int(index.strip())\n index -= 1\n section = self.program.state.last_viewed\n storyid = getattr(self.program.state, section)[index]\n data = self.program.state.stories[storyid]\n webbrowser.open(data['url'])",
"def goto(number):\n if isinstance(number, tuple):\n number = number[-1]\n if not isinstance(number, str):\n number = number()\n return redirect(question_url(number))",
"def action_goto(self):\n dialog = GoToDialog(self)\n dialog.exec()\n\n # Re-focus the main window\n self.activateWindow()",
"def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))",
"def _open_project(project):\n api_segment = '/_apis/'\n pos = project.url.find(api_segment)\n if pos >= 0:\n url = project.url[:pos + 1] + uri_quote(project.name)\n logger.debug('Opening web page: %s', url)\n webbrowser.open_new(url=url)\n else:\n raise CLIError(\"Failed to open web browser, due to unrecognized url in response.\")",
"def next_workspace(self, flag='next'):\n non_visible_workspaces = filter(lambda w: not w['visible'],\n self.get_workspaces())\n\n if non_visible_workspaces == []:\n return\n\n focused_workspace = self.get_focused_workspace()\n focused_num = focused_workspace['num']\n\n non_visible_workspaces_nums = [w['num'] for w in non_visible_workspaces]\n non_visible_workspaces_nums = sorted(non_visible_workspaces_nums)\n\n\n if flag == 'next':\n fallback = non_visible_workspaces_nums[0]\n rest = filter(lambda n: n > focused_num,\n non_visible_workspaces_nums)\n else:\n fallback = non_visible_workspaces_nums[-1]\n rest = filter(lambda n: n < focused_num,\n non_visible_workspaces_nums)\n rest.reverse()\n\n if len(rest) > 0:\n self.focus_workspace(rest[0])\n else:\n self.focus_workspace(fallback)",
"def set_workspace(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"set_workspace\", data)",
"def goto(self, item):\n command = 'goto ' + str(item)\n self.run_command(command)",
"def go_to_object_home(self, obj_name):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/o/{}/home\".format(url, obj_name)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def home(self):\n self.goto(0, 0)",
"def go_to_url(self, url):\n if self.browser is not None:\n self.browser.get(url)\n else:\n print('Browser is not running')",
"def go_to_watchlist(self):\n try:\n self.sleep_approx(0.5)\n self.driver.find_element(\n By.XPATH, '/html/body/main/section/nav/button[3]').click()\n self.sleep_approx(0.5)\n self.driver.find_element(\n By.XPATH, '/html/body/main/section/section/div[2]/div/div/div[4]').click()\n self.sleep_approx(0.5)\n except:\n log_event(self.queue, \"Bot broke - go_to_watchlist method\")",
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def workspace(self, value: str) -> None:\n self._workspace = value",
"def switch_to_the_tab(self, tab_number=-1):\n self.driver.switch_to.window(self.driver.window_handles[tab_number])",
"def goto(self, n_house):\n el = self.wait_n_get(By.LINK_TEXT, houses[n_house])\n el.click()",
"def go_to_record_home(self, obj_id):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/r/{}/view\".format(url, obj_id)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def step():\n \n step = models.Step(action=u\"goto\", target=u\"http://www.joesfunerals.com\")",
"def navigate_to(self):\n #self._kernel.navigate_to(route)\n pass",
"def goto(url):\r\n terminal(f'start \"\" \"{url}\"')",
"def go(self, address):\n self._send_command(self.__COMMAND['go'])\n self._set_address(address)",
"def get_current_workspace_num(i3: i3ipc.Connection):\n return i3.get_tree().find_focused().workspace().num",
"def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')",
"def go_to_setup_home(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/SetupOneHome/home\")\n self.wait_until_loading_is_complete()"
]
| [
"0.71325797",
"0.6187763",
"0.6157023",
"0.59943575",
"0.5943539",
"0.5912566",
"0.58262795",
"0.57753295",
"0.5770139",
"0.55275774",
"0.5519753",
"0.5467741",
"0.54426324",
"0.5431287",
"0.5407863",
"0.5407681",
"0.5323033",
"0.5264269",
"0.5262899",
"0.5244698",
"0.52416396",
"0.5229783",
"0.5227258",
"0.5193271",
"0.5185149",
"0.5136975",
"0.51145405",
"0.5065975",
"0.5061435",
"0.50549"
]
| 0.79520303 | 0 |
Move the focused container to the given workspace. | def move_to(i3: i3ipc.Connection, workspace: int):
i3.command(f"move container to workspace number {workspace}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_container(i3, name, monitor, container_id=None):\n i3.command(f'move container to workspace {name}')\n i3.command(f'workspace {name}, move workspace to output {monitor}')\n if container_id:\n i3.command(f'[con_id=\"{container_id}\"] focus')",
"def set_workspace(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"set_workspace\", data)",
"def MoveWorkspace(target_ws, source_ws):\n if target_ws == '' or source_ws == '':\n raise ValueError('The target or source name can not be empty.')\n _C.MoveWorkspace(target_ws, source_ws)",
"def MoveWorkspace(target_ws, source_ws):\n if target_ws == '' or source_ws == '':\n raise ValueError('The target or source name can not be empty.')\n MoveWorkspaceCC(target_ws, source_ws)",
"def go_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"workspace number {workspace}\")",
"def move(self):\n \n self.position = self.explore()",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n SwitchWorkspaceCC(workspace_name, create_if_missing)",
"def change_workspace(num):\n LOGGER.debug('change_workspace: requested workspace {}'.format(num))\n output = get_focused_output()\n prefix = get_workspace_prefix(output)\n ws_num = get_workspace_num(prefix, num)\n workspace = get_workspace(ws_num, prefix, num)\n result = switch_workspace(f'{workspace}')\n if not result.success:\n LOGGER.debug(f'change_workspace: faild to run command: {workspace}')\n LOGGER.debug(result.error)",
"def move_to_win(self):\n self.external_win = PlotWindow(plot=self.pw, parent=self)\n self.external_win.closeWin.connect(lambda: self.layout().takeAt(1))\n self.external_win.closeWin.connect(lambda: self.layout().insertWidget(1, self.pw))\n self.external_win.closeWin.connect(lambda: self.btn_open.setEnabled(True))\n self.external_win.show()",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n _C.SwitchWorkspace(workspace_name, create_if_missing)",
"def workspace(self, value: str) -> None:\n self._workspace = value",
"def move_to_element(self, elem):\n ActionChains(self.driver).move_to_element(elem).perform()",
"def focus_window(i3, container_id):\n i3.command(f'[con_id=\"{container_id}\"] floating enable')\n i3.command(f'[con_id=\"{container_id}\"] focus')",
"def siguiente(self, widget):\n window = widget.get_toplevel()\n window.do_move_focus(window, gtk.DIR_TAB_FORWARD)",
"def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)",
"def run(self):\n # type: () -> None\n self.move_to(self.location)",
"def move_and_restore(win_filter_fn, xywh):\n x, y, w, h = xywh[0], xywh[1], xywh[2], xywh[3]\n win = ahk.find_window(win_filter_fn)\n if win:\n win.restore()\n win.move(x, y, w, h)\n return win is not None",
"def move(self, x, y):\n if self.computer_first:\n self.app.config(cursor='watch')\n self.board = self.board.move(x, y)\n self.update()\n self.computer_first = 0\n self.app.config(cursor='')\n else:\n self.board = self.board.move(x, y)\n self.update()\n self.app.config(cursor='watch')\n move = self.board.best()\n if move:\n self.board = self.board.move(*move)\n self.update()\n self.app.config(cursor='')",
"def set_workspace(self):\n try:\n chdir(self._path_temp) # assure we stay in the workspace\n except FileNotFoundError:\n raise MissingContextError(\"Context does not exist!\") from None",
"def workspaceLayoutManager(*args, collapseMainWindowControls: List[AnyStr, bool]=None, current:\n bool=True, delete: AnyStr=\"\", i: AnyStr=\"\", listLayouts: bool=True,\n listModuleLayouts: bool=True, listUserLayouts: bool=True, modified:\n AnyStr=\"\", parentWorkspaceControl: AnyStr=\"\", reset: bool=True,\n restoreMainWindowControls: bool=True, save: bool=True, saveAs:\n AnyStr=\"\", setCurrent: AnyStr=\"\", setCurrentCallback: AnyStr=\"\",\n setModifiedCallback: AnyStr=\"\", type: AnyStr=\"\", q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def addWorkspace(self, dryrun):\n pass",
"def move(self):\n \n self.position = self.wander()",
"def ResetWorkspace(workspace_name=''):\n _C.ResetWorkspace(workspace_name)",
"def getTargetContainer(self):\n\n settings = zope.component.getUtility(IPMR2GlobalSettings)\n\n if settings.create_user_workspace:\n uwc = settings.getCurrentUserWorkspaceContainer()\n if uwc is not None:\n return uwc\n\n # Otherwise return the global workspace container.\n target = settings.getWorkspaceContainer()\n if target is None:\n raise NotFound(self.context, settings.default_workspace_subpath)\n return target",
"def next_workspace(self, flag='next'):\n non_visible_workspaces = filter(lambda w: not w['visible'],\n self.get_workspaces())\n\n if non_visible_workspaces == []:\n return\n\n focused_workspace = self.get_focused_workspace()\n focused_num = focused_workspace['num']\n\n non_visible_workspaces_nums = [w['num'] for w in non_visible_workspaces]\n non_visible_workspaces_nums = sorted(non_visible_workspaces_nums)\n\n\n if flag == 'next':\n fallback = non_visible_workspaces_nums[0]\n rest = filter(lambda n: n > focused_num,\n non_visible_workspaces_nums)\n else:\n fallback = non_visible_workspaces_nums[-1]\n rest = filter(lambda n: n < focused_num,\n non_visible_workspaces_nums)\n rest.reverse()\n\n if len(rest) > 0:\n self.focus_workspace(rest[0])\n else:\n self.focus_workspace(fallback)",
"def move_to_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.add_item(self)",
"def ResetWorkspace(workspace_name=''):\n ResetWorkspaceCC(workspace_name)",
"def switch_to_window(self, wptrunner_id, initial_window=None):\n if wptrunner_id is None:\n return\n\n if initial_window is None:\n initial_window = self.parent.base.current_window\n\n stack = [str(item) for item in self.parent.base.window_handles()]\n first = True\n while stack:\n item = stack.pop()\n\n if item is None:\n assert first is False\n self._switch_to_parent_frame()\n continue\n\n if isinstance(item, str):\n if not first or item != initial_window:\n self.parent.base.set_window(item)\n first = False\n else:\n assert first is False\n try:\n self._switch_to_frame(item)\n except ValueError:\n # The frame no longer exists, or doesn't have a nested browsing context, so continue\n continue\n\n try:\n # Get the window id and a list of elements containing nested browsing contexts.\n # For embed we can't tell fpr sure if there's a nested browsing context, so always return it\n # and fail later if there isn't\n result = self.parent.base.execute_script(\"\"\"\n let contextParents = Array.from(document.querySelectorAll(\"frame, iframe, embed, object\"))\n .filter(elem => elem.localName !== \"embed\" ? (elem.contentWindow !== null) : true);\n return [window.__wptrunner_id, contextParents]\"\"\")\n except Exception:\n continue\n\n if result is None:\n # With marionette at least this is possible if the content process crashed. Not quite\n # sure how we want to handle that case.\n continue\n\n handle_window_id, nested_context_containers = result\n\n if handle_window_id and str(handle_window_id) == wptrunner_id:\n return\n\n for elem in reversed(nested_context_containers):\n # None here makes us switch back to the parent after we've processed the frame\n stack.append(None)\n stack.append(elem)\n\n raise Exception(\"Window with id %s not found\" % wptrunner_id)",
"def move_buildings(self):",
"def move(self, x, y):\n\n\t\tself._window.move(x, y)"
]
| [
"0.72039133",
"0.56448215",
"0.55889195",
"0.5587443",
"0.5435935",
"0.5378369",
"0.5328443",
"0.52069813",
"0.5197307",
"0.51802254",
"0.50865215",
"0.50180376",
"0.50041604",
"0.49425906",
"0.48882422",
"0.48878294",
"0.4881337",
"0.48195142",
"0.4800425",
"0.4756636",
"0.47550562",
"0.47381598",
"0.4720542",
"0.47098613",
"0.47074538",
"0.46990296",
"0.46880442",
"0.46869335",
"0.4675246",
"0.46617308"
]
| 0.73667777 | 0 |
Find the workspace to switch to. If the current workspace is a target, go to the next target. If the current workspace isn't a target, go to the first open target. | def get_target_workspace(current_workspace, open_workspaces, target_workspaces):
logger.debug('get_target_workspace(current: %s, open: %s, targets: %s)',
current_workspace, open_workspaces, target_workspaces)
if len(target_workspaces) <= 0:
logger.debug('No workspaces given - defaulting to current workspace %s',
current_workspace)
return current_workspace
if current_workspace in target_workspaces:
logger.debug('Current workspace is a target - going to next target')
target_index = target_workspaces.index(current_workspace) + 1
target_index %= len(target_workspaces)
return target_workspaces[target_index]
logger.debug("Current workspace isn\'t a target - going to first open "
"target")
for ws in target_workspaces:
if ws in open_workspaces:
logger.debug('Found open target %s - going there', ws)
return ws
logger.debug('No targets are open - going to the first')
return target_workspaces[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def go_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"workspace number {workspace}\")",
"def next_workspace(self, flag='next'):\n non_visible_workspaces = filter(lambda w: not w['visible'],\n self.get_workspaces())\n\n if non_visible_workspaces == []:\n return\n\n focused_workspace = self.get_focused_workspace()\n focused_num = focused_workspace['num']\n\n non_visible_workspaces_nums = [w['num'] for w in non_visible_workspaces]\n non_visible_workspaces_nums = sorted(non_visible_workspaces_nums)\n\n\n if flag == 'next':\n fallback = non_visible_workspaces_nums[0]\n rest = filter(lambda n: n > focused_num,\n non_visible_workspaces_nums)\n else:\n fallback = non_visible_workspaces_nums[-1]\n rest = filter(lambda n: n < focused_num,\n non_visible_workspaces_nums)\n rest.reverse()\n\n if len(rest) > 0:\n self.focus_workspace(rest[0])\n else:\n self.focus_workspace(fallback)",
"def the(target: Target) -> \"SwitchTo\":\n return SwitchTo(target)",
"def _step(current, target):\n \n if current is None: # Base case: target not found.\n return current\n if current == other: # Base case: target found!\n return current\n if target < current: # Recursively search to the left.\n return _step(current.left, target)\n else: # Recursively search to the right.\n return _step(current.right, target)",
"def change_workspace(num):\n LOGGER.debug('change_workspace: requested workspace {}'.format(num))\n output = get_focused_output()\n prefix = get_workspace_prefix(output)\n ws_num = get_workspace_num(prefix, num)\n workspace = get_workspace(ws_num, prefix, num)\n result = switch_workspace(f'{workspace}')\n if not result.success:\n LOGGER.debug(f'change_workspace: faild to run command: {workspace}')\n LOGGER.debug(result.error)",
"def go_to_next_state(self):\n pass",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n SwitchWorkspaceCC(workspace_name, create_if_missing)",
"def findTarget(self, initial_call):\n if self.vision.hasTarget():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.setOutput(self.SEARCH_SPEED, -self.SEARCH_SPEED)",
"def getNextTarget(self):\r\n\r\n\t\tif self.pathToGoal == []:\r\n#\t\t\tprint \"\\tPath empty, finding a new one.\"\r\n\t\t\tself.decideOnGoal()\r\n\t\t\tself.calculateNewPath()\r\n\t\r\n\t\tself.currentTarget = self.pathToGoal.pop(0)",
"def CurrentWorkspace():\n return _C.CurrentWorkspace()",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n _C.SwitchWorkspace(workspace_name, create_if_missing)",
"def workspace(self):\n\n # get workspace specified for Vehicle or from its driver\n if self._workspace is not None:\n return self._workspace\n if self._control is not None:\n return self._control._workspace",
"def action_goto(self):\n dialog = GoToDialog(self)\n dialog.exec()\n\n # Re-focus the main window\n self.activateWindow()",
"def moveCurrentNodeToTarget(self, checked=False):\n\n c = self.c\n p = c.p\n\n vnodes = [i.v for i in c.getSelectedPositions()]\n\n needs_undo = self.type_ != \"jump\"\n\n if needs_undo:\n bunch = c.undoer.beforeMoveNode(p)\n\n for v in vnodes:\n\n p2 = c.vnode2position(self.target)\n p = c.vnode2position(v)\n\n if not c.positionExists(p2):\n g.error('Target no longer exists: %s' % self.targetHeadString)\n return\n\n if self.type_ in ('clone', 'move'): # all others are always valid?\n if p.v == p2.v or not self.checkMove(p, p2):\n g.error('Invalid move: %s' % (self.targetHeadString))\n return\n if p2.isAncestorOf(p): # not for sibling moves\n p2.expand()\n nxt = p.visNext(c) or p.visBack(c)\n nxt = nxt.v\n # store a VNode instead of position as positions are too easily lost\n\n if self.type_ != 'jump':\n p.setDirty() # before move to dirty current parent\n p2.setDirty()\n c.setChanged()\n\n if self.type_ == 'clone':\n p = p.clone()\n\n if self.type_ in ('move', 'clone'):\n if self.which == 'first child':\n p.moveToFirstChildOf(p2)\n elif self.which == 'last child':\n p.moveToLastChildOf(p2)\n elif self.which in ('next sibling', 'prev sibling'):\n if not p2.parent():\n raise NotImplementedError(\"Not implemented for top-level nodes\") #FIXME\n if self.which == 'next sibling':\n p.moveToNthChildOf(p2.parent(), p2._childIndex)\n elif self.which == 'prev sibling':\n p.moveToNthChildOf(p2.parent(), p2._childIndex - 1)\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n\n elif self.type_ == 'bkmk':\n unl = self.computeUNL(p) # before tree changes\n if self.which == 'first child':\n nd = p2.insertAsNthChild(0)\n elif self.which == 'last child':\n nd = p2.insertAsLastChild()\n elif self.which == 'next sibling':\n nd = p2.insertAfter()\n elif self.which == 'prev sibling':\n nd = p2.insertBefore()\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n h = p.anyAtFileNodeName() or p.h\n while h and h[0] == '@':\n h = h[1:]\n nd.h = h\n nd.b = unl\n\n elif self.type_ == 'copy':\n\n if self.which == 'first child':\n nd = p2.insertAsNthChild(0)\n quickMove.copy_recursively(p, nd)\n # unlike p.copyTreeFromSelfTo, deepcopys p.v.u\n elif self.which == 'last child':\n nd = p2.insertAsLastChild()\n quickMove.copy_recursively(p, nd)\n elif self.which == 'next sibling':\n nd = p2.insertAfter()\n quickMove.copy_recursively(p, nd)\n elif self.which == 'prev sibling':\n nd = p2.insertBefore()\n quickMove.copy_recursively(p, nd)\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n\n elif self.type_ in ('linkTo', 'linkFrom'):\n blc = getattr(c, 'backlinkController', None)\n if blc is None:\n g.es(\"Linking requires backlink.py plugin\")\n return\n if self.type_ == 'linkTo':\n blc.vlink(p.v, p2.v)\n else:\n blc.vlink(p2.v, p.v)\n\n if self.type_ in ('bkmk', 'clone', 'copy', 'move'):\n nxt = c.vnode2position(nxt)\n elif self.type_ == 'jump':\n nxt = c.vnode2position(self.target)\n else:\n nxt = None # linkTo / linkFrom don't move\n\n if nxt is not None and c.positionExists(nxt):\n c.selectPosition(nxt)\n\n if needs_undo:\n c.undoer.afterMoveNode(p, 'Quick Move', bunch)\n c.setChanged()\n\n c.redraw()",
"def doSwitchToNextWindow(self, timeout=10.0):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n cmdId = self.getCurrentWindowHandle()\n rsp = self.hasWindowHandle(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n currentHandle = elementVall.get('value')\n \n cmdId = self.getAllWindowHandles()\n rsp = self.hasWindowHandles(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n listHandles = elementVall.get('handles').getItems()\n \n z = 0\n for h in sorted(listHandles):\n if h == currentHandle: \n break\n z += 1\n \n z += 1\n nextHandle = listHandles[z]\n\n cmdId = self.switchToWindow( windowName=nextHandle )\n rsp = self.isWindowsSwitched(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False\n \n return ret",
"def switch_to_main(self):\n return main.switch()",
"def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)",
"def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")",
"def get_current_workspace_num(i3: i3ipc.Connection):\n return i3.get_tree().find_focused().workspace().num",
"def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))",
"def target_found( self ):\n print( \"Solution: \" + self.path );",
"def step_to(self, inp, out):\n for new_state in self.current_state.transitions[inp]:\n if new_state[0] == out:\n self.current_state = new_state[1]\n return out\n return None",
"def _open_workspace(self, name, workspace):\n workspace_url = self.environment_driver.extract_workspace_url(\n name, workspace)\n result = webbrowser.open(workspace_url, new=2)\n\n return result",
"def move_to(self, target):\n self.map.breadth_first_search(self.position, target)\n path = self.map.get_path(target, self.position)\n for node in path[1:]:\n mask = (\n node.x - self.position.x,\n node.y - self.position.y\n )\n direction = self.MASKS[mask]\n self.move(direction)",
"def next_state(self):\r\n observed_state: State = self.opened.popleft()\r\n if not self.is_solvable():\r\n print(\"UNSOLVABLE\")\r\n return\r\n\r\n if np.all(observed_state == self.target_state):\r\n self.current_state = observed_state\r\n return\r\n\r\n self.closed.add(observed_state)\r\n\r\n for neighbor in observed_state.neighbors():\r\n if neighbor not in self.closed or neighbor not in self.opened:\r\n self.opened.append(neighbor)",
"def move_to_stage_2(self, target):\n # type: (RoomPosition) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving to {} - stage 2.\", _.pluck(ordered_members, 'name'), target)\n\n movement_opts = self.new_movement_opts()\n\n for i in range(len(ordered_members) - 1, -1, -1):\n if i == 0:\n if not ordered_members[i].pos.isEqualTo(target):\n if target == self.location:\n ordered_members[i].follow_military_path(self.find_origin(), target, movement_opts)\n else:\n ordered_members[i].move_to(target, movement_opts)\n else:\n next_drone = ordered_members[i - 1]\n this_drone = ordered_members[i]\n if this_drone.pos.isNearTo(next_drone.pos) or movement.is_edge_position(next_drone.pos):\n if this_drone.creep.fatigue and not movement.is_edge_position(next_drone.pos):\n self.log(\"drone {} at {},{} breaking due to fatigue\", i, this_drone.pos.x, this_drone.pos.y)\n break\n direction = movement.diff_as_direction(this_drone.pos, next_drone.pos)\n this_drone.creep.move(direction)\n this_drone.creep.__direction_moved = direction\n elif movement.is_edge_position(this_drone.pos):\n this_drone.move_to(next_drone)\n elif movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) > 3 or (\n movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) > 1\n and not movement.is_edge_position(next_drone.pos)\n ):\n this_drone.move_to(next_drone)\n self.log(\"drone {} at {},{} breaking due to distance\", i, this_drone.pos.x, this_drone.pos.y)\n break\n else:\n # for j in range(len(ordered_members) - 1, i, -1):\n # ordered_members[j].creep.move(\n # movement.diff_as_direction(ordered_members[j], ordered_members[j - 1]))\n moved = False\n\n if movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) == 2:\n # Note: we are guaranteed not to be in an edge position because if we were, the above\n # if would be triggered instead! This allows us to ignore the room name of the next pos.\n next_pos = movement.next_pos_in_direction_to(this_drone.pos, next_drone.pos)\n if movement.is_block_empty(this_drone.room, next_pos.x, next_pos.y):\n other_creeps_there = cast(List[Creep], this_drone.room.look_at(LOOK_CREEPS, next_pos))\n other_drone = _.find(other_creeps_there, 'my')\n if other_drone:\n other_drone.move(movement.diff_as_direction(other_drone.pos, this_drone.pos))\n this_drone.creep.move(movement.diff_as_direction(this_drone.pos, next_drone.pos))\n moved = True\n elif not len(other_creeps_there):\n direction = movement.diff_as_direction(this_drone.pos, next_drone.pos)\n this_drone.creep.move(direction)\n this_drone.creep.__direction_moved = direction\n moved = True\n if not moved:\n this_drone.move_to(next_drone)",
"def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move",
"def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')",
"def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False",
"def navigate_to(self):\n #self._kernel.navigate_to(route)\n pass"
]
| [
"0.60954845",
"0.58509964",
"0.5680681",
"0.56290454",
"0.54656833",
"0.53953046",
"0.5384196",
"0.53578043",
"0.529885",
"0.5252498",
"0.520959",
"0.52038735",
"0.51325685",
"0.50813437",
"0.50762445",
"0.5061682",
"0.50541234",
"0.5039189",
"0.50001895",
"0.49859533",
"0.4914396",
"0.4911385",
"0.49064213",
"0.4899712",
"0.4895445",
"0.4884328",
"0.48729104",
"0.48656473",
"0.48626783",
"0.48548838"
]
| 0.73417485 | 0 |
Get the number of the current workspace. | def get_current_workspace_num(i3: i3ipc.Connection):
return i3.get_tree().find_focused().workspace().num | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def get_workspace(self):\n wid = self._config[\"workspace\"]\n return sim_consts.workspace_origin[wid], sim_consts.workspace_size[wid]",
"def CurrentWorkspace():\n return _C.CurrentWorkspace()",
"def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace(self) -> str:\n return self._workspace",
"def workspace(self):\n return self.rpc.call(MsfRpcMethod.DbCurrentWorkspace)['workspace']",
"def get_current_window():\n\n try:\n return vim.current.window.number - 1\n except AttributeError:\n return int(vim.eval('winnr()')) - 1",
"def workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def get_current(self) -> int:\n return self._current",
"def current_node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"current_node_count\")",
"def workspaceInfo(self):\n pass",
"def name(self):\n return self.attributes.workspace.name",
"def get_current_lot_id() -> int:\n return services.active_lot_id() or -1",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def mainWorkspace(self):\n return self._mainWorkspace",
"def get_workspace(client):\n return client._creoson_post(\"windchill\", \"get_workspace\", key_data=\"workspace\")",
"def Workspace(self):\n return self._module.workspace",
"def synapse_workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def 取项目数(self): # real signature unknown; restored from __doc__\n return self.GetCount()",
"def ipython_current_number(self):\n return self.IP.outputcache.prompt_count",
"def get_program_counter(self):\n return self.get_thread().program_counter",
"def get_open_workspaces(i3: i3ipc.Connection):\n return [ws.num for ws in i3.get_tree().workspaces()]",
"def num_projects(self):\n return self._num_projects"
]
| [
"0.70615363",
"0.70615363",
"0.70028317",
"0.70028317",
"0.68003416",
"0.6721158",
"0.67083037",
"0.6684436",
"0.6620677",
"0.6572125",
"0.6547534",
"0.6448233",
"0.6448233",
"0.6275984",
"0.6210025",
"0.616503",
"0.6121598",
"0.6117455",
"0.6078367",
"0.6078367",
"0.6078367",
"0.6059495",
"0.60265654",
"0.6011544",
"0.6010107",
"0.60026485",
"0.5991658",
"0.5958743",
"0.591692",
"0.5915892"
]
| 0.81946534 | 0 |
Get the numbers of open workspaces. | def get_open_workspaces(i3: i3ipc.Connection):
return [ws.num for ws in i3.get_tree().workspaces()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_opened_windows_list():\n\n global opened_windows_names\n EnumWindows(EnumWindowsProc(foreach_window), 0)\n return opened_windows_names",
"def get_open_disk_space(self):\n count = 0\n for i in range(self.size):\n if self.disk_mem[i]==\".\":\n count += 1\n return count",
"async def getOpenZoneCount(self):\n open_zone_count = await self.director.getItemVariableValue(\n self.item_id, \"OPEN_ZONE_COUNT\"\n )\n return open_zone_count",
"def get_current_workspace_num(i3: i3ipc.Connection):\n return i3.get_tree().find_focused().workspace().num",
"def num_projects(self):\n return self._num_projects",
"def get_open_spaces(board):\n open_spaces = []\n for i in range(3):\n for j in range(3):\n if board[i][j] == 0:\n open_spaces.append(encode_pos(i, j))\n return open_spaces",
"def get_num_objects(cls):\n return cls.mum_objects",
"def open_connections(self):\n return len(self._connections_openned)",
"def get_number_of_stools(self):\n return len(self._stools)",
"def number_of_open_sites(self):\n return sum(sum(line) for line in self._grid)",
"def LayerNumbers(self):\n\t\treturn self.acad.ActiveDocument.Layers.Count",
"def list(self):\n return self.rpc.call(MsfRpcMethod.DbWorkspaces)['workspaces']",
"def getNamespacesLength(self):\n return _libsbml.XMLToken_getNamespacesLength(self)",
"async def get_open_order_nos(self):\n success, error = await self._rest_api.get_open_orders(self._raw_symbol)\n if error:\n return None, error\n order_nos = []\n for order_info in success:\n order_no = \"{}_{}\".format(order_info[\"orderId\"], order_info[\"clientOrderId\"])\n order_nos.append(order_no)\n return order_nos, None",
"def getOpenEditorsCount(self):\n return len(self.editors)",
"def scope_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scope_count\")",
"def list_workspaces(client):\n return client._creoson_post(\"windchill\", \"list_workspaces\", key_data=\"workspaces\")",
"async def get_open_order_nos(self):\n success, error = await self._rest_api.get_open_orders(self._raw_symbol)\n if error:\n return None, error\n else:\n order_nos = []\n for order_info in success:\n order_no = \"{}_{}\".format(order_info[\"orderId\"], order_info[\"clientOrderId\"])\n order_nos.append(order_no)\n return order_nos, None",
"def get_numStocks(self):\n return len(self.DoS)",
"def N_shells(self):\n return self._N_shells",
"def getNbStations(self) :\n return len(self._stations)",
"def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number",
"def get_num_of_sessions(self):\n return len(self.current_sessions)",
"def subsystem_count(self):\n return len(self)",
"def getNumRegisteredPackages():\n return _libsbml.SBMLExtensionRegistry_getNumRegisteredPackages()",
"def ndocuments(self):\n return self._ndocuments",
"def _get_n_jobs(self):\n self._validate_n_jobs()\n return deepcopy(self.n_jobs)",
"def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors",
"def active(self):\r\n return len(self._namespaces)",
"def processes(self):\n return self._getint('processes')"
]
| [
"0.64218664",
"0.63412476",
"0.61509293",
"0.6022424",
"0.60122925",
"0.5947481",
"0.5943279",
"0.5939426",
"0.59035146",
"0.58829737",
"0.5813977",
"0.57102287",
"0.56623214",
"0.56597006",
"0.5650875",
"0.5648794",
"0.56483734",
"0.5648095",
"0.5639259",
"0.5620538",
"0.56127596",
"0.5609872",
"0.5574102",
"0.5567098",
"0.55541426",
"0.55531645",
"0.55517054",
"0.554666",
"0.55396044",
"0.55395013"
]
| 0.7764141 | 0 |
Provides an n-dimensional parallel iterator that generates index tuples for each iteration point. Sequentially, pndindex is identical to np.ndindex. | def pndindex(*args):
return np.ndindex(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pndindex(*args):\n return np.ndindex(*args)",
"def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check",
"def _get_index_iterator(indexes, length):\n return combinations(indexes, length)",
"def index_iterator((x_min, x_max, y_min, y_max)):\n for row in xrange(y_min, y_max):\n for col in xrange(x_min, x_max):\n yield (row, col)",
"def __iter__(self):\n for i in range(self.m):\n for j in range(self.n):\n yield self[i, j]",
"def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)",
"def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])",
"def ndgrid(*args,**kwargs):\n kwargs['indexing'] = 'ij'\n return meshgrid(*args,**kwargs)",
"def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError('the length of the indice should be equal to the size')\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = torch.from_numpy(indices).long()\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples\")\n return iter(indices)",
"def similiar_chunks_indexes(n_values, n_chunks) -> Generator[Tuple[int, int], None, None]:\n chunk_size = int(numpy.ceil(n_values / n_chunks))\n for i in range(0, n_values, chunk_size):\n yield i, i + chunk_size",
"def __iter_test_indices(self, n_samples):\n if self.shuffle:\n np.random.seed(self.random_state)\n indices = np.random.permutation(n_samples)\n else:\n indices = np.arange(n_samples)\n\n fold_sizes = np.full(\n self.n_splits, n_samples // self.n_splits, dtype=int\n )\n fold_sizes[: n_samples % self.n_splits] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n mask = np.zeros(n_samples, dtype=bool)\n mask[indices[start:stop]] = True\n yield mask\n current = stop",
"def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret",
"def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret",
"def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat",
"def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()",
"def indsk_iterparams():\n iter_params = {'input_vectorized': True,\n 'batch_size': 32,\n 'in_mem': False}\n return iter_params",
"def memory_index(indices, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n p = offset\n for i in range(ndim):\n p += strides[i]*indices[i]\n return p",
"def BatchCreator(self, j, n_batch):\n j_start = (j-1)*n_batch + 1\n j_end = j*n_batch + 1\n ind = np.arange(start= j_start, stop=j_end, step=1)\n return ind",
"def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)",
"def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)",
"def step_indices(group_idx):\n ilen = step_count(group_idx) + 1\n indices = np.empty(ilen, np.int64)\n indices[0] = 0\n indices[-1] = group_idx.size\n cmp_pos = 0\n ri = 1\n for i in range(len(group_idx)):\n if group_idx[cmp_pos] != group_idx[i]:\n cmp_pos = i\n indices[ri] = i\n ri += 1\n return indices",
"def memory_index(indices, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n p = offset\n for i in range(ndim):\n p += strides[i] * indices[i]\n return p",
"def index(i, j):\n return i * N + j",
"def random_index(\r\n ngrid: int, nt: int, dim_subset: Union[tuple, list], warmup_length=0\r\n) -> tuple:\r\n batch_size, rho = dim_subset\r\n i_grid = np.random.randint(0, ngrid, [batch_size])\r\n i_t = np.random.randint(0 + warmup_length, nt - rho, [batch_size])\r\n return i_grid, i_t",
"def set_particle_IDs_partition(index, iterator):\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr",
"def __iter__(self):\n for i in self.loopindices:\n pid = self.frametracks.particle.values[i]\n yield pid, self.neighbors(pid)",
"def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])",
"def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])",
"def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return",
"def next(self):\n if self.curr_idx == len(self.idx):\n raise StopIteration\n\n # Fetch the index\n i = self.idx[self.curr_idx]\n self.curr_idx += 1\n\n # Get labels & ids\n labels = self.ndlabels[i:i+self.batch_size]\n users = self.ndusers[i:i + self.batch_size]\n items = self.nditems[i:i + self.batch_size]\n\n # Get feature arrays\n if self.create_batches:\n user_features = self.nduserfeatures[i:i+self.batch_size]\n item_features = self.nditemfeatures[i:i+self.batch_size]\n else:\n # Create user feature arrays\n user_features = mx.ndarray.take(a=self.unique_user_features, indices=users)\n item_features = mx.ndarray.take(a=self.unique_item_features, indices=items)\n\n return mx.io.DataBatch([user_features, item_features], [labels], index = users, pad=0,\n provide_data=[mx.io.DataDesc(name=self.data_names[0], shape=user_features.shape),\n mx.io.DataDesc(name=self.data_names[1], shape=item_features.shape)],\n provide_label=[mx.io.DataDesc(name=self.label_names[0], shape=labels.shape)])"
]
| [
"0.698849",
"0.67596734",
"0.6384372",
"0.6358893",
"0.6060668",
"0.5980556",
"0.59671396",
"0.59533405",
"0.58904546",
"0.583905",
"0.57712317",
"0.57544845",
"0.57544845",
"0.57114184",
"0.56809837",
"0.5676592",
"0.5672165",
"0.5670588",
"0.5669132",
"0.5669132",
"0.5633394",
"0.5621001",
"0.56108326",
"0.5574716",
"0.55740654",
"0.5568105",
"0.55642265",
"0.55642265",
"0.55582744",
"0.55539703"
]
| 0.69749606 | 1 |
Creates the name for the handler; called from ``__init__`` if a name is not given. | def _create_name(self) -> str:
return self.stream.__class__.__name__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __set_name__(self, cls, name):\n pass",
"def _get_name(cls, **kwargs: Any) -> str:\n raise NotImplementedError('Subclasses must implement this method.') # pragma: no cover",
"def __init__(__self__, *,\n name: pulumi.Input[str]):\n pulumi.set(__self__, \"name\", name)",
"def _set_name_scope(self):\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. '_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')",
"def get_name(self, request, *args, **kwargs):\n raise NotImplementedError",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def __init__(self):\n self.__name = 'name'",
"def getDefaultName(self): # real signature unknown; restored from __doc__\n pass",
"def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def GetHandlerName(self):\n return u'NULL'",
"def name(self, *args, **kwargs) -> Any:\n pass",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name"
]
| [
"0.62612844",
"0.61916685",
"0.6182883",
"0.61657673",
"0.6151574",
"0.61496806",
"0.6144922",
"0.61412007",
"0.61395377",
"0.61388636",
"0.61388636",
"0.61388636",
"0.61388636",
"0.61388636",
"0.61388636",
"0.6127988",
"0.6106006",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684",
"0.60805684"
]
| 0.6380641 | 0 |
Attempts to cache `objdoc` for module `modname`. | def __setitem__(self, modname: str, objdoc: Objdoc):
spec = importlib.util.find_spec(modname)
if spec.origin is None:
raise CannotCache(f"not a module file; can't cache: {modname}")
path = self.__get_path(spec)
path = path.parent / (path.name + ".json.gz")
check = _get_check(spec)
try:
file = gzip.open(path, "wt", encoding="utf-8")
except OSError:
raise CannotCache(f"can't write cache: {modname}")
with file:
json.dump({"check": check, "objdoc": objdoc}, file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __put_module_in_sys_cache(module_name, module_obj):\n #try:\n #if hasattr(sys, 'stypy_module_cache'):\n sys.stypy_module_cache[module_name] = module_obj\n # else:\n # __preload_sys_module_cache()\n # sys.stypy_module_cache[module_name] = module_obj\n # except:\n # pass\n # finally:\n # return None",
"def cache_modules(*modnames) -> None:\n inspector = Inspector()\n modnames = { n for m in modnames for n in find_submodules(m) }\n\n for modname in modnames:\n logging.debug(f\"inspecting: {modname}\")\n objdoc = inspector.inspect_module(modname)\n logging.debug(f\"writing cache: {modname}\")\n try:\n PYCACHE[modname] = objdoc\n except CannotCache as exc:\n logging.warning(f\"cannot cache: {exc}\")",
"def docmod(module_name):\n return BaseGroup.factory(module_name).doc",
"def get_documented_in_docstring(name, module=None, filename=None):\r\n try:\r\n obj, real_name = import_by_name(name)\r\n lines = pydoc.getdoc(obj).splitlines()\r\n return get_documented_in_lines(lines, module=name, filename=filename)\r\n except AttributeError:\r\n pass\r\n except ImportError, e:\r\n print \"Failed to import '%s': %s\" % (name, e)\r\n return {}",
"def get_documented_in_docstring(name, module=None, filename=None):\n try:\n obj, real_name = import_by_name(name)\n lines = pydoc.getdoc(obj).splitlines()\n return get_documented_in_lines(lines, module=name, filename=filename)\n except AttributeError:\n pass\n except ImportError, e:\n print \"Failed to import '%s': %s\" % (name, e)\n return {}",
"def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)",
"def _load_libdoc(self, name, source=None, use_cache=True):\n strategies = [name]\n\n if source is not None and source.endswith(\".robot\"):\n try:\n strategies += [find_file(source)]\n except Exception:\n pass\n\n for strategy in strategies:\n if use_cache and strategy in self._doc_cache:\n return self._doc_cache[strategy]\n\n try:\n libdoc = LibraryDocumentation(strategy)\n self._doc_cache[strategy] = self._doc_cache[name] = libdoc\n return libdoc\n except Exception as err:\n pass\n self.log.debug(\"Could not load libdoc for %s: %s\", strategy, err)",
"def resolve(self, doc_uri, obj):\n self.cache = {doc_uri: obj}\n session = requests.Session()\n session.mount(\"file://\", requests_file.FileAdapter())\n session.mount(\"resource://\", requests_resource.ResourceAdapter())\n with session:\n return self._resolve(type(obj)(), obj, session)",
"def resolve(self, doc_uri, obj):\n self.cache = {doc_uri: obj}\n return self._resolve(doc_uri, obj, obj)",
"def get_module_from_sys_cache(module_name):\n try:\n if hasattr(sys, 'stypy_module_cache'):\n return sys.stypy_module_cache[module_name]\n else:\n __preload_sys_module_cache()\n return sys.stypy_module_cache[module_name]\n except:\n return None",
"def _cherrypy_pydoc_resolve(thing, forceload=0):\n if isinstance(thing, _ThreadLocalProxy):\n thing = getattr(serving, thing.__attrname__)\n return _pydoc._builtin_resolve(thing, forceload)",
"def get_object(self, docobj):\n if docobj is None:\n return None\n return self._docmap.get(docobj)",
"def doc(obj):\n return Documentation.fromObject(obj).first",
"def generate(\r\n self,\r\n more_content=None,\r\n real_modname=None,\r\n check_module=False,\r\n all_members=False,\r\n ):\r\n if not self.parse_name():\r\n # need a module to import\r\n logger.warning(\r\n \"[sphinxcontrib-matlabdomain] don't know which module to import for autodocumenting \"\r\n '%r (try placing a \"module\" or \"currentmodule\" directive '\r\n \"in the document, or giving an explicit module name)\",\r\n self.name,\r\n )\r\n return\r\n\r\n # now, import the module and get object to document\r\n if not self.import_object():\r\n return\r\n\r\n # If there is no real module defined, figure out which to use.\r\n # The real module is used in the module analyzer to look up the module\r\n # where the attribute documentation would actually be found in.\r\n # This is used for situations where you have a module that collects the\r\n # functions and classes of internal submodules.\r\n self.real_modname = real_modname or self.get_real_modname()\r\n\r\n # try to also get a source code analyzer for attribute docs\r\n try:\r\n self.analyzer = MatModuleAnalyzer.for_module(self.real_modname)\r\n # parse right now, to get PycodeErrors on parsing (results will\r\n # be cached anyway)\r\n self.analyzer.find_attr_docs()\r\n except PycodeError as err:\r\n self.env.app.debug(\r\n \"[sphinxcontrib-matlabdomain] module analyzer failed: %s\", err\r\n )\r\n # no source file -- e.g. for builtin and C modules\r\n self.analyzer = None\r\n # at least add the module.__file__ as a dependency\r\n if hasattr(self.module, \"__file__\") and self.module.__file__:\r\n self.directive.record_dependencies.add(self.module.__file__)\r\n else:\r\n self.directive.record_dependencies.add(self.analyzer.srcname)\r\n\r\n # check __module__ of object (for members not given explicitly)\r\n if check_module:\r\n if not self.check_module():\r\n return\r\n\r\n # make sure that the result starts with an empty line. This is\r\n # necessary for some situations where another directive preprocesses\r\n # reST and no starting newline is present\r\n self.add_line(\"\", \"<autodoc>\")\r\n\r\n # format the object's signature, if any\r\n sig = self.format_signature()\r\n\r\n # generate the directive header and options, if applicable\r\n self.add_directive_header(sig)\r\n self.add_line(\"\", \"<autodoc>\")\r\n\r\n # e.g. the module directive doesn't have content\r\n self.indent += self.content_indent\r\n\r\n # add all content (from docstrings, attribute docs etc.)\r\n self.add_content(more_content)\r\n\r\n # document members, if possible\r\n self.document_members(all_members)",
"def md_module(module_obj, module_link=None):\n\n def should_doc(name):\n return (not isinstance(module_obj.__dict__[name], types.ModuleType)\n and not name.startswith('_'))\n\n stuff_to_doc = [\n obj for name, obj in sorted(module_obj.__dict__.items())\n if should_doc(name)\n ]\n\n classes_to_doc = []\n functions_to_doc = []\n\n for s in stuff_to_doc:\n if isinstance(s, type):\n classes_to_doc.append(s)\n elif isinstance(s, types.FunctionType):\n functions_to_doc.append(s)\n\n heading_text = module_obj.__name__\n if module_link:\n heading_text = md_link(heading_text, module_link)\n\n content = [\n md_heading(heading_text, level=1),\n '',\n md_italic('This page was autogenerated. '\n 'Run `devil/bin/generate_md_docs` to update'),\n '',\n ]\n\n for c in classes_to_doc:\n content += md_class(c)\n for f in functions_to_doc:\n content += md_function(f)\n\n print('\\n'.join(content))\n\n return 0",
"def get_docs( mysource , basename ):\n import parser\n ast = parser.suite(mysource)\n return ModuleInfo(ast.totuple(1), basename)",
"def __exist_module_in_sys_cache(module_name):\n try:\n if hasattr(sys, 'stypy_module_cache'):\n return module_name in sys.stypy_module_cache\n else:\n __preload_sys_module_cache()\n return False\n except:\n return False",
"def getobj(modulename, objname):\n return getattr(import_module(modulename), objname)",
"def test_article_object_caching(self):\r\n settings = get_settings(filenames={})\r\n settings['CACHE_PATH'] = self.temp_cache\r\n settings['CONTENT_CACHING_LAYER'] = 'generator'\r\n settings['READERS'] = {'asc': None}\r\n\r\n generator = ArticlesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.generate_context()\r\n self.assertTrue(hasattr(generator, '_cache'))\r\n\r\n generator = ArticlesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.readers.read_file = MagicMock()\r\n generator.generate_context()\r\n generator.readers.read_file.assert_called_count == 0",
"def getdoc(object):\r\n try:\r\n doc = object.__doc__\r\n except AttributeError:\r\n return None\r\n if not isinstance(doc, types.StringTypes):\r\n return None\r\n return cleandoc(doc)",
"def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:\r\n if obj_name == '':\r\n return module\r\n obj = module\r\n for part in obj_name.split(\".\"):\r\n obj = getattr(obj, part)\r\n return obj",
"def read_docstrings(lang):\n modname = \"turtle_docstringdict_%(language)s\" % {'language':lang.lower()}\n module = __import__(modname)\n docsdict = module.docsdict\n for key in docsdict:\n try:\n# eval(key).im_func.__doc__ = docsdict[key]\n eval(key).__doc__ = docsdict[key]\n except Exception:\n print(\"Bad docstring-entry: %s\" % key)",
"def test_page_object_caching(self):\r\n settings = get_settings(filenames={})\r\n settings['CACHE_PATH'] = self.temp_cache\r\n settings['CONTENT_CACHING_LAYER'] = 'generator'\r\n settings['READERS'] = {'asc': None}\r\n\r\n generator = PagesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.generate_context()\r\n self.assertTrue(hasattr(generator, '_cache'))\r\n\r\n generator = PagesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.readers.read_file = MagicMock()\r\n generator.generate_context()\r\n generator.readers.read_file.assert_called_count == 0",
"def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)",
"def getdoc(obj):\n try:\n doc = obj.__doc__\n except AttributeError:\n return None\n if not isinstance(doc, str):\n return None\n return inspect.cleandoc(doc)",
"def _set_object_doc(self, obj, doc, stacklevel=3):\n if isinstance(obj, types.MethodType) and six.PY2:\n obj = obj.im_func\n try:\n obj.__doc__ = doc\n except AttributeError: # probably python2 class\n if self.python2_classes != \"raise\" and (inspect.isclass(obj) and six.PY2):\n if self.python2_classes == \"warn\":\n warn(\n \"Cannot modify docstring of classes in python2!\",\n stacklevel=stacklevel,\n )\n else:\n raise\n return obj",
"def _auto_rst_for_module(module: types.ModuleType, exclude_members: List[Any]) -> str:\n name = module.__name__\n functions: List[Tuple[str, types.FunctionType]] = []\n exceptions: List[Tuple[str, Type[BaseException]]] = []\n classes: List[Tuple[str, Type[object]]] = []\n methods: List[Tuple[str, types.MethodType]] = []\n attributes: List[Tuple[str, object]] = []\n\n lines = [\n f'{name}',\n f'{\"=\" * len(name)}\\n',\n f'.. automodule:: {name}\\n',\n f'.. currentmodule:: {name}',\n ]\n try:\n all_members = list(module.__all__)\n except AttributeError:\n all_members = list(vars(module).keys())\n\n for item_name, val in vars(module).items():\n if val in exclude_members:\n continue\n\n if item_name.startswith('_'):\n # Skip private members\n continue\n\n if item_name not in all_members:\n # Skip members not in `__all__``\n continue\n\n if isinstance(val, types.ModuleType):\n # Skip modules; those are documented by autosummary\n continue\n\n if isinstance(val, types.FunctionType):\n functions.append((item_name, val))\n elif isinstance(val, types.MethodType):\n methods.append((item_name, val))\n elif isinstance(val, type) and issubclass(val, BaseException):\n exceptions.append((item_name, val))\n elif isinstance(val, type):\n assert issubclass(val, object)\n classes.append((item_name, val))\n else:\n attributes.append((item_name, val))\n continue\n\n # Sort by the reimported name\n functions.sort(key=lambda x: x[0])\n exceptions.sort(key=lambda x: x[0])\n classes.sort(key=lambda x: x[0])\n attributes.sort(key=lambda x: x[0])\n\n for category, category_name in ((functions, 'Functions'), (classes, 'Classes'), (exceptions, 'Exceptions')):\n if sphinx_lines := [f' {item_name}' for item_name, _ in category]:\n lines.extend(\n (\n f'\\n.. rubric:: {category_name}\\n',\n '.. autosummary::',\n ' :toctree: generated',\n ' :nosignatures:',\n )\n )\n if category_name in ('Classes', 'Hparams'):\n lines.append(' :template: classtemplate.rst')\n elif category_name == 'Functions':\n lines.append(' :template: functemplate.rst')\n lines.append('')\n lines.extend(sphinx_lines)\n lines.append('')\n\n lines.append('.. This file autogenerated by docs/source/conf.py\\n')\n\n return '\\n'.join(lines)",
"def _get_pycache_path(spec: ModuleSpec) -> Path:\n # Refuse to do __pycache__ caching for anything in PREFIX.\n # FIXME: Not sure if this is the right policy.\n if is_subpath(spec.origin, sys.prefix):\n raise CannotCache(spec.name)\n\n # Find out where the module cache file goes.\n mod_cache_path = importlib.util.cache_from_source(spec.origin)\n # Put the odoc cache next to it.\n *_, name = spec.name.rsplit(\".\", 1)\n return Path(mod_cache_path).parent / (name + \".supdoc\")",
"def patch_automodapi(app):\n from sphinx_automodapi import automodsumm\n from sphinx_automodapi.utils import find_mod_objs\n automodsumm.find_mod_objs = lambda *args: find_mod_objs(\n args[0], onlylocals=True)",
"def docstrings2rst(module_path, module_name, sphinx_directory):\n\n #inoutfile = Path(swig_working_dir,module_path, module_name + '.py')\n # postprocess_docstrings(inoutfile)\n\n # --- Set current module name and import it in 'comp' ---\n # Test case with submodules (e.g. sensor in control)\n if module_path in ('.', ''):\n module_name = 'siconos.' + module_name\n else:\n module_path = module_path.replace(r'/', r'.')\n module_name = 'siconos.' + module_path + '.' + module_name\n\n try:\n comp = importlib.import_module(module_name)\n except ImportError as e:\n raise ImportError(e)\n \n # --- Set output path for rst files ---\n sphinx_directory = Path(sphinx_directory, 'reference',\n 'python', module_name.replace(r'.', '_'))\n if not sphinx_directory.exists():\n os.makedirs(sphinx_directory)\n\n outputname = Path(sphinx_directory, 'autodoc.rst')\n title = module_name + '\\n'\n title += len(title) * '=' + '\\n\\n'\n basename = '/reference/python/' + module_name.replace(r'.', '_')\n header = '**Module documentation**\\n\\n'\n with open(outputname, 'wt') as out:\n out.write(title)\n out.write(header)\n\n directive = f'.. automodule:: {module_name}\\n'\n directive += '\\t:members:\\n'\n directive += '\\t:show-inheritance:\\n\\n' \n out.write(directive)"
]
| [
"0.636846",
"0.60199344",
"0.5941785",
"0.5705336",
"0.56785536",
"0.56135726",
"0.5584943",
"0.54140854",
"0.53702223",
"0.5348207",
"0.5324705",
"0.52313477",
"0.51907396",
"0.51213115",
"0.50879973",
"0.5077939",
"0.5061241",
"0.50270915",
"0.49785343",
"0.49644643",
"0.49642318",
"0.49413168",
"0.4873661",
"0.4860296",
"0.4854413",
"0.4838757",
"0.48292002",
"0.481647",
"0.48113993",
"0.4809972"
]
| 0.66609263 | 0 |
True if `path` is a subpath of `other`. | def is_subpath(path: Path, other: Path):
try:
Path(path).relative_to(other)
except ValueError:
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_sub(parent, path):\n parent = canonical_path(parent, resolve_link=False)\n path = canonical_path(path, resolve_link=False)\n return os.path.commonprefix([parent, path]) == parent",
"def _issubpath(self, a, b):\n p1 = a.rstrip(os.sep).split(os.sep)\n p2 = b.rstrip(os.sep).split(os.sep)\n return p1[:len(p2)] == p2",
"def is_subpath_of(parent, child):\n # Based on https://stackoverflow.com/a/37095733 .\n\n # In Python 3.9, the `Path.is_relative_to()` method will supplant this, so\n # we can stop using crusty old os.path functions.\n parent_realpath = os.path.realpath(parent)\n child_realpath = os.path.realpath(child)\n return os.path.commonpath([parent_realpath, child_realpath]) == parent_realpath",
"def issubpath(filename, superpath, trueifsame = True):\n filename = os.path.abspath(filename)\n superpath = os.path.abspath(superpath)\n if filename.startswith(superpath + os.sep) or (trueifsame is True and filename == superpath):\n return(True)\n else:\n return(False)",
"def is_subpath(directory: str, path: str) -> bool:\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n\n # return true, if the common prefix of both is equal to directory\n # e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b\n return os.path.commonprefix([path, directory]) == directory",
"def is_relative_to(sub_path, parent):\n try:\n parent_path = Path(parent).resolve()\n sub_path.resolve().relative_to(parent_path)\n return True\n except ValueError:\n return False",
"def is_subgraph_of(self, other):\n # If it is already recognized that it is a subgraph this procedure can be skipped.\n if other in self.__supergraph :\n return True\n \n if type(self)!=type(other):\n raise TypeError(\"Only works between graphs.\")\n elif other.return_num_vertices() == 0:\n return False\n elif self.return_num_vertices() == 0:\n return True\n names_to_check = self.return_names()\n # Checks if the vertices are a subset\n if not set(names_to_check).issubset(set(other.return_names())):\n return False\n \n # Traverses each node and checks if the adjacencies build a subset.\n # To do so, the node indices must be replaced by node names.\n # This is laborious, but only needs to be done once.\n for name in names_to_check:\n selflist = set(map(lambda x: (self.return_vertexName(x[0]),x[1]), self.return_adjacencies(self.return_vertexIndex(name))))\n otherlist = set(map(lambda x: (other.return_vertexName(x[0]),x[1]), other.return_adjacencies(other.return_vertexIndex(name))))\n if not selflist.issubset(otherlist):\n return False\n self.__supergraph.append(other)\n \n return True",
"def ChildOrMatch(self, other):\n return self._dir == other or other.startswith(self._dir + \"/\")",
"def _is_child_path(path, parent_path, link_name=None):\n b_path = to_bytes(path, errors='surrogate_or_strict')\n\n if link_name and not os.path.isabs(b_path):\n # If link_name is specified, path is the source of the link and we need to resolve the absolute path.\n b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))\n b_path = os.path.abspath(os.path.join(b_link_dir, b_path))\n\n b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')\n return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))",
"def has_path(self, source, target):\n try:\n sp = nx.shortest_path(self.G, source, target)\n except nx.NetworkXNoPath:\n return False\n return True",
"def exists_path(self, start, end):\n return end in self.paths(start)",
"def ParentOrMatch(self, other):\n return self._dir == other or self._dir.startswith(other + \"/\")",
"def __eq__(self, other):\n if not isinstance(other, Path):\n return False\n\n return list.__eq__(self, other)",
"def is_path(self, s):\n return True",
"def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path",
"def IsSubpath(answer, resolution_map):\n return any(len(answer) < len(x) and answer == x[:len(answer)]\n for x in resolution_map)",
"def is_path(t, path):\n if label(t) != path[0]:\n return False\n if len(path) == 1:\n return True\n return any([is_path(b, path[1:]) for b in branches(t)])",
"def starts_with(self, other: \"ProofPath\") -> bool:\n return self.common_prefix_len(other) == len(other)",
"def inPath(self, oth: 'StateNode') -> bool:\n if self == oth:\n return True\n if self.isSameState(oth):\n return True\n if self.previous is not None:\n return self.previous.inPath(oth)",
"def check_relpath(path1, path2, exception=True):\r\n p1 = op.normpath(path1)\r\n p2 = op.normpath(op.join(path1, path2))\r\n if op.relpath(p1, p2).endswith(op.basename(p1)):\r\n if exception:\r\n raise ValueError(\"Invalid path '%s'\" % path2)\r\n return False\r\n return p2",
"def is_parent_of(a, b):\n a = a.rstrip(\"/\") + \"/\"\n b = b.rstrip(\"/\") + \"/\"\n return b.startswith(a)",
"def isSubDir(parent, child):\n pParts = pathComponents(os.path.abspath(parent))\n cParts = pathComponents(os.path.abspath(child))\n if len(pParts) < len(cParts):\n return cParts[:len(pParts)] == pParts",
"def samepath(p1, p2):\n # type: (str, str) -> bool\n return pathnormalize(p1) == pathnormalize(p2)",
"def path_has_subreddit(self):\r\n return (self.path.startswith('/r/') or\r\n self.path.startswith('/categories/'))",
"def _is_subdir(dir1, dir2):\n r1 = os.path.realpath(dir1)\n r2 = os.path.realpath(dir2)\n if r1.startswith(r2):\n return True\n return False",
"def __eq__(self, other):\n if not isinstance(other, AdaptivePath):\n return False\n\n return super().__eq__(other)",
"def is_in(self, other):\n if self.name == other.name:\n return True\n else:\n if self.parent:\n return self.parent.is_in(other)\n else:\n return False",
"def IsValidSubPath(self, command_path):\n current = self\n for part in command_path:\n current = current.LoadSubElement(part)\n if not current:\n return False\n return True",
"def _isInterestingPath(self, path: str) -> bool:\n for suffix in self.extensions:\n if path.endswith(suffix):\n return True\n return False",
"def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool:\n norm_pkg_path = _path.normpath(pkg_path)\n rest = pkg.replace(parent, \"\", 1).strip(\".\").split(\".\")\n return pkg.startswith(parent) and norm_pkg_path == _path.normpath(\n Path(parent_path, *rest)\n )"
]
| [
"0.73897827",
"0.7339206",
"0.67567295",
"0.65496707",
"0.6387099",
"0.6286626",
"0.61776984",
"0.61434525",
"0.61350846",
"0.61014986",
"0.60929364",
"0.6068817",
"0.60278535",
"0.5977593",
"0.59686697",
"0.59164315",
"0.58905256",
"0.58664703",
"0.58248734",
"0.58200943",
"0.5779905",
"0.57708853",
"0.5753427",
"0.568722",
"0.5678858",
"0.56546444",
"0.5651386",
"0.56312513",
"0.56227756",
"0.5609813"
]
| 0.8505086 | 0 |
Returns the path to the objdoc cache for a module. | def _get_pycache_path(spec: ModuleSpec) -> Path:
# Refuse to do __pycache__ caching for anything in PREFIX.
# FIXME: Not sure if this is the right policy.
if is_subpath(spec.origin, sys.prefix):
raise CannotCache(spec.name)
# Find out where the module cache file goes.
mod_cache_path = importlib.util.cache_from_source(spec.origin)
# Put the odoc cache next to it.
*_, name = spec.name.rsplit(".", 1)
return Path(mod_cache_path).parent / (name + ".supdoc") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cache_path(self):",
"def get_cache_path(self):",
"def cache_path(self):",
"def cache_path(self):",
"def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret",
"def get_module_path(module):\n return pathlib.Path(os.path.dirname(os.path.abspath(inspect.getfile(module))))",
"def get_cache_file_path(self) -> str:\n return self.cache_file_path",
"def get_module_dir_by_obj_name(obj_name: str) -> str:\r\n module, _ = Onrolux.get_module_from_obj_name(obj_name)\r\n return os.path.dirname(inspect.getfile(module))",
"def cachedir(self):\n\n return self._cachedir",
"def _get_cache_dir(self):\n return self.manager.cache_dir",
"def get_cached_path(self):\n if util.IS_CACHE_ENABLED and not self.physical_key.is_local():\n return ObjectPathCache.get(str(self.physical_key))\n return None",
"def cache_file(self, repo):\n token = blake2b(repo.location.encode()).hexdigest()[:10]\n dirname = f\"{repo.repo_id.lstrip(os.sep)}-{token}\"\n return pjoin(self.options.cache_dir, \"repos\", dirname, self.cache.file)",
"def get_cache_path(app_path, er_config, entity_type):\n string = json.dumps(er_config, sort_keys=True)\n hashid = Hasher(algorithm=\"sha1\").hash(string=string)\n hashid = f\"{hashid}$synonym_{entity_type}\"\n\n return path.get_entity_resolver_cache_file_path(app_path, hashid)",
"def _get_cache_dir(self):\n return self.data['info']['root_cache_dir']",
"def cache_path(cls):\n system_cache_path = SystemCachePath()\n\n if sys.platform.startswith('win'):\n cache_directory = system_cache_path.cache_path_win()\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager', 'Cache')\n else:\n if sys.platform == 'darwin':\n cache_directory = system_cache_path.cache_path_macos()\n else:\n cache_directory = system_cache_path.cache_path_unix()\n\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager')",
"def object_path(self):\n return self._object_path",
"def cache_path(self):\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path",
"def _get_cache_filename(self):\n home_dir = os.path.expanduser(\"~\")\n filename = 'dbcollection.json'\n return os.path.join(home_dir, filename)",
"def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n from logging import warn\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: warn(\"pathname of file %r not found\" % filename)\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname",
"def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))",
"def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))",
"def modpath(request):\n return os.path.dirname(request.module.__file__)",
"def path(self):\n return self.lib.path",
"def get_cache_filename(self):\n filename = _slugify(self.parent_filename.replace('.py', ''))\n funcname = _slugify(self.__name__)\n folder = os.path.curdir if USE_CURRENT_DIR else os.path.dirname(self.parent_filepath)\n return os.path.join(folder, filename + '_' + funcname + '.cache')",
"def go_mod_path(self):\n return self.full_path() + \"/go.mod\"",
"def _cachefilename(self, cachedir):\n\n return cachedir / \"filename\"",
"def path(self):\n return self.repository_obj.path / self.name",
"def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))",
"def ModulePath(self):\n return os.path.join(self._module.workspace, self._module.module_cvspath)",
"def _module(self):\n if self._module_cache is None:\n self._module_cache = load_module(self._name, self._path)\n return self._module_cache"
]
| [
"0.6733192",
"0.6733192",
"0.65243727",
"0.65243727",
"0.6192765",
"0.6134021",
"0.6128572",
"0.6034457",
"0.59029865",
"0.58758134",
"0.584348",
"0.583628",
"0.5831629",
"0.5797197",
"0.57850033",
"0.5753407",
"0.57359135",
"0.56733376",
"0.56616026",
"0.5656423",
"0.5656423",
"0.5651163",
"0.5650012",
"0.55939174",
"0.55915606",
"0.5554534",
"0.5525244",
"0.552427",
"0.5502186",
"0.5486728"
]
| 0.71900195 | 0 |
Returns the path to the supdoc cache directory. | def get_cache_dir() -> Path:
try:
cache_dir = Path(os.environ["SUPDOC_CACHE_DIR"])
except KeyError:
try:
cache_dir = Path(os.environ["XDG_CACHE_DIR"])
except KeyError:
if sys.platform == "linux":
cache_dir = Path.home() / ".cache"
elif sys.platform == "darwin":
cache_dir = Path.home() / "Library/Caches"
cache_dir /= "supdoc"
# Make sure the directory exists. Use restrictive permissions for our cache
# directory, but default permissions for the parents.
os.makedirs(cache_dir.parent, exist_ok=True)
with suppress(FileExistsError):
os.mkdir(cache_dir, mode=0o700)
return cache_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cache_path(self):\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path",
"def cache_path(self):",
"def cache_path(self):",
"def get_cache_path(self):",
"def get_cache_path(self):",
"def cachedir(self):\n\n return self._cachedir",
"def _get_cache_dir(self):\n return self.data['info']['root_cache_dir']",
"def host_cache_dir(self):\n cache_dir = SpaCyModel.model_class_dir() / \"cache\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n return cache_dir",
"def get_cache_file_path(self):\n home_path = os.path.expanduser(\"~\")\n # path to the programs cache directory\n full_cache_dir = os.path.join(home_path, \".cache\", CACHE_DIR)\n\n if not os.path.exists( full_cache_dir ):\n os.makedirs( full_cache_dir )\n \n return os.path.join( full_cache_dir, FILE_NAME )",
"def _get_cache_dir():\n\n # Or is that making too many assumptions?\n return os.path.join(__opts__[\"cachedir\"], \"s3cache\")",
"def _get_cache_dir(self):\n return self.manager.cache_dir",
"def cachedir() -> Path:\n if os.environ.get(\"TEST_TMPDIR\"):\n return Path(os.environ[\"TEST_TMPDIR\"])\n else:\n return Path(\"~/.cache/programl\").expanduser()",
"def cache_directory(self):\n return ('/var/cache/npm-accel'\n if os.getuid() == 0 and os.access('/var/cache', os.W_OK)\n else parse_path('~/.cache/npm-accel'))",
"def cache_directory(self) -> str:\n # TODO: Find better solution than local import?\n from settings import CACHE_DIR\n return os.path.join(\n CACHE_DIR,\n self.name.lower())",
"def get_cache_file_path(self) -> str:\n return self.cache_file_path",
"def cache_path(cls):\n system_cache_path = SystemCachePath()\n\n if sys.platform.startswith('win'):\n cache_directory = system_cache_path.cache_path_win()\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager', 'Cache')\n else:\n if sys.platform == 'darwin':\n cache_directory = system_cache_path.cache_path_macos()\n else:\n cache_directory = system_cache_path.cache_path_unix()\n\n return os.path.join(cache_directory, 'Espressif', 'ComponentManager')",
"def host_cache_dir(self):\n cache_dir = Transformer.model_class_dir() / \"cache\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n return cache_dir",
"def _get_cache_filename(self):\n home_dir = os.path.expanduser(\"~\")\n filename = 'dbcollection.json'\n return os.path.join(home_dir, filename)",
"def rdap_info_cache_directory() -> str:\n current_path = Path(__file__).resolve().parent\n return os.path.join(current_path, 'cache', 'rdap')",
"def cache_path(self):\n benchmark_name, image_set_name, _ = self.name.rsplit(\"_\", 2)\n cache_path = os.path.join(self._cache_path,'{}_{}_cache'.format(benchmark_name, image_set_name))\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path",
"def output_directory(self):\n if self._output_directory is None:\n cache_filename = self._original_cache\n output_directory = settings.cache_folder / cache_filename\n output_directory.makedirs_p()\n self._output_directory = output_directory.expand()\n return Path(self._output_directory)",
"def _get_default_cache_dir(self):\n default_cache_dir = os.path.join(os.path.expanduser(\"~\"), 'dbcollection')\n return default_cache_dir",
"def default_cache_dir() -> str:\n running_on_colab = 'google.colab' in sys.modules\n if running_on_colab:\n base_dir = '/tmp'\n else:\n base_dir = os.path.expanduser('~')\n cache_dir = os.path.join(base_dir, '.cache/fedjax')\n return cache_dir",
"def get_preference_file_cache_destination_path():\n\n return read_preference_key(search_key=\"cache_manager_cache_path\")",
"def _get_pycache_path(spec: ModuleSpec) -> Path:\n # Refuse to do __pycache__ caching for anything in PREFIX.\n # FIXME: Not sure if this is the right policy.\n if is_subpath(spec.origin, sys.prefix):\n raise CannotCache(spec.name)\n\n # Find out where the module cache file goes.\n mod_cache_path = importlib.util.cache_from_source(spec.origin)\n # Put the odoc cache next to it.\n *_, name = spec.name.rsplit(\".\", 1)\n return Path(mod_cache_path).parent / (name + \".supdoc\")",
"def get_cache_destination_path():\n\n # if env variable is set\n if (_MANAGER_CACHE_DESTINATION and\n os.path.exists(_MANAGER_CACHE_DESTINATION)):\n return _MANAGER_CACHE_DESTINATION\n\n # if pref file exists\n cache_path = get_preference_file_cache_destination_path()\n if cache_path:\n return cache_path\n\n # returns temp. folder\n return os.getenv(\"TMPDIR\")",
"def ivy_cache_dir(self):\r\n return self._ivy_cache_dir",
"def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)",
"def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret",
"def get_local_cache_folder(self):\n\n return self._cache_folder"
]
| [
"0.8008654",
"0.77709967",
"0.77709967",
"0.7667538",
"0.7667538",
"0.7616764",
"0.75312096",
"0.75106233",
"0.74233025",
"0.7411959",
"0.73736256",
"0.7335555",
"0.7317151",
"0.7300587",
"0.7297673",
"0.7265668",
"0.7265422",
"0.7102412",
"0.7053087",
"0.7030431",
"0.69598114",
"0.6907279",
"0.69034207",
"0.6883293",
"0.6813712",
"0.6766881",
"0.6734015",
"0.67050976",
"0.6692526",
"0.66341406"
]
| 0.81167746 | 0 |
Returns a cache that stores its files in the given directory `dir`. | def DirCache(dir: Path) -> Cache:
dir = Path(dir)
return Cache(lambda spec: dir / spec.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cachefile(filename):\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n return os.path.join(cachedir, filename)",
"def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"",
"def cache_path(self):",
"def cache_path(self):",
"def cache(*filepath):\n expected_dir = os.path.join(config.cache_dir, \"/\".join(filepath))\n if not os.path.exists(expected_dir):\n raise FileNotFoundError(\"Couldn't find {}\".format(expected_dir))\n return expected_dir",
"def use_cached_files(self, cache_key):\r\n pass",
"def cacheDir(readDirPath, writeDirPath=None):\n imageFileNames = sorted(os.listdir(readDirPath))\n cache = {\n 'readDir': readDirPath,\n 'writeDir': writeDirPath or readDirPath\n }\n for fileName in imageFileNames:\n if fileName[-4:] != '.jpg':\n continue\n cacheInsert(cache, fileName)\n return cache",
"def _cachedir(self, url):\n\n md5 = hashlib.md5(url.encode('utf-8')).hexdigest()\n cachedir = self.directory / md5[0:2] / md5\n\n if not cachedir.exists():\n logger.debug(f\"Creating cache directory {cachedir}\")\n cachedir.mkdir(parents=True)\n with open(cachedir / \"url\", 'w') as f:\n f.write(url)\n\n return cachedir",
"def get_cache_path(self):",
"def get_cache_path(self):",
"def _cache(self, path):\n # We import these here because importing them is slow and\n # a significant fraction of numpy's total import time.\n import shutil\n from urllib.request import urlopen\n\n upath = self.abspath(path)\n\n # ensure directory exists\n if not os.path.exists(os.path.dirname(upath)):\n os.makedirs(os.path.dirname(upath))\n\n # TODO: Doesn't handle compressed files!\n if self._isurl(path):\n with urlopen(path) as openedurl:\n with _open(upath, 'wb') as f:\n shutil.copyfileobj(openedurl, f)\n else:\n shutil.copyfile(path, upath)\n return upath",
"def getCacheFile(ns, digest):\n return os.path.join(getDir(cacheDir, ns), digest)",
"def load_cached(cache_path, in_dir):\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n\n cache=Cache()\n dataset = cache.cache_data(cache_path=cache_path,\n fn=Dataset, in_dir=in_dir)\n\n return dataset",
"def _get_cache_dir():\n\n # Or is that making too many assumptions?\n return os.path.join(__opts__[\"cachedir\"], \"s3cache\")",
"def cache_directory(self) -> str:\n # TODO: Find better solution than local import?\n from settings import CACHE_DIR\n return os.path.join(\n CACHE_DIR,\n self.name.lower())",
"def _get_cache_dir(self):\n return self.manager.cache_dir",
"def cached(cache_path, generator):\n if path.exists(cache_path):\n with open(cache_path, 'rb') as f:\n return pickle.load(f)\n output = generator()\n with open(cache_path, 'wb+') as f:\n pickle.dump(output, f)\n return output",
"def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))",
"def get_cache_file(cls, root_dir, constants):\n return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))",
"def cache_path(self):\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path",
"def __init__(self, dir_path: str, rel_cache_path: str = \".cache\"):\n self.dir_path = dir_path\n self.cache_path = os.path.join(dir_path, rel_cache_path)",
"def cache(cachedir=None):\n if cachedir is not None:\n os.environ['VIPY_CACHE'] = remkdir(cachedir)\n GLOBAL['CACHE'] = cachedir\n return os.environ['VIPY_CACHE'] if 'VIPY_CACHE' in os.environ else None",
"def __init__(self, cachedir):\n\n self._cachedir = os.path.abspath(cachedir)\n self._queue_path = os.path.join(self._cachedir, 'thumbnails.txt')\n self._queue = []\n\n try:\n os.makedirs(self._cachedir)\n except (IOError, OSError):\n pass",
"def __init__(self,cacheLocation):\n self.cacheLocation = cacheLocation\n if not os.path.exists(self.cacheLocation):\n os.mkdir(self.cacheLocation)",
"def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data",
"def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator",
"def cache_file(self, repo):\n token = blake2b(repo.location.encode()).hexdigest()[:10]\n dirname = f\"{repo.repo_id.lstrip(os.sep)}-{token}\"\n return pjoin(self.options.cache_dir, \"repos\", dirname, self.cache.file)",
"def set_cache(config):\n config[\"cache\"] = _get_remote_files(config)\n return config",
"def host_cache_dir(self):\n cache_dir = SpaCyModel.model_class_dir() / \"cache\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n return cache_dir",
"def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret"
]
| [
"0.7048509",
"0.6932892",
"0.69263685",
"0.69263685",
"0.6910586",
"0.68598944",
"0.68423784",
"0.6811715",
"0.67268217",
"0.67268217",
"0.66995484",
"0.6692884",
"0.66060275",
"0.6589656",
"0.65850544",
"0.6582661",
"0.6568272",
"0.6557517",
"0.6557517",
"0.6490931",
"0.64549303",
"0.6454123",
"0.6449307",
"0.6433216",
"0.6366129",
"0.6349184",
"0.63287175",
"0.63259286",
"0.63230145",
"0.63073385"
]
| 0.7716556 | 0 |
Caches modules in `modnames` and their submodules. | def cache_modules(*modnames) -> None:
inspector = Inspector()
modnames = { n for m in modnames for n in find_submodules(m) }
for modname in modnames:
logging.debug(f"inspecting: {modname}")
objdoc = inspector.inspect_module(modname)
logging.debug(f"writing cache: {modname}")
try:
PYCACHE[modname] = objdoc
except CannotCache as exc:
logging.warning(f"cannot cache: {exc}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)",
"def _refresh_cache():\n global _num_types, _num_funcs\n\n num_types = interrogate_number_of_global_types()\n num_funcs = interrogate_number_of_functions()\n\n if num_types != _num_types:\n for i in range(num_types):\n itype = interrogate_get_global_type(i)\n if interrogate_type_outer_class(itype):\n continue\n modname = interrogate_type_module_name(itype)\n _modules.add(modname)\n _store_type(modname, itype)\n\n _num_types = num_types\n\n if num_funcs != _num_funcs:\n for i in range(num_funcs):\n ifunc = interrogate_get_function(i)\n parent = interrogate_function_class(ifunc)\n if not parent:\n parent = interrogate_function_module_name(ifunc)\n _modules.add(parent)\n\n # Store it by both the original and mangled name.\n name = interrogate_function_name(ifunc)\n mangled_name1 = _translate_function_name(name, False)\n _func_cache[(parent, mangled_name1)] = ifunc\n if not name.startswith('~'):\n mangled_name2 = _translate_function_name(name, True)\n _func_cache[(parent, mangled_name2)] = ifunc\n\n _num_funcs = num_funcs",
"def rimport(self, modulename):\r\n if modulename not in self.module_cache:\r\n module = self.sync_request(\"handle_import\", modulename)\r\n self.module_cache[modulename] = module\r\n return self.module_cache[modulename]",
"def refresh(self):\n self.modules.clear()\n module_files = []\n module_paths = os.environ['MAYA_MODULE_PATH'].split(os.pathsep)\n for p in module_paths:\n try:\n module_files += [os.path.join(p, x).replace(os.sep, os.altsep or os.sep) for x in os.listdir(p) if\n x.lower()[-3:] == \"mod\"]\n except OSError:\n pass # ignore bad paths\n for eachfile in module_files:\n for eachmod in self.parse_mod(eachfile):\n self.modules[\"{0.name} ({0.version})\".format(eachmod)] = eachmod",
"def reload(*mods):\n for mod in mods:\n importlib.reload(importlib.import_module(mod))",
"def getRootModules():\n modules = []\n if ip.db.has_key('rootmodules'):\n return ip.db['rootmodules']\n t = time()\n store = False\n for path in sys.path:\n modules += moduleList(path) \n if time() - t >= TIMEOUT_STORAGE and not store:\n store = True\n print \"\\nCaching the list of root modules, please wait!\" \n print \"(This will only be done once - type '%rehashx' to \" + \\\n \"reset cache!)\"\n print\n if time() - t > TIMEOUT_GIVEUP:\n print \"This is taking too long, we give up.\"\n print\n ip.db['rootmodules'] = []\n return []\n \n modules += sys.builtin_module_names\n \n modules = list(set(modules))\n if '__init__' in modules:\n modules.remove('__init__')\n modules = list(set(modules))\n if store:\n ip.db['rootmodules'] = modules\n return modules",
"def update_modules(self) -> None:\n\n matches = apache_util.parse_modules(self.configurator.options.get_modules_cmd)\n for mod in matches:\n self.add_mod(mod.strip())",
"def __put_module_in_sys_cache(module_name, module_obj):\n #try:\n #if hasattr(sys, 'stypy_module_cache'):\n sys.stypy_module_cache[module_name] = module_obj\n # else:\n # __preload_sys_module_cache()\n # sys.stypy_module_cache[module_name] = module_obj\n # except:\n # pass\n # finally:\n # return None",
"def patch_sys_modules(\n modules=default_patches\n) -> Generator[None, None, None]:\n cache = {}\n for mod, name in modules:\n try:\n cache[(mod, name)] = getattr(mod, name)\n setattr(mod, name, raiser)\n except Exception as ex:\n logger.debug(f'Failed to replace module {name}, {ex}')\n\n try:\n yield\n finally:\n # replace the references\n for (mod, name), method in cache.items():\n setattr(mod, name, method)",
"def add_mod(self, mod_name: str) -> None:\n if mod_name + \"_module\" not in self.modules:\n self.modules[mod_name + \"_module\"] = None\n if \"mod_\" + mod_name + \".c\" not in self.modules:\n self.modules[\"mod_\" + mod_name + \".c\"] = None",
"def reset_modules(self) -> None:\n self.modules = {}\n self.update_modules()\n self.parse_modules()",
"def parse_modules(self) -> None:\n mods: Dict[str, str] = {}\n matches = self.find_dir(\"LoadModule\")\n iterator = iter(matches)\n # Make sure prev_size != cur_size for do: while: iteration\n prev_size = -1\n\n while len(mods) != prev_size:\n prev_size = len(mods)\n\n for match_name, match_filename in zip(\n iterator, iterator):\n mod_name = self.get_arg(match_name)\n mod_filename = self.get_arg(match_filename)\n if mod_name and mod_filename:\n mods[mod_name] = mod_filename\n mods[os.path.basename(mod_filename)[:-2] + \"c\"] = mod_filename\n else:\n logger.debug(\"Could not read LoadModule directive from Augeas path: %s\",\n match_name[6:])\n self.modules.update(mods)",
"def modules():",
"async def reload_modules(self) -> bool:\n self.reloading_modules = True\n newmodules = await self.detect_modules()\n todrop = []\n toload = []\n\n # Logs!\n errors = False\n\n for name, module in self.modules.items():\n if module.loaded:\n if hasattr(module.module, \"unload\"):\n try:\n await module.module.unload(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while unloading module {name}.\")\n errors = True\n\n if name not in newmodules:\n LOGGER.debug(f\"Dropping removed module {name}.\")\n if hasattr(module.module, \"shutdown\"):\n try:\n await module.module.shutdown(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while shutting down module {name}.\")\n errors = True\n\n todrop.append(module)\n continue\n\n newmodules.remove(name)\n module.handlers = {}\n try:\n importlib.reload(module.module)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while reloading module {name}.\")\n todrop.append(module)\n errors = True\n continue\n\n toload.append(module)\n module.loaded = True\n\n # Loops over NEW modules. Because we can't just reload them.\n for name in newmodules:\n newmod = MModule(name)\n self.modules[name] = newmod\n\n try:\n mod = importlib.import_module(name)\n except:\n LOGGER.exception(\n f\"Hit an exception while loading module {name}.\")\n # Alas it was not meant to be.\n del self.modules[name]\n errors = True\n continue\n\n newmod.module = mod\n toload.append(newmod)\n\n newmod.loaded = True\n for server in self.servers.values():\n server.modules[name] = newmod\n #LOGGER.info(f\"$BLUESuccessfully loaded module $WHITE{name}$BLUE.\")\n\n for module in toload:\n if hasattr(module.module, \"load\"):\n try:\n await module.module.load(self.client.loop)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while load()ing module {module.name}.\")\n errors = True\n\n for module in todrop:\n for server in self.servers.values():\n if module.name in server.modules:\n del server.modules[module.name]\n\n del self.modules[module.name]\n\n self.reloading_modules = False\n\n for handler in self.temp_module_handlers:\n try:\n if handler.module in self.modules:\n self.register_handler(handler)\n\n else:\n LOGGER.warning(f\"Attempted to late-register for nonexistant module: {handler.module}/{handler.name}\")\n\n except:\n LOGGER.exception(\n f\"Exception while registering handler {handler.module}/{handler.name}!\")\n errors = True\n\n self.temp_module_handlers = []\n\n return errors",
"def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):\n with self.bulk_operations(course_key, emit_signals=False):\n new_module_data = {}\n for block_id in base_block_ids:\n new_module_data = self.descendants(\n system.course_entry.structure['blocks'],\n block_id,\n depth,\n new_module_data\n )\n\n # This method supports lazy loading, where the descendent definitions aren't loaded\n # until they're actually needed.\n if not lazy:\n # Non-lazy loading: Load all descendants by id.\n descendent_definitions = self.get_definitions(\n course_key,\n [\n block.definition\n for block in new_module_data.values()\n ]\n )\n # Turn definitions into a map.\n definitions = {definition['_id']: definition\n for definition in descendent_definitions}\n\n for block in new_module_data.values():\n if block.definition in definitions:\n definition = definitions[block.definition]\n # convert_fields gets done later in the runtime's xblock_from_json\n block.fields.update(definition.get('fields'))\n block.definition_loaded = True\n\n system.module_data.update(new_module_data)\n return system.module_data",
"def make_modules_importable(modules: Iterable[Module]) -> Dict[str, Module]:\n sys.modules.update({ module.__name__: module for module in modules })\n return sys.modules",
"def create_modules(*names):\n source = 'attr = {0!r}'\n created_paths = []\n mapping = {}\n state_manager = None\n uncache_manager = None\n try:\n temp_dir = tempfile.mkdtemp()\n mapping['.root'] = temp_dir\n import_names = set()\n for name in names:\n if not name.endswith('__init__'):\n import_name = name\n else:\n import_name = name[:-len('.__init__')]\n import_names.add(import_name)\n if import_name in sys.modules:\n del sys.modules[import_name]\n name_parts = name.split('.')\n file_path = temp_dir\n for directory in name_parts[:-1]:\n file_path = os.path.join(file_path, directory)\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n created_paths.append(file_path)\n file_path = os.path.join(file_path, name_parts[-1] + '.py')\n with open(file_path, 'w') as file:\n file.write(source.format(name))\n created_paths.append(file_path)\n mapping[name] = file_path\n uncache_manager = util.uncache(*import_names)\n uncache_manager.__enter__()\n state_manager = util.import_state(path=[temp_dir])\n state_manager.__enter__()\n yield mapping\n finally:\n if state_manager is not None:\n state_manager.__exit__(None, None, None)\n if uncache_manager is not None:\n uncache_manager.__exit__(None, None, None)\n support.rmtree(temp_dir)",
"def __get_modules(self, data: dict):\n\n v = Validator([(data[\"modules\"], list)])\n for module in data[\"modules\"]:\n mod_data = load_configuration(module)\n mod_path = Path(mod_data[\"path\"])\n self.__cache[mod_path.stem] = (\n load_class(\n mod_data[\"path\"],\n mod_path.stem,\n mod_data[\"class_name\"],\n ),\n data[\"args\"][module]\n if \"args\" in data and module in data[\"args\"]\n else {},\n )\n for mod_folder in [\n a.stem.lower() for a in mod_path.parents\n ]: # check if parent folder is android\n if mod_folder == \"android\": # to know if android or not\n self.__cache[mod_path.stem][0].is_android = True",
"def __enter__(self):\n if self.plugin_name == 'sideboard':\n return\n \n self._original_path = sys.path\n self._original_modules = sys.modules.copy()\n self.original_keys = set(self._original_modules.keys())\n\n #TODO: determine if there a sufficiently negative performance implication\n # to rethink doing this in recursive imports\n sys.path = _path_cache[self.plugin_name] + sys.path\n\n #This really does need to be an update in place.\n #Setting sys.modules = SOME_NEW_DICTIONARY means that\n # imports still write to the original sys.modules\n sys.modules.update(_module_cache[self.plugin_name])",
"def _update_extension_modules(self):\r\n for plugin in self.plugins.values():\r\n plugin.set_pref('extension_modules',\r\n self._submods_thread.submods)",
"def _get_modules(self, names):\n loaded_modules = []\n for name in names:\n loaded_modules.append(sys.modules[name])\n return loaded_modules",
"def _reload(mod,larch=None,**kw):\n\n if isinstance(mod, str):\n return larch.import_module(mod, do_reload=True)\n\n for k,v in chain(larch.symtable._sys.modules.iteritems(), sys.modules.iteritems()):\n if v == mod:\n modname = k\n break\n try:\n return larch.import_module(modname,do_reload=True)\n except NameError:\n pass",
"def _swap_child_modules(\n module: torch.nn.Module,\n static_mappings: Dict[Callable, Any],\n dynamic_mappings: Dict[Callable, Any],\n) -> None:\n\n reassign = {}\n for name, mod in module.named_children():\n # both fused modules and observed custom modules are\n # swapped as one unit\n if not isinstance(mod, _FusedModule):\n _swap_child_modules(mod, static_mappings, dynamic_mappings)\n\n qconfig = getattr(mod, 'qconfig', None)\n if not qconfig:\n continue\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n op_int8_dynamically_quantized = op_is_int8_dynamically_quantized(qconfig)\n if activation_int8_quantized:\n if not type(mod) in static_mappings:\n continue\n reassign[name] = swap_module(mod, static_mappings, {})\n elif op_int8_dynamically_quantized:\n if not type(mod) in dynamic_mappings:\n continue\n reassign[name] = swap_module(mod, dynamic_mappings, {})\n # TODO(future PR): add support for other dtypes\n\n for key, value in reassign.items():\n module._modules[key] = value",
"def pin_gitmodules(self):\n self.gitmodules_status = gitutil.save_submodule_status(self.dist_dir)",
"def test_import_when_cache_exists(enaml_module):\n name, folder, _ = enaml_module\n assert name not in sys.modules\n with imports():\n importlib.import_module(name)\n\n assert name in sys.modules\n del sys.modules[name]\n\n cache_folder = os.path.join(folder, '__enamlcache__')\n assert os.path.isdir(cache_folder)\n cache_name = os.listdir(cache_folder)[0]\n cache_path = os.path.join(cache_folder, cache_name)\n cache_time = os.path.getmtime(cache_path)\n\n with imports():\n importlib.import_module(name)\n\n assert os.path.getmtime(cache_path) == cache_time\n assert name in sys.modules",
"def install_modules(self, capability=None, name=None):\n repositories = self.weboob.repositories\n # Update modules list\n repositories.update_repositories(DummyProgress())\n # Get module infos\n if name:\n modules = {name: repositories.get_module_info(name)}\n else:\n modules = repositories.get_all_modules_info(capability)\n # Install modules if required\n for infos in modules.values():\n if infos is not None and (\n not infos.is_installed() or\n not infos.is_local()\n ):\n try:\n repositories.install(infos, progress=DummyProgress())\n except ModuleInstallError as exception:\n logger.info(str(exception))\n return {\n module_name: dict(infos.dump())\n for module_name, infos in modules.items()\n if infos.is_installed()\n }",
"def simplify_(self):\n for base_mod_name in sorted(self.base_mod_map.keys(), reverse=True):\n mapped_mod_name = self.base_mod_map.pop(base_mod_name) # just temporarily\n if not self.should_wrap_mod_name(base_mod_name):\n # Need it, put back in.\n self.base_mod_map[base_mod_name] = mapped_mod_name\n else:\n # Some other entry maps.\n mapped_mod_name_ = self.map_mod_name(base_mod_name)\n if mapped_mod_name != mapped_mod_name_:\n # Need it, put back in.\n self.base_mod_map[base_mod_name] = mapped_mod_name\n else:\n pass # Can keep it that way.",
"def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))",
"def _import_all(self):\n # on first load, documents dir may not be in import path\n if not self.app.documents_dir in sys.path:\n sys.path += [self.app.documents_dir]\n # clean modules dict before (re)loading anything\n self._remove_non_current_game_modules()\n # make copy of old modules table for import vs reload check\n old_modules = self.modules.copy()\n self.modules = {}\n # load/reload new modules\n for module_name in self._get_game_modules_list():\n try:\n # always reload built in modules\n if module_name in self.builtin_module_names or \\\n module_name in old_modules:\n m = importlib.reload(old_modules[module_name])\n else:\n m = importlib.import_module(module_name)\n self.modules[module_name] = m\n except Exception as e:\n self.app.log_import_exception(e, module_name)",
"def get_modules(name_only=False):\n\n mods = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM modules ')\n\n for mod in cur.execute(sql):\n mods.append(mod[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM modules '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_MODULE\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n mods.append(item)\n\n return mods"
]
| [
"0.65368474",
"0.647988",
"0.60674167",
"0.6010362",
"0.5922582",
"0.5811887",
"0.5756406",
"0.57526934",
"0.5709762",
"0.5666927",
"0.5654441",
"0.5649179",
"0.5631578",
"0.5589954",
"0.5557051",
"0.5550639",
"0.5539117",
"0.54829466",
"0.5482664",
"0.5446342",
"0.5420631",
"0.54182565",
"0.54100937",
"0.53902954",
"0.5365596",
"0.5358046",
"0.53351784",
"0.53285944",
"0.52956223",
"0.528515"
]
| 0.82407486 | 0 |
Create a driver to the specified Appium server & return driver, touch objects. | def create_driver(self, app_server):
config = self.config[app_server]
cmd = config['CMD']
server_name = config['NAME']
log_file_name = config['LOG_FILE_NAME']
full_log_path = os.path.join(os.environ['basedir'], 'logs', 'appium', log_file_name)
url = config['URL']
desired_cap = config['DESIRED_CAP']
self.mobile_name = config['MOBILE_NAME']
with open(full_log_path, "w") as file:
subprocess.Popen(cmd, shell=True, stdout=file, stderr=subprocess.STDOUT)
LOGGER.info("{name} started !".format(name=server_name))
try:
self.driver = webdriver.Remote(url, desired_cap)
self.touch = TouchAction(self.driver)
LOGGER.info("Connected to {mob}".format(mob=self.mobile_name))
except WebDriverException:
LOGGER.error("{dev} is not connected!".format(
dev=self.mobile_name))
time.sleep(3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def appium_init(self):\n desired_cups = {}\n desired_cups['platformName'] = 'Android'\n desired_cups['platformVersion'] = android_version\n desired_cups['deviceName'] = device_name\n desired_cups['appPackage'] = pkg_name\n desired_cups['appActivity'] = activity\n desired_cups['autoLaunch'] = 'false'\n desired_cups['noReset'] = 'true'\n desired_cups['automationName'] = 'uiautomator2'\n driver = webdriver.Remote('http://127.0.0.path:4723/wd/hub', desired_cups)\n global driver\n return driver",
"def getWebDriverInstance(self):\n #baseURL = \"https://qa-test.avenuecode.com/\"\n if self.device == \"ios_mobile\":\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n #return driver\n #driver.get(baseURL)\n\n else:\n print(\"Hello\")\n # Setting Driver Implicit Time out for An Element\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n return self.driver\n # Maximize the window\n #driver.maximize_window()\n # Loading browser with App URL\n #driver.get(baseURL)\n #return driver",
"def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", \"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver",
"def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", \"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver",
"def initialize(self, device, platform, version, app, package, activity):\n desired_caps = dict()\n desired_caps['platformName'] = platform\n desired_caps['platformVersion'] = version\n desired_caps['deviceName'] = device\n desired_caps['app'] = app\n desired_caps['appPackage'] = package\n desired_caps['appActivity'] = activity\n server_url = 'http://{0}:{1}/wd/hub'.format(APPIUM_SERVER, APPIUM_PORT)\n print server_url\n self.driver = webdriver.Remote(server_url, desired_caps)\n print DEBUG_STRING, self.driver, id(self.driver)\n self.driver.implicitly_wait(5)\n return self.driver",
"def create_app(self):\n coverage.process_startup()\n app = create_test_app()\n selenium_server_url = \"http://{}:{}/wd/hub\".format(\n os.environ.get(\"{{ cookiecutter.project_slug|upper }}_SELENIUM_HOST\", \"chrome\"),\n os.environ.get(\"{{ cookiecutter.project_slug|upper }}_SELENIUM_PORT\", \"4444\"),\n )\n self.browser = Remote(\n command_executor=selenium_server_url,\n desired_capabilities=DesiredCapabilities.CHROME.copy(),\n )\n self.browser.implicitly_wait(3)\n return app",
"def start_server_and_driver(path_to_driver, path_to_browser):\n \n server = service.Service(path_to_driver)\n server.start()\n\n capabilities = {'chrome.binary': path_to_browser}\n driver = webdriver.Remote(server.service_url, capabilities)\n \n return server, driver",
"def driver(self):\n from dallinger.config import get_config\n\n config = get_config()\n if not config.ready:\n config.load()\n driver_url = config.get(\"webdriver_url\", None)\n driver_type = config.get(\"webdriver_type\")\n driver = None\n\n if driver_url:\n capabilities = CAPABILITY_MAP.get(driver_type.lower())\n if capabilities is None:\n raise ValueError(\n \"Unsupported remote webdriver_type: {}\".format(driver_type)\n )\n driver = webdriver.Remote(\n desired_capabilities=capabilities, command_executor=driver_url\n )\n else:\n driver_class = DRIVER_MAP.get(driver_type.lower())\n if driver_class is not None:\n kwargs = {}\n if driver_type.lower() == \"chrome_headless\":\n from selenium.webdriver.chrome.options import Options\n\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n kwargs = {\"options\": chrome_options}\n driver = driver_class(**kwargs)\n\n if driver is None:\n raise ValueError(\"Unsupported webdriver_type: {}\".format(driver_type))\n\n driver.set_window_size(1024, 768)\n logger.info(\"Created {} webdriver.\".format(driver_type))\n return driver",
"def launch_application(path) -> WebDriver:\n try:\n exp_cap = {\n \"deviceName\": \"WindowsPC\",\n \"app\": path}\n exp_session = webdriver.Remote(\n command_executor='http://127.0.0.1:4723',\n desired_capabilities=exp_cap)\n return exp_session\n except Exception as ex:\n logger.info(f':Error in launching app ERROR: {ex}')",
"def _create_driver(self, config):\n raise NotImplementedError(\"Must override WebAccess::_create_driver.\")",
"def aisappium_set_driver_instance(self, oAppiumInfo):\n self._cache.current = oAppiumInfo.driver",
"def create_driver():\n\n request_data = request.get_json()\n if not request_data.get('name') or not request_data.get('name').strip():\n return Response.error(KEY_REQUIRED.format('name'), 400)\n\n if not request_data.get('license_number') or not request_data.get('license_number').strip():\n return Response.error(KEY_REQUIRED.format('license_number'), 400)\n\n if not request_data.get('motorcycle_id'):\n return Response.error(KEY_REQUIRED.format('motorcycle_id'), 400)\n\n motorcycle = Motorcycle.query.filter_by(\n id=request_data.get('motorcycle_id')).first()\n\n if not motorcycle:\n return Response.error(MOTORCYCLE_NOT_EXIST, 400)\n\n new_driver = Driver(**request_data_strip(request_data))\n new_driver.save()\n driver_schema = DriverSchema()\n response_data = {\n 'driver': driver_schema.dump(new_driver)\n }\n return Response.success(DRIVER_CREATED, response_data, 201)",
"def start(self):\n\n #print 'start tcp port 8080 forwarding'\n subprocess.call(r'%s forward tcp:%d tcp:8080'%(self.adbCmd,self.port),shell=True)\n\n\n # this is not mandatory as we already killed adb server, but this could \n # decrease the webview created in andriod server application. maybe\n # it's a bug to create one webview per launch of app?\n #print 'stop existing android server by sending back key'\n for i in xrange(4):\n subprocess.call(r'%s shell input keyevent 4'%self.adbCmd,shell=True)\n\n #print 'start android server activity'\n output=subprocess.check_output(r'%s shell am start -n %s'%(self.adbCmd,\n Service.ANDROID_DRIVER_CLIENT_APP_CMP),\n stderr=subprocess.STDOUT,shell=True).split()\n if len(output)> 5: #if app not installed, there would be error messages\n raise WebDriverException(\"\"\"AndroidDriver needs to be installed on device.\n Download android-server-2.x.apk from\n http://code.google.com/p/selenium/downloads/list\"\"\")\n # wait for WebDriver Client to be launched completely\n time.sleep(2)\n print \"AndroidDriver started on device %s\" % repr(self.device)",
"def connect():\n\n driver = webdriver.Chrome(driver_exe) # Run the simulated chrome driver\n driver.get(url) # go to the whatsapp web page\n driver.implicitly_wait(10) # wait a little to make sure the page loads\n return driver",
"def connect_firefox_driver(headless):\n #initialise chrome options\n options = FirefoxOptions()\n #set headless option on driver\n options.headless = headless\n #initialise driver\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options)\n return driver",
"def setup_selenium():\n # Define the options to run with headless mode enabled\n options = Options()\n options.headless = True\n\n # Instatiate the browser object here, pointing at an exectable location which should be located in the same\n # base directory as the script\n driver = webdriver.Firefox(options=options, executable_path='./geckodriver')\n\n # Impicit wait tell the browser to wait up to 30s for an object to appear, this helps if the connection is slow.\n driver.implicitly_wait(30)\n return driver",
"def create_device():\n sonyapilib.device.TIMEOUT = 0.1\n device = SonyDevice(\"test\", \"test\")\n device.api_version = 3\n device.cookies = jsonpickle.decode(read_file(\"data/cookies.json\"))\n return device",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def createDriver(self, browser, driverPath, headless=None):\n\n self.headless = headless\n\n if browser == \"Edg\":\n edge_options = EdgeOptions()\n if self.headless:\n # make Edge headless\n edge_options.use_chromium = True\n edge_options.add_argument(\"headless\")\n edge_options.add_argument(\"disable-gpu\")\n edge_options.add_argument(\"--log-level=3\")\n edge_options.add_experimental_option(\n 'excludeSwitches',\n ['enable-logging']\n )\n # edge_options.page_load_strategy(\"eager\")\n self.driver = Edge(\n executable_path=str(driverPath),\n options=edge_options\n )\n elif browser == \"Chrome\":\n chrome_options = Options()\n if self.headless:\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--log-level=3\")\n chrome_options.add_experimental_option(\n 'excludeSwitches',\n ['enable-logging']\n )\n # chrome_options.page_load_strategy(\"eager\")\n # don't know the chrome command\n self.driver = webdriver.Chrome(\n executable_path=str(driverPath),\n options=chrome_options\n )\n else:\n print(\"Browser not supported yet\")\n\n self.driver.set_window_size(1800, 1080)\n self.driver.set_page_load_timeout(100000)\n\n return self.driver",
"def aisappium_get_driver_instance(self):\n return self._current_application()",
"def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.resin.models.application.create(app_name, device_type)\n return app, self.resin.models.device.register(app['id'], self.resin.models.device.generate_uuid())",
"def connect(self):\n\n log.info('Connecting to device \"{0}\" using {1} at \"{2}\".'.format(\n self.name, self.driver, self.connection_resource))\n\n if self.driver == drivers.pyvisa:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n self.device = pyvisa.Instrument(**self.connection_resource)\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.telnet:\n self.device = telnetlib.Telnet(\n timeout=2, **self.connection_resource)\n elif self.driver == drivers.requests:\n r = requests.get(self.request_address)\n if r.status_code != 200:\n raise DeviceNotFoundError(\n 'Could not connect to device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.lgpib:\n try:\n self.device = Gpib.Gpib(**self.connection_resource)\n except gpib.GpibError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif self.driver == drivers.pyvisa_usb:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n class USBDevice(pyvisa.Instrument):\n \"\"\"\n Using USB devices with PyVISA requires a small hack: the object must be an Instrument, but we can't call Instrument.__init__.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Bypass the initialization in visa.Instrument, due to \"send_end\" not being valid for USB.\n pyvisa.ResourceTemplate.__init__(\n self, *args, **kwargs)\n\n self.device = USBDevice(**self.connection_resource)\n\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n try:\n self._connected()\n except Exception as e:\n raise DeviceNotFoundError('Could not finish connection to device at \"{0}\".'.format(\n self.connection_resource), e)",
"def get_driver(drv):\n return GenericDriver.get_driver(drv)",
"def __setup_driver(driver_type: str) -> webdriver:\n if driver_type == \"chrome\":\n return __setup_chrome()\n if driver_type == \"edge\":\n return __setup_edge()\n if driver_type == \"safari\":\n return __setup_safari()\n if driver_type == \"firefox\":\n return __setup_firefox()",
"def build_driver(options: Options = None, headless=True, timeout=20) -> webdriver:\n\n if options is not None:\n driver = webdriver.Chrome(options=options)\n else:\n default_options = get_default_driver_options()\n default_options.headless = headless\n driver = webdriver.Chrome(options=default_options)\n\n driver.set_page_load_timeout(timeout)\n return driver",
"def init_webdriver(driver_name=None, headless=False):\n\n if driver_name == \"chrome\":\n try:\n #try to connect with chrome driver\n driver = connect_chrome_driver(headless)\n except:\n print_pretty(\"Sorry, you can't use chrome driver, please try another driver!\")\n sys.exit(0)\n elif driver_name == \"ie\":\n try:\n #try to connect with ie driver\n driver = connect_ie_driver(headless)\n except:\n print_pretty(\"Sorry, you can't use internet explorer driver, please try another driver!\")\n sys.exit(0)\n elif driver_name == \"firefox\":\n try:\n #try to connect with firefox driver\n driver = connect_firefox_driver(headless)\n except:\n print_pretty(\"sorry, you can't use firefox driver, please try another driver!\")\n sys.exit(0)\n else:\n print_pretty(\"No browser selected, please choose 'chrome', 'ie' or 'firefox'\")\n return None\n\n print_pretty(\"Selenium driver\", driver_name, \"sucessfully initialised\")\n return driver",
"def get_driver():\n # user_agent = user_agent_rotator.get_random_user_agent()\n try:\n if len(sys.argv) > 1:\n if Driver._driver is None:\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--profile-directory=Default')\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_argument(\"--disable-plugins-discovery\")\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument(f'--user-agent={USER_AGENT}')\n Driver._driver = webdriver.Chrome(executable_path=sys.argv[1], chrome_options=chrome_options)\n logging.info(f\"Driver loaded succesfully from {sys.argv[1]}\")\n return Driver._driver\n else:\n logging.error(f\"Driver cannot be found. User entered: {sys.argv[1]}\")\n raise Exception(\"Please provide a path to the chromedriver\")\n\n except Exception as e:\n logging.error(f\"Driver cannot be found. User entered: {sys.argv[1]}\")\n raise FileNotFoundError(\"Could not execute webdriver. Make sure you provided the correct path to the \"\n \"chromedriver\", e)",
"def create_webdriver_with_retry(self, *args, **kwargs):\n # Get selenium without referencing selenium.driver which doesn't exist yet\n selenium = self.builtin.get_library_instance(\"SeleniumLibrary\")\n for _ in range(12):\n try:\n return selenium.create_webdriver(*args, **kwargs)\n except ProtocolError:\n # Give browser some more time to start up\n time.sleep(5)\n raise Exception(\"Could not connect to remote webdriver after 1 minute\")"
]
| [
"0.6603057",
"0.6471961",
"0.6052382",
"0.6052382",
"0.6046912",
"0.5883527",
"0.5859736",
"0.5768993",
"0.56850535",
"0.55772156",
"0.54033375",
"0.5355655",
"0.5336413",
"0.52726877",
"0.5258186",
"0.5250311",
"0.52132326",
"0.5192618",
"0.5192618",
"0.51881385",
"0.51792973",
"0.51609284",
"0.515188",
"0.51440144",
"0.5136609",
"0.5113951",
"0.5113283",
"0.5113169",
"0.50986224",
"0.5054002"
]
| 0.7865572 | 0 |
Close the application and quit the drivers. | def close_driver(self):
package_dict = self.config['PACKAGE']
try:
self.driver.terminate_app(package_dict[self.app_name]) # Kill app
self.driver.quit() # Kill drivers
except WebDriverException:
pass
finally:
LOGGER.info("Closed {apl} on {mob}!".format(
apl=self.app_name, mob=self.mobile_name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quit(self):\n self.driver.close_app()\n self.driver.quit()",
"def close_application(self):\n self.close()\n QtGui.QApplication.exit()",
"def close(self):\n\n self._driver.quit()",
"def program_close():\n\n print(\"\\n\")\n sys.exit(0)",
"def close(self):\n\n #Kill all zombie PIDs and exit gracefully\n try:\n self.webdriver.quit()\n except:\n pass\n if 'p' not in sys.argv:\n self.kill()\n sys.exit()",
"def quit(self):\n \n if 'driver' in self.__dict__:\n self.driver.quit()\n if 'session' in self.__dict__:\n self.session.close()\n if 'conn' in self.__dict__:\n self.conn.close()",
"def close(self):\n self.sendCommand(\"quit\")\n if self.process is not None:\n os.kill(self.process.pid, 15)\n self.process.wait()\n self.process = None\n self.log.reset_status()\n self.config.save()",
"def close(self):\n self.driver.quit()",
"def close_app(self):\n logging.info(\"Finished.\")\n Gtk.main_quit()",
"def close(self):\n self.exit()",
"def close(self):\n self.m_driver.close()\n self.m_driver = None\n\n if self.m_display is not None:\n self.m_display.stop()\n self.m_display = None\n\n if self.m_vpn_handle is not None:\n self.m_vpn_handle.close()\n self.m_vpn_handle = None\n\n l_result = run(['sudo', 'killall', '-9', 'chromedriver'], stdout=PIPE, stderr=PIPE)\n self.m_logger.info('Killing chromedriver : ' + repr(l_result))\n l_result = run(['sudo', 'killall', '-9', 'chromium-browser'], stdout=PIPE, stderr=PIPE)\n self.m_logger.info('Killing Chromium : ' + repr(l_result))",
"def quit(self):\n driver.quit()",
"def quit(self):\n self.disconnect()\n mySerialConnection = None\n logging.info(EXIT_STRING)\n self.frame.destroy()\n self.endCommand()\n #sys.exit()",
"def close_driver( self ):\r\n self.com_driver.close()\r\n self.gui.set_open( \"Closed\" )\r\n msg = \"Closed Comm Port\"\r\n self.gui.print_info_string( msg )\r\n return",
"def close_and_finish_execution():\n driver.close()\n driver.quit()\n exit(0)",
"def close(self):\n self._driver.close()",
"def close_and_exit(self):\n self.close()\n sys.exit(1)",
"def exit_program():\n quit()",
"def close_app(self):\n os.system ('adb shell am force-stop com.tencent.mm')",
"def close_driver(self):\r\n\t\tif not self.driver:\r\n\t\t\treturn;\r\n\r\n\t\tself.driver.close()",
"def close():\n sys.exit()",
"def close_program():\n print(\"End with Calculations\\nSee u later :).\")\n exit(0)",
"def __exit__(self, *args):\n self.driver.quit()",
"def closeProgram():\n\t#Save logs\n\ttry:\n\t\tsaveLogs()\n\texcept Exception as e:\n\t\tprint(\"Error saving logs because..\",e)\n\t\t#Exit even if error saving logs\n\ttry:\n\t\tif masterPod.currentMasterPod:\n\t\t\tmasterPod.currentMasterPod.save()\n\texcept:\n\t\tprint(\"Error saving master pod\")\n\t#Destory the window\n\twindow.destroy()",
"def close(self):\n self.driver.close()",
"def close_app(self, app_name: str) -> None:\n self.app.close(self.launch_app(app_name))",
"def quit(self):\n import sdl2\n if P.verbose_mode:\n print_tb(print_stack(), 6)\n\n err = ''\n try:\n self.database.commit()\n self.database.close()\n except Exception:\n err += \"<red>Error encountered closing database connection:</red>\\n\\n\"\n err += full_trace()+\"\\n\\n\"\n err += \"<red>Some data may not have been saved.</red>\\n\\n\\n\"\n\n if P.eye_tracking and P.eye_tracker_available:\t\n try:\n self.el.shut_down(incomplete=self.incomplete)\n except Exception:\n err += \"<red>Eye tracker encountered error during shutdown:</red>\\n\\n\"\n err += full_trace()+\"\\n\\n\"\n err += \"<red>You may need to manually stop the tracker from recording.</red>\\n\\n\\n\"\n\n if P.multi_user and P.version_dir:\n newpath = P.version_dir.replace(str(P.random_seed), str(P.participant_id))\n os.rename(P.version_dir, newpath)\n\n self.audio.shut_down()\n sdl2.ext.quit()\n\n if err:\n cso(\"\\n\\n\" + err + \"<red>*** Errors encountered during shutdown. ***</red>\\n\\n\")\n os._exit(1)\n cso(\"\\n\\n<green>*** '{0}' successfully shut down. ***</green>\\n\\n\".format(P.project_name))\n os._exit(1)",
"def close(self):\n self.emu.close()",
"def exit(self):\n self.close()",
"def close_app_critical_error():\n # close any plugins\n if plugins_enabled and plugins:\n for plugin in plugins:\n plugin.close(globals())\n # alt scanner close\n if alt_scanner:\n try:\n alt_scanner.close(globals())\n except AttributeError:\n pass\n os._exit(1)"
]
| [
"0.75867534",
"0.7412425",
"0.73730874",
"0.73533124",
"0.72978216",
"0.7290101",
"0.71743464",
"0.7127176",
"0.71195775",
"0.7048997",
"0.7048084",
"0.6993627",
"0.69605225",
"0.69441557",
"0.69411415",
"0.6929923",
"0.6924687",
"0.69232714",
"0.69213384",
"0.69039327",
"0.68626964",
"0.6854798",
"0.6851033",
"0.680924",
"0.6794098",
"0.6764919",
"0.6757517",
"0.6746009",
"0.67367506",
"0.67360675"
]
| 0.7819079 | 0 |
Read the mobile window size & set the scroll length for the mobile. | def set_scroll_length(self):
size = self.driver.get_window_size()
self.x_cord = int(size['width'] / 2)
self.start_y = int(size['height'] * 0.9)
self.end_y = int(size['height'] * 0.1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GetViewportSize(self):\n return self.tab.EvaluateJavaScript(\n '[ window.innerWidth, window.innerHeight ]')",
"def get_window_size():\n global windowSize\n windowSize = DRIVER.driver.get_window_size()\n return windowSize",
"def set_viewport_size(driver, device):\n if device == \"laptop\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + arguments[1]];\"\"\", 1200, 700)\n driver.set_window_size(*window_size)\n elif device == \"tablet\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + arguments[1]];\"\"\", 768, 700)\n driver.set_window_size(*window_size)\n elif device == \"mobile\":\n window_size = driver.execute_script(\"\"\"return [window.outerWidth - window.innerWidth + arguments[0], \n window.outerHeight - window.innerHeight + arguments[1]];\"\"\", 500, 700)\n driver.set_window_size(*window_size)\n else:\n raise Exception(\"The device is not supported.\")",
"def get_window_size(self):\n raise NotImplementedError",
"def getwinsize(self):",
"def ReadWindowSize(self, win_name):\n default_width = default_height = width_str = height_str = -1\n if win_name == \"main\":\n default_width = 605\n default_height = 400\n width_str = \"window_width\"\n height_str = \"window_height\"\n else:\n default_width = 125\n default_height = 590\n width_str = \"pref_width\"\n height_str = \"pref_height\"\n width = height = -1 \n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n if config.has_section(\"Settings\"):\n if config.has_option(\"Settings\", width_str):\n width = config.get(\"Settings\", width_str)\n else:\n width = default_width\n if config.has_option(\"Settings\", height_str):\n height = config.get(\"Settings\", height_str)\n else:\n height = default_height\n size = []\n size.append(int(width))\n size.append(int(height))\n return size",
"def _get_window_width(self):",
"def getDesktopSize(self):\n return convToUnits(self.desktopBytes, divby=1000)",
"def getDesktopSize(force_refresh=False):\n global __desktop_size\n if __desktop_size is None or force_refresh:\n refreshDesktopSize()\n return __desktop_size",
"def ev_windowsizechanged(self, event: WindowResized) -> None:",
"def get_window_size(self):\n return self.__window_size",
"def get_screen_size(self):\n return self.__screen_size",
"def _get_screen_size():\n import PySide.QtGui\n rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)\n return [rect.width(), rect.height()]",
"def get_window_size(self):\n coordinate = self.run_command('shell wm size').replace(b'\\r\\r\\n', b'')\n coordinate = coordinate.decode('utf-8').replace('Physical size: ', '').split('x')\n if len(coordinate) != 2:\n raise ADBError('Error when detecting window size')\n return [int(x) for x in coordinate]",
"def OnScrollSlider(self,event):\n\n size = str(self.sizeSlider.GetValue())\n self.sizeTextCtrl.SetValue(size)",
"def get_desktop_size(self):\n\n _ptr = ffi.new('SDL_DisplayMode *')\n check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, _ptr))\n return (_ptr.w, _ptr.h)",
"def on_size(self, window, width, height):\n viewport = glfw.get_framebuffer_size(window)\n GL.glViewport(0, 0, *viewport)\n self.camera.viewport = viewport",
"def update_dimensions(self):\r\n # stores the old screen height for cleaning the screen\r\n old_w_height = self.w_height\r\n\r\n self.w_width, self.w_height = get_terminal_size()\r\n # see __init__\r\n self.w_width -= self.w_width % 2\r\n self.w_height -= self.w_height % 2\r\n\r\n # no need to clear screen if window size hasn't changed\r\n if old_w_height != self.w_height:\r\n self.clear_screen(old_w_height)",
"def get_screen_size():\n screen = QDesktopWidget().screenGeometry()\n return screen.width(), screen.height()",
"def get_curr_screen_size():\n root = tk.Tk()\n root.update_idletasks()\n root.attributes('-fullscreen', True)\n root.state('iconic')\n size = (root.winfo_width(), root.winfo_height(),)\n root.destroy()\n return size",
"def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:",
"def eval_screen_size():\n center_x = 32 // 2 * app_manager.get_map_width()\n center_y = 32 // 2 * app_manager.get_map_height()\n\n loc1_le = EPD(0x58DC60)\n loc1_te = EPD(0x58DC60 + 4)\n loc1_re = EPD(0x58DC60 + 8)\n loc1_be = EPD(0x58DC60 + 12)\n\n # screen position and location\n loc1_lv = f_dwread_epd(loc1_le)\n loc1_tv = f_dwread_epd(loc1_te)\n loc1_rv = f_dwread_epd(loc1_re)\n loc1_bv = f_dwread_epd(loc1_be)\n prev_sx = f_dwread_epd(EPD(0x0062848C))\n prev_sy = f_dwread_epd(EPD(0x006284A8))\n\n # centerview and update x, y\n SeqCompute([\n (loc1_le, SetTo, center_x),\n (loc1_te, SetTo, center_y),\n (loc1_re, SetTo, center_x),\n (loc1_be, SetTo, center_y)])\n f_dwwrite_epd(loc1_le, center_x)\n f_dwwrite_epd(loc1_te, center_y)\n f_dwwrite_epd(loc1_re, center_x)\n f_dwwrite_epd(loc1_be, center_y)\n DoActions(CenterView(1))\n cur_sx = f_dwread_epd(EPD(0x0062848C))\n cur_sy = f_dwread_epd(EPD(0x006284A8))\n\n # get size\n dx = center_x - cur_sx\n dy = center_y - cur_sy\n\n # restore screen\n screen_x = prev_sx + dx\n screen_y = prev_sy + dy\n SeqCompute([\n (loc1_le, SetTo, screen_x),\n (loc1_te, SetTo, screen_y),\n (loc1_re, SetTo, screen_x),\n (loc1_be, SetTo, screen_y)])\n DoActions(CenterView(1))\n\n # restore location\n SeqCompute([\n (loc1_le, SetTo, loc1_lv),\n (loc1_te, SetTo, loc1_tv),\n (loc1_re, SetTo, loc1_rv),\n (loc1_be, SetTo, loc1_bv)])\n\n EUDReturn([dx*2, dy*2])",
"def GetWindowSize(self):\r\n \r\n return self._windowsize",
"def window_scroll(self, width=None, height=None):\n if width is None:\n width = \"0\"\n if height is None:\n height = \"0\"\n js = \"window.scrollTo({w},{h});\".format(w=width, h=height)\n self.run_script(js)",
"def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)",
"def __window_scroll(self, x, y):\n pass",
"def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])",
"def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])",
"def window_size(self, window_size):\n\n self._window_size = window_size",
"def OnSize(self, event):\n wx.CallAfter(self.DoSetViewport)\n event.Skip()"
]
| [
"0.5914855",
"0.5613949",
"0.5598261",
"0.5566157",
"0.55612123",
"0.5541413",
"0.5541",
"0.5487701",
"0.5459118",
"0.5452546",
"0.5340624",
"0.5302332",
"0.524592",
"0.51727504",
"0.51644754",
"0.5149017",
"0.51306283",
"0.50865525",
"0.5083901",
"0.5060446",
"0.50539947",
"0.50354016",
"0.5023848",
"0.50057375",
"0.49953806",
"0.49569345",
"0.4945126",
"0.4945126",
"0.49256936",
"0.49241787"
]
| 0.65684086 | 0 |
Perform a tap on the requested element or coordinates. | def tap_screen(self, element=None, config=None, x_cord=None, y_cord=None):
if element and config:
self.touch.tap(x=config[element]['x'],
y=config[element]['y']).perform()
elif x_cord:
self.touch.tap(x=x_cord, y=y_cord).perform()
else:
LOGGER.error('Either element or co-ordinates must be given for tap!')
time.sleep(2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tap(self, locator, x_offset=None, y_offset=None, count=1):\r\n driver = self._current_application()\r\n el = self._element_find(locator, True, True)\r\n action = TouchAction(driver)\r\n action.tap(el, x_offset, y_offset, count).perform()",
"def double_tap(self, locator):\r\n driver = self._current_application()\r\n el = self._element_find(locator, True, True)\r\n action = TouchAction(driver)\r\n action.press(el).move_to(x=100, y=0).release().perform()",
"def tap():\n return \"I have clicked on the elements\"",
"def click(self, x, y):\n # adb click 0,0 will have a weird behavior\n if x <= 0 and y <= 0:\n return\n cmd = \"shell input tap {x} {y}\".format(x=x, y=y)\n self.android_device_driver.adb.exec_adb_cmd(cmd).wait()",
"def click_by_location(self, elem, **kwargs):\n loc = elem.location\n size = elem.size\n screen_size = self.driver.get_window_size()\n if self.tablet:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = loc['x'] + size['width'] - 50\n y = loc['y'] + 10\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif self.phone:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = screen_size['width'] - 40\n y = loc['y'] + 5\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n # an array of tuples\n action = TouchAction(self.driver)\n action.tap(x=x, y=y).perform()",
"def click_element_at_coordinates(self, coordinate_X, coordinate_Y):\r\n self._info(\"Pressing at (%s, %s).\" % (coordinate_X, coordinate_Y))\r\n driver = self._current_application()\r\n action = TouchAction(driver)\r\n action.press(x=coordinate_X, y=coordinate_Y).release().perform()",
"def click_element_by_point(self,param,ignore_error_handle = False):\n message = {};\n step = 'click element by point x:' + str(param['x']) + ' y:' + str(param['y']);\n try:\n point_x = param['x'];\n point_y = param['y'];\n click_count = param['count'];\n touch_action = TouchAction(self.driver);\n touch_action.tap(x=point_x,y=point_y,count=click_count).release().perform();\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n print 'catch exception:'+ str(e);\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;",
"def _click(self):\n self._touch = self.view.touch",
"def Tap(self, item):\n _ = item\n return True",
"def click_a_point(self, x=0, y=0, duration=100):\r\n self._info(\"Clicking on a point (%s,%s).\" % (x, y))\r\n driver = self._current_application()\r\n action = TouchAction(driver)\r\n try:\r\n action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform()\r\n except:\r\n assert False, \"Can't click on a point at (%s,%s)\" % (x, y)",
"def withTouch(self, item, contentType=None, length=None, timeout=None, index=1, containerObject=None, relatedAreaEnd=None, doNotSelect=False):\r\n # Press (x, y) coordinate point when item is tuple\r\n if isinstance(item, tuple):\r\n self.phone._touch.press(item,length)\r\n self.phone._run('Press to coordinates: %s,%s' % item)\r\n return\r\n\r\n # Press HW key\r\n if item.startswith('KBD_KEY_'):\r\n self.phone._pressKey(item, length)\r\n self.phone._run('Press (%s) key' % item)\r\n if item == 'KBD_KEY_KEYLOCK_TOGGLE':\r\n self.phone.delay(100, False)\r\n return\r\n\r\n touchType=False#Fix touchable not working with images ,must be set not to false\r\n\r\n coordinates = self.phone.uiState.revealItem(item,timeout, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if coordinates:\r\n if not doNotSelect:\r\n itemCommented = self.phone.uiState._getCommented(item) # get step commented here so teststep won't be messed up with sx queries\r\n\r\n if containerObject != None:\r\n containerCommented = self.phone.uiState._getCommented(containerObject) # get container in commented format before touching\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s) (on same area as %s)' % (itemCommented, containerCommented))\r\n else:\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s)' % itemCommented)\r\n elif containerObject != None:\r\n self.phone.fail(\"Cannot select %s, no item found related to \\\"%s\\\".\"%(self.phone.uiState._getCommented(item), self.phone.uiState._getCommented(containerObject)))\r\n else:\r\n self.phone.fail(\"Cannot select %s, item is not found from screen.\"%self.phone.uiState._getCommented(item))",
"def do_click(self, str_arg):\n arg = validateString(str_arg)\n for tmp in range(REPEAT_TIMES_ON_ERROR):\n try:\n if arg.startswith('('):\n point = self.__getPointXY(arg)\n printLog(self.threadName + '[clicking point %s...]' % arg, logging.DEBUG)\n self.adbc.touch(point[0], point[1], \"DOWN_AND_UP\")\n else:\n if \"/\" not in arg:\n raise ValueError('bad argument of do_click().')\n # get the target view\n tv = self.__getView(arg)\n if tv:\n if DEBUG:\n printLog('Found view %s.' % arg, logging.DEBUG)\n printLog(self.threadName + 'tinyStr: %s' % tv.__tinyStr__(), logging.DEBUG)\n # printLog(self.threadName + 'position and size: {}'.format(tv.getPositionAndSize()),\n # logging.DEBUG)\n printLog(self.threadName + '[clicking id %s...]' % arg, logging.DEBUG)\n tv.touch()\n else:\n printLog('Target view %s not found.' % arg, logging.ERROR)\n self.resultFlag = False\n return\n except Exception, e:\n printLog(self.threadName + 'the %dst try failed due to %s, will retry.' % (tmp, e.message),\n logging.ERROR)\n # self.reconnect()\n time.sleep(1)\n continue\n # finally:\n # printLog(self.threadName + \"[status=%s]\" % self.resultFlag)\n printLog(self.threadName + 'CLICK FAILED: still can\\'t make the click. please check the test environment.',\n logging.CRITICAL)\n self.resultFlag = False",
"def click_element_at_screen_scale(self, scale_X, scale_Y):\r\n width, height = self._get_screen_size()\r\n try:\r\n coordinate_X = float(scale_X) * float(width)\r\n coordinate_Y = float(scale_Y) * float(height)\r\n except ValueError:\r\n self._error(\"Please check your param,scale_X or scale_Y is not float.\")\r\n self._info(\"Pressing at (%s, %s).\" % (coordinate_X, coordinate_Y))\r\n driver = self._current_application()\r\n action = TouchAction(driver)\r\n action.press(x=coordinate_X, y=coordinate_Y).release().perform()",
"def click(self, element):\n element.click()",
"def touches(a, b, **kwargs):\n return lib.touches(a, b, **kwargs)",
"def generic_click_element(self, element_id, click_type=\"press\"):\n element = self.session.findById(element_id)\n element_type = self.get_element_type_of_object(element)\n if click_type.lower() == \"select\" and hasattr(element, \"select\"):\n element.select()\n if click_type.lower() == \"press\" and hasattr(element, \"press\"):\n element.press()\n else:\n self.take_screenshot()\n message = (\n \"You cannot use '%s' on element type '%s', maybe use \"\n \"'select checkbox' instead?\" % (click_type, element_type)\n )\n raise Warning(message)\n time.sleep(self.explicit_wait)",
"def atomacclick(objecttoclick):\n try:\n objecttoclick.Press()\n #print \"clicked on : %s\" %objecttoclick\n except Exception as er:\n print \"Not able to click on: %s\" %objecttoclick",
"def on_click(self, e: ti.template()):\n for i, j in ti.ndrange(self.nx, self.ny):\n if self.inside(self.Vector2(i / self.nx, j / self.ny),\n self.Vector2(e.pos[0], e.pos[1]), 0.03):\n self.T[i, j] = 1",
"def click(self, selector):\n el = self.locate_element(selector)\n el.click()",
"def click_place_order(self):\n WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, self.CSS_PLACEORDER)))\n placeorder = self.driver.find_element_by_css_selector(self.CSS_PLACEORDER)\n action = TouchActions(self.driver)\n action.tap(placeorder).perform()",
"def click(self, element_tuple):\n current_state = self.change_monitor()\n self.log_info(f\"Browser.click: Clicking {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).click()\n self.change_monitor(previous_data=current_state)\n return",
"def synthesizeTapGesture(\n self,\n x: Union[int, float],\n y: Union[int, float],\n duration: Optional[int] = None,\n tapCount: Optional[int] = None,\n gestureSourceType: Optional[str] = None,\n ) -> Awaitable[Dict]:\n msg = {\"x\": x, \"y\": y}\n if duration is not None:\n msg[\"duration\"] = duration\n if tapCount is not None:\n msg[\"tapCount\"] = tapCount\n if gestureSourceType is not None:\n msg[\"gestureSourceType\"] = gestureSourceType\n return self.client.send(\"Input.synthesizeTapGesture\", msg)",
"def click_element(self, element: Union[WebElement, Tuple[By, str]]):\n element = self.find_element(element)\n element.click()",
"def do_click(self, xpath):\n e = self._find_element_by_xpath(xpath)\n e.click()",
"def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)",
"def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)",
"def long_click_element_by_point(self,param,ignore_error_handle = False):\n message = {};\n step = 'long click element by point x:' + str(param['x']) + ' y:' + str(param['y']);\n try:\n point_x = param['x'];\n point_y = param['y'];\n click_duration = param['duration'];\n touch_action = TouchAction(self.driver);\n touch_action.long_press(x=point_x,y=point_y,duration=click_duration).perform();\n message = self.feedback.feedback_action_ok(step);\n except BasicAction,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;",
"def fake_click(self, x, y):\n # Currently only restacks windows, and does not trigger bindings\n self.manager.c.eval(\n textwrap.dedent(\n f\"\"\"\n self.core.warp_pointer({x}, {y})\n self.core._focus_by_click()\n \"\"\"\n )\n )",
"def elementClick(self,locator=\"\",locatorType='id',element=None):\n\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n element.click()\n self.logger.info(\"clicked on element with locator\"+locator+\" locatorType: \"+locatorType)\n\n except:\n self.logger.info('Cannot click on element with locator '+locator+\" locatorType: \"+locatorType)\n print_stack()",
"def test_tap(TAPS_LIST, HOMEBREW_BIN):\n mock_failure = MagicMock(return_value={\"retcode\": 0})\n mock_user = MagicMock(return_value=\"foo\")\n mock_cmd = MagicMock(return_value=\"\")\n with patch(\"salt.utils.path.which\", MagicMock(return_value=HOMEBREW_BIN)):\n with patch.dict(\n mac_brew.__salt__,\n {\n \"cmd.run_all\": mock_failure,\n \"file.get_user\": mock_user,\n \"cmd.run\": mock_cmd,\n },\n ), patch(\n \"salt.modules.mac_brew_pkg._list_taps\", MagicMock(return_value=TAPS_LIST)\n ):\n assert mac_brew._tap(\"homebrew/test\")"
]
| [
"0.73958546",
"0.6878971",
"0.6814113",
"0.6399038",
"0.6340633",
"0.58948916",
"0.5836868",
"0.57950395",
"0.5761519",
"0.5747372",
"0.57057613",
"0.56118274",
"0.5496889",
"0.5388753",
"0.52636874",
"0.5225673",
"0.52115715",
"0.5081884",
"0.5049696",
"0.50416523",
"0.50377464",
"0.502651",
"0.5013561",
"0.49959108",
"0.4974933",
"0.49549434",
"0.4942193",
"0.4941449",
"0.49353227",
"0.49196413"
]
| 0.7536684 | 0 |
Swipe the screen to scroll down. | def swipe_up(self):
self.driver.swipe(start_x=self.x_cord, start_y=self.start_y,
end_x=self.x_cord, end_y=self.end_y, duration=1000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swipe_down(self):\n self.swipe_sub(SWIPE_MATRIX[1])",
"def swipe_element_to_bottom_of_screen(self):\n window_size_y = self.driver.get_window_size()[\"height\"]\n self.swipe(30, window_size_y - 80, 30, window_size_y - 500)",
"def swipe_down(self, page_portion=0.25):\n driver = self.driver.appium_driver\n window_size = driver.get_window_size()\n max_width = window_size[\"width\"] - 1\n max_height = window_size[\"height\"] - 1\n start_y = (int)(max_height * 0.25)\n end_y = int(max_height * (0.25 + page_portion))\n start_x = max_width / 2\n driver.swipe(start_x, start_y, start_x, end_y, 3000)",
"def page_down(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n counter -= 1\n self.refresh()\n self.reset_scrolling()\n return True",
"def swipeDown (self) :\n rotated = Grid(np.rot90(np.rot90(self.grid)))\n self.grid = np.rot90(np.rot90(rotated.swipeBase()))",
"def move_down(self):\n if self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n self.reset_scrolling()\n self.refresh()\n return True\n else:\n return False",
"def swipe_down_android(driver, steps=1):\n size = driver.get_window_size()\n start_y = size[\"height\"] * 0.80\n end_y = size[\"height\"] * 0.20\n start_x = size[\"width\"] / 2\n for i in range(0, steps):\n driver.swipe(start_x, start_y, start_x, end_y, 3000)\n return PASSED",
"def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)",
"def scroll_down(self, locator):\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.execute_script(\"mobile: scroll\", {\"direction\": 'down', 'element': element.id})",
"def swipe_up(self):\n self.swipe_sub(SWIPE_MATRIX[0])",
"def __backToIdleWithSwipe(self):\r\n # NOTE: Don't grab from the very end coordinates, causes phone reset\r\n\r\n # If Virtual keyboard is on screen, do not swipe over it\r\n if self.phone.uiState.vkb.vkbOnScreen or self.phone.uiState.vkbTransitionInProgress:\r\n yCoordinate = self.phone.uiState.getScreenHeight()/5\r\n\r\n self.phone._touch.drawLine((self.phone.uiState.getScreenWidth()-2, yCoordinate), (2, yCoordinate), stepSize = 30)\r\n else:\r\n # swipe a bit below from the center of the screen to avoid hitting e.g usb-manager app in the background\r\n yCoordinate = int(self.phone.uiState.getScreenHeight()/1.5)\r\n\r\n self.phone._touch.drawLine((self.phone.uiState.getScreenWidth()-2, yCoordinate), (2, yCoordinate), stepSize = 30)\r\n\r\n self.phone._run('Go back to idle with touch swipe', testStepReporting = False)\r\n self.phone.delay(300, False)",
"def scroll_half_page_down(event):\n scroll_forward(event, half=True)",
"def scroll_half_page_up(event):\n scroll_backward(event, half=True)",
"def move_down(self):\n self.move_step(1)",
"def down(self):\n self.move(0,-1)",
"def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")",
"def swipe_right(self, config):\n self.driver.swipe(start_x=config['SWIPE_RIGHT']['x'],\n start_y=config['SWIPE_RIGHT']['y'],\n end_x=(config['SWIPE_RIGHT']['x'] - 400),\n end_y=config['SWIPE_RIGHT']['y'], duration=1000)",
"def scroll_page_down(event):\n w = _current_window_for_event(event)\n b = event.cli.current_buffer\n\n if w and w.render_info:\n # Scroll down one page.\n line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)\n w.vertical_scroll = line_index\n\n b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)\n b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)",
"def onMoveDown(self):\n self.mainGrid.moveDown()",
"def move_scroll_bar_down(self):\n scroll = self.textBrowser.verticalScrollBar()\n scroll.setSliderPosition(scroll.maximum())",
"def swipeUp (self) :\n self.grid = self.swipeBase()",
"def swipe_right(self):\n self.swipe_sub(SWIPE_MATRIX[3])",
"def page_down(self):\n self.set_initial_offset(self.initial_offset + self.my_surface.get_height())",
"def move_down(self):\n if self.pointer < (len(self._contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1 \n self.refresh() \n return True\n else: \n return False",
"def move_down(self):\n self.move_measurement(1)",
"def swipe_element_to_top_of_screen(self, elem=None, endy=None, startx=-20):\n loc = elem.location\n startx = loc['x'] + startx\n starty = loc['y']\n\n # in case it's behind the banner ad at the bottom, swipe up a little\n window_height = self.driver.get_window_size()['height']\n if starty > .8 * window_height:\n self.swipe(.5, .5, .5, .3, 1500)\n starty = starty - window_height * .2\n sleep(1)\n\n if not endy:\n if self.phone:\n endy = 70\n else:\n endy = 180\n\n self.swipe(startx, starty, startx, endy, 1500)",
"def move_down(self):\n self.y -= 1",
"def move_down(self):\n self.pitch_motor.step_forward()",
"def page_up(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer != 0:\n logging.debug(\"moved down\")\n self.pointer -= 1\n counter -= 1\n self.refresh()\n self.reset_scrolling()\n return True",
"def move_down(self):\n #if user moves paddle right below on the screen, they won't be able to move it more downwards by using this if statement\n #SCREEN_HEIGHT - 280 = Exact number of pixels where paddle can stop exactly on bottom edge but still has its body fully shown\n if self.center.y > SCREEN_HEIGHT - 280:\n self.center.y -= MOVE_AMOUNT"
]
| [
"0.758218",
"0.71756256",
"0.71166104",
"0.7037795",
"0.6950055",
"0.69194794",
"0.68665427",
"0.6834109",
"0.67843467",
"0.676057",
"0.66562134",
"0.6606951",
"0.65145755",
"0.65032494",
"0.6459125",
"0.64556295",
"0.64275724",
"0.6410148",
"0.6391791",
"0.63822305",
"0.6373177",
"0.6366305",
"0.6342946",
"0.634275",
"0.6332875",
"0.6324913",
"0.6315867",
"0.63109595",
"0.6294147",
"0.62578833"
]
| 0.7214927 | 1 |
Swipe the screen to move right. | def swipe_right(self, config):
self.driver.swipe(start_x=config['SWIPE_RIGHT']['x'],
start_y=config['SWIPE_RIGHT']['y'],
end_x=(config['SWIPE_RIGHT']['x'] - 400),
end_y=config['SWIPE_RIGHT']['y'], duration=1000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swipe_right(self):\n self.swipe_sub(SWIPE_MATRIX[3])",
"def swipeRight (self) :\n rotated = Grid(np.rot90(self.grid))\n self.grid = np.rot90(np.rot90(np.rot90(rotated.swipeBase())))",
"def right(self):\n self.move(1,0)",
"def onMoveRight(self):\n self.mainGrid.moveRight()",
"def move_right(self):\n self.rect.x += 5 # Moves to the right by 5\n\n # If the player reaches the edge of the screen, they can't go further\n if self.rect.x >= 580:\n self.rect.x = 580",
"def move_right(self):\n\n if self.xcor() > 230:\n self.setx(250)\n else:\n new_x = self.xcor() + 40\n self.setx(new_x)",
"def turn_right(self):\n self.facing_direction += self.config\n if self.facing_direction > 7:\n self.facing_direction -= 8\n self.x, self.y = self.compute_positions()",
"def move_right(self):\n\t\tself.set_x_vector(constants.DONKEY_SPEED)",
"def go_right(self):\n self.rect.centerx += self.__dx",
"def move_right(self):\r\n self.left += self.__speed",
"def move_right(self):\r\n if self.rect.right < BG_WIDTH:\r\n self.rect.right += self.speed",
"def on_right_key(self, event) -> None:\r\n\r\n self.move_view(1, 0)",
"def go_right(self):\n self.change_x = 6\n self.direction = \"R\"",
"def right(self, speed):\n self.controller.front_left_forward(speed)\n self.controller.front_right_backward(speed)\n self.controller.rear_left_backward(speed)\n self.controller.rear_right_forward(speed)",
"def swipe_left(self):\n self.swipe_sub(SWIPE_MATRIX[2])",
"def MoveRightStep(self):\n if self.facing == 0:\n self.facing = 1\n self.x += self.stepLeft\n elif self.facing == 1:\n self.facing = 2\n self.y += self.stepUp\n elif self.facing == 2:\n self.facing = 3\n self.x -= self.stepRight\n elif self.facing == 3:\n self.facing = 0\n self.y -= self.stepDown",
"def go_right(self):\n self.rect.centerx += 9",
"def move_right(self, step: int = 1) -> None:\n if self.cursor_pos.y < self.width - 1:\n self.cursor_pos = Point(self.cursor_pos.x, self.cursor_pos.y+step)\n else:\n self.cursor_pos = Point(self.cursor_pos.x, 0)",
"def moveRight(self):\n if self._position.x != 14:\n self._position.x +=1\n return True\n return False",
"def move_right(self):\n self.yaw_motor.step_forward()",
"def move_right(self,distance):\n self.turn_right()\n self.move_forward(distance)\n # self.log_arr.append(\"right\")",
"def move_right(self):\n if self.change_valid(dx=1):\n self.x += 1",
"def _move_right(self):\n self.x += self.settings.mario_speed\n if self.settings.direction == -1:\n self.image = pygame.transform.flip(self.image, True, False)\n self.settings.direction = 1",
"def right(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(RIGHT)",
"def move_right(self, num):\n self.right_position = num",
"def go_right(self):\n self.change_x = 6",
"def go_right(self):\n self.change_x = 6",
"def scrollDisplayRight(self):\n self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)",
"def move_right():\n return __maze.move_right()",
"def move_right(self):\n self._time += 1\n if self._position < len(self._list) - 1:\n self._position += 1\n return True\n else:\n return False"
]
| [
"0.8454436",
"0.78103113",
"0.7573303",
"0.74607587",
"0.7398949",
"0.73591566",
"0.72639054",
"0.7250936",
"0.71935",
"0.7191789",
"0.7183003",
"0.7181764",
"0.71255744",
"0.71070856",
"0.70980036",
"0.70931053",
"0.7075506",
"0.7070613",
"0.70595074",
"0.70166427",
"0.69603956",
"0.6956078",
"0.6944567",
"0.69236284",
"0.69019103",
"0.68994224",
"0.68994224",
"0.6814176",
"0.6788658",
"0.67634755"
]
| 0.83857 | 1 |
Method to perform long press of element or a coordinate and slide. | def press_long_and_slide(self, element, x_cord, y_cord, hold_time):
if element:
self.touch.long_press(el=element, duration=hold_time).move_to(
x=x_cord, y=y_cord).release().perform()
else:
LOGGER.error('Element and co-ordinates must be given for long press!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_longpress(self, str_arg):\n arg = validateString(str_arg)\n # if arg.startswith(r'('):\n # raise ValueError('Bad argument, You may want to use longpress2 with coordinates as auguments.')\n x = 0\n y = 0\n seconds = 2000\n try:\n if arg.startswith(r'('):\n point, sec = arg.split(')')\n if len(sec) > 0:\n seconds = int(validateDigit(sec))\n x, y = self.__getPointXY(point + ')')\n if not isinstance(x, int):\n raise ValueError('bad x type: not int.')\n elif arg.startswith('id') or arg.startswith('text'):\n if ' ' in arg:\n view_id, sec = arg.split(' ')\n if len(sec) > 0:\n seconds = int(validateDigit(sec.strip()))\n else:\n view_id = arg\n # get the target view\n tv = self.__getView(view_id)\n if tv:\n if DEBUG:\n printLog('Found view %s.' % arg, logging.DEBUG)\n print tv.__tinyStr__()\n print tv.getPositionAndSize()\n x, y = tv.getCenter()\n if not isinstance(x, int):\n raise ValueError('Bad center coordinate: not int.')\n else:\n printLog('Target view %s not found.' % arg, logging.ERROR)\n self.resultFlag = False\n return\n else:\n raise ValueError('bad argument in longpress().')\n # perform long press\n if self.adbc.getSdkVersion() >= 19:\n printLog(self.threadName + \"[running longTouch %s, %s...]\" % (x, y))\n self.adbc.longTouch(x, y, seconds)\n\n # solution for API level > 17:\n # http://stackoverflow.com/questions/11142843/how-can-i-use-adb-to-send-a-longpress-key-event\n elif self.adbc.getSdkVersion() > 17:\n cmd = 'adb shell input touchscreen swipe %s %s %s %s %d' % (x, y, x, y, seconds)\n printLog(self.threadName + \"[running cmd %s...]\" % cmd)\n if call(cmd, shell=True) != 0:\n printLog(\"LONGPRESS FAILED: Failed to execute command '%s'.\" % cmd, logging.ERROR)\n self.resultFlag = False\n else:\n printLog(\"LONGPRESS FAILED: API < 18 is not supported yet.\", logging.ERROR)\n self.resultFlag = False\n\n except Exception, e:\n printLog(self.threadName + 'LONGPRESS FAILED:%s' % e.message, logging.WARNING)\n traceback.print_exc()\n self.resultFlag = False",
"def press_long(self, hold_time, element=None, config=None, x_cord=None, y_cord=None):\n if config:\n self.touch.long_press(x=config[element]['x'],\n y=config[element]['y'],\n duration=hold_time).release().perform()\n elif element:\n self.touch.long_press(el=element, duration=hold_time).release().perform()\n elif x_cord:\n self.touch.long_press(x=x_cord, y=y_cord, duration=hold_time).release().perform()\n else:\n LOGGER.error('Either element or co-ordinates must be given for long press!')\n time.sleep(2)",
"def long_click(self, x, y, duration=2000):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell input swipe {x} {y} {x} {y} {duration}\".format(\n x=x, y=y, duration=duration)).wait()",
"def long_press(self, locator):\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n long_press = TouchAction(driver).long_press(element)\r\n long_press.perform()",
"def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()",
"def doubleclick(point):\n m = PyMouse()\n m.press(*point)\n m.release(*point)\n m.press(*point)\n m.release(*point)",
"def long_click_element_by_point(self,param,ignore_error_handle = False):\n message = {};\n step = 'long click element by point x:' + str(param['x']) + ' y:' + str(param['y']);\n try:\n point_x = param['x'];\n point_y = param['y'];\n click_duration = param['duration'];\n touch_action = TouchAction(self.driver);\n touch_action.long_press(x=point_x,y=point_y,duration=click_duration).perform();\n message = self.feedback.feedback_action_ok(step);\n except BasicAction,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;",
"def right_click(coords=(0, 0)):\n _perform_click_input(button='right', coords=coords)",
"def right_click(self):\n self.scroll_to()\n ActionChains(self.driver).context_click(self._element).perform()",
"def longTouch(self, x, y, duration=2000, orientation=-1):\n self.drag((x, y), (x, y), duration, orientation)",
"def double_click(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, double=True)",
"def right_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)",
"def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")",
"def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)",
"def on_click(self, x, y):\n self.menu_pointer.on_click(x, y)",
"def click(self,x:int=None,y:int=None):\n x = int(x/self.zoom_count)#1.5是缩放比例\n y = int(y/self.zoom_count)\n lParam = win32api.MAKELONG(x, y)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_MOUSEMOVE,wcon.MK_LBUTTON, lParam)\n win32gui.SendMessage(self.ScreenBoardhwnd, wcon.WM_SETCURSOR, self.ScreenBoardhwnd, win32api.MAKELONG(wcon.HTCLIENT, wcon.WM_LBUTTONDOWN))\n # win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_SETCURSOR, 0, 0)\n while (win32api.GetKeyState(wcon.VK_CONTROL) < 0 or\n win32api.GetKeyState(wcon.VK_SHIFT) < 0 or\n win32api.GetKeyState(wcon.VK_MENU) < 0):\n time.sleep(0.005)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_LBUTTONDOWN,\n wcon.MK_LBUTTON, lParam)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_LBUTTONUP, 0, lParam)",
"def right_click(self, selector):\n el = self.locate_element(selector)\n ActionChains(self.base_driver).context_click(el).perform()",
"def press(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=True, button_up=False)",
"def _click(self):\n self._touch = self.view.touch",
"def long_press_keycode(\n self, keycode: int, metastate: Optional[int] = None, flags: Optional[int] = None\n ) -> 'WebDriver':\n ext_name = 'mobile: pressKey'\n args = {'keycode': keycode}\n if metastate is not None:\n args['metastate'] = metastate\n if flags is not None:\n args['flags'] = flags\n try:\n self.assert_extension_exists(ext_name).execute_script(\n ext_name,\n {\n **args,\n 'isLongPress': True,\n },\n )\n except UnknownMethodException:\n # TODO: Remove the fallback\n self.mark_extension_absence(ext_name).execute(Command.LONG_PRESS_KEYCODE, args)\n return cast('WebDriver', self)",
"def get_longpress(self):\n return self.longpress",
"def on_mouse_press(self, x, y, button):\n\n pass",
"def on_right_click(self, client, game) -> None:\n pass",
"def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass",
"def mouse_click(left_right, down_up, x, y):\n mouse_events = {\n \"leftdown\": 0x8002,\n \"leftup\": 0x8004,\n \"rightdown\": 0x8008,\n \"rightup\": 0x8010\n }\n ctypes.windll.user32.SetCursorPos(x, y)\n ctypes.windll.user32.mouse_event(mouse_events[left_right.lower() + down_up.lower()], int(x), int(y), 0, 0)",
"def double_clicked(mouse):\n global state, current_action\n\n smallest_element = get_element(mouse)\n\n with data_lock:\n if smallest_element:\n state = 0\n current_action = wtl.actions.Click(wtl.Selector(f'[wtl-uid=\"{smallest_element.wtl_uid}\"]'))",
"def wheel_click(coords=(0, 0)):\n _perform_click_input(button='middle', coords=coords)",
"def double_click(self):\n self.scroll_to()\n ActionChains(self.driver).double_click(self._element).perform()",
"def _press(self, event):",
"def click(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords)"
]
| [
"0.8066344",
"0.76812494",
"0.7387225",
"0.71065205",
"0.65214574",
"0.64916337",
"0.6470704",
"0.64311725",
"0.62847203",
"0.62028587",
"0.60926753",
"0.60913044",
"0.60368675",
"0.59834635",
"0.59390086",
"0.58624774",
"0.58039314",
"0.57924074",
"0.57913846",
"0.57878906",
"0.57370377",
"0.5727225",
"0.571922",
"0.5706394",
"0.57028234",
"0.56817037",
"0.5671822",
"0.5670605",
"0.5659077",
"0.5658573"
]
| 0.8362803 | 0 |
Select a key on the screen using keycode. | def press_using_keycode(self, text):
num = KEY_CODE_DICT[text]
self.driver.press_keycode(num)
time.sleep(3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keyevent(self, keycode: Union[str, int]) -> None:\n self.shell(['input', 'keyevent', str(keycode)])",
"def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)",
"def keypress(cls, _, key):\n return key",
"def presskey(self, key):\n \"\"\"Method to press any key\n Need to add further code for other keys based on requirements\"\"\"\n action = ActionChains(self.driver)\n action.send_keys(key)\n action.perform()",
"def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()",
"def keyevent(self, keycode: int, metastate: Optional[int] = None) -> 'WebDriver':\n return self.press_keycode(keycode=keycode, metastate=metastate)",
"def get_key_press():\n return ord(getch.getch());",
"def k_press(self, key: KKey):\n pass",
"def click_keypad(key, timeout=default_timeout):\n return click_key(KEYPAD[key], timeout)",
"def setWindowKey(key='return'):\n wdict = {'click':'NONE','return':'RETURN','escape':'ESCAPE'}\n dislin.winkey(wdict[key])",
"def keypress(self, key): # pragma: no cover\n if key == \"s\":\n self.screenshot()\n\n elif key == \"q\" or key == \"Esc\":\n self.close()\n\n elif key == \"c\":\n self._print_camera()",
"def on_key_press(self, key: str):\n if key == \"down\":\n self.selection_index += 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"up\":\n self.selection_index -= 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"enter\":\n self.OPTIONS[self.selection_index][1]()\n self.draw()",
"def _pressSpecialKey(self, key, mode='Abc', orientation='portrait'):\r\n assert key in ['shift', 'enter']\r\n self.phone.comment('Input special key %s' % key)\r\n self._loadVkbSettings()\r\n self._loadCoords(orientation, mode)\r\n if not key in self.coords.keys(): # check transition to different mode\r\n new_mode = self._findModeForChar(key)\r\n if not new_mode:\r\n raise Exception('Cannot find vkb mode where char %s is' % key)\r\n else:\r\n self.phone._touch.press(tuple(self.coords[key]),None, self.defaultDelay)\r\n self.phone._run('Press to coordinates: %s,%s' % tuple(self.coords[key]))",
"def keypress(self, key, state=None):\n\n\t\tself._interface.keypress(key, state)",
"def send_key(self, keycode):\n print(keycode)",
"def key_up(key):\n\n vk = key\n # XXX exception if >= 256\n _key_up(vk)",
"def set_keyboard_selection(self, key):\r\n\t\tfor item in self.items:\r\n\t\t\t# Return all to neutral\r\n\t\t\titem.set_italic(False)\r\n\t\t\titem.set_font_color(WHITE)\r\n \r\n\t\tif self.cur_item is None:\r\n\t\t\tself.cur_item = 0\r\n\t\telse:\r\n\t\t\t# Find the chosen item\r\n\t\t\tif key == pygame.K_UP and \\\r\n\t\t\t\t\tself.cur_item > 0:\r\n\t\t\t\tself.cur_item -= 1\r\n\t\t\telif key == pygame.K_UP and \\\r\n\t\t\t\t\tself.cur_item == 0:\r\n\t\t\t\tself.cur_item = len(self.items) - 1\r\n\t\t\telif key == pygame.K_DOWN and \\\r\n\t\t\t\t\tself.cur_item < len(self.items) - 1:\r\n\t\t\t\tself.cur_item += 1\r\n\t\t\telif key == pygame.K_DOWN and \\\r\n\t\t\t\t\tself.cur_item == len(self.items) - 1:\r\n\t\t\t\tself.cur_item = 0\r\n\r\n\t\tself.items[self.cur_item].set_italic(True)\r\n\t\tself.items[self.cur_item].set_font_color(YELLOW)\r\n\r\n\t\t# Finally check if Enter or Space is pressed\r\n\t\tif key == pygame.K_SPACE or key == pygame.K_RETURN:\r\n\t\t\ttext = self.items[self.cur_item].text\r\n\t\t\tself.funcs[text]()",
"def keyPressed(key):\n if isinstance(key, str):\n key = eval(\"locals.K_\" + key.upper())\n return (pygame.key.get_pressed()[key])",
"def on_press(key):\n try:\n # gets pressed key char value and searches it from dict with get method.\n mapped_key = key_mappings.get(key.char) # gets value and type tuple or None\n if mapped_key:\n module.pressed_key = mapped_key\n except AttributeError:\n traceback.print_exc()\n except KeyboardInterrupt:\n print(f\"\\n{module.current_time()} Application stopped\")",
"def key_down(key):\n vk = key\n # XXX exception if >= 256\n _key_down(vk)",
"def press_keycode(self, keycode: int, metastate: Optional[int] = None, flags: Optional[int] = None) -> 'WebDriver':\n ext_name = 'mobile: pressKey'\n args = {'keycode': keycode}\n if metastate is not None:\n args['metastate'] = metastate\n if flags is not None:\n args['flags'] = flags\n try:\n self.assert_extension_exists(ext_name).execute_script(ext_name, args)\n except UnknownMethodException:\n # TODO: Remove the fallback\n self.mark_extension_absence(ext_name).execute(Command.PRESS_KEYCODE, args)\n return cast('WebDriver', self)",
"def emulate_press(self, key_code, scan_code, value, timeval):\n scan_event = self.create_event_object(\n \"Misc\",\n 0x04,\n scan_code,\n timeval)\n key_event = self.create_event_object(\n \"Key\",\n key_code,\n value,\n timeval)\n return scan_event, key_event",
"def input_key_event(self, key, custom_key=None):\n\n key_event = INPUT_ACTION_SWITCHER.get(key)\n if key_event == \"-1\":\n key_event = custom_key\n self.android_device_driver.adb.exec_adb_cmd(\"shell input keyevent \" +\n key_event).wait()",
"def set_keyboard_selection(self, key):\n\t\tfor item in self.items:\n\t\t\t# Return all to neutral\n\t\t\titem.set_neutral()\n\t \n\t\tif self.cur_item is None:\n\t\t\tself.cur_item = 0\n\t\telse:\n\t\t\t# Find the chosen item\n\t\t\tif key == pygame.K_UP and \\\n\t\t\t\tself.cur_item > 0:\n\t\t\t\tself.cur_item -= 1\n\t\t\telif key == pygame.K_UP and \\\n\t\t\t\tself.cur_item == 0:\n\t\t\t\tself.cur_item = len(self.items) - 1\n\t\t\telif key == pygame.K_DOWN and \\\n\t\t\t\tself.cur_item < len(self.items) - 1:\n\t\t\t\tself.cur_item += 1\n\t\t\telif key == pygame.K_DOWN and \\\n\t\t\t\tself.cur_item == len(self.items) - 1:\n\t\t\t\tself.cur_item = 0\n\n\t\tself.items[self.cur_item].set_italic(True)\n\t\tself.items[self.cur_item].set_font_color(RED)\n \n\t\t# Finally check if Enter or Space is pressed\n\t\tif key == pygame.K_SPACE or key == pygame.K_RETURN:\n\t\t\ttext = self.items[self.cur_item].text\n\t\t\treturn text\n\n\t\treturn None",
"async def sendKeyPress(self, key):\n key = str(key)\n await self.director.sendPostRequest(\n \"/api/v1/items/{}/commands\".format(self.item_id),\n \"KEY_PRESS\",\n {\"KeyName\": key},\n )",
"def SetKeyEvent(self, event):\r\n\r\n self._evtKey = event",
"def press_key(self, event):\n if self.active:\n keycode = self.mapping[event.pin_num]\n while self.busy:\n sleep(0.01)\n self.busy = True\n self.send_key(keycode)\n self.busy = False",
"def keyboard(key, x, y):\n\n # Handle ESC key.\n if key == b'\\033':\t\n\t# \"\\033\" is the Escape key\n sys.exit(1)\n \n if key == b',' and selected_face:\n move_face('LEFT')\n\n if key == b'.' and selected_face:\n move_face('RIGHT')",
"def _on_key_press(self, event):",
"def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()"
]
| [
"0.72298855",
"0.70825064",
"0.7031476",
"0.7016587",
"0.7005039",
"0.6958417",
"0.6877484",
"0.6845089",
"0.67478025",
"0.674171",
"0.6733207",
"0.6687262",
"0.66723675",
"0.6649785",
"0.6627836",
"0.66275096",
"0.65697426",
"0.65491766",
"0.6541191",
"0.6495486",
"0.6452972",
"0.6409952",
"0.63964415",
"0.63404477",
"0.63337725",
"0.63252836",
"0.6315945",
"0.62976706",
"0.62679434",
"0.62395334"
]
| 0.7188671 | 1 |
Press the back button on mobile 'num' times. | def press_back(self, num=1):
for _11 in range(0, num): # _11 as dummy variable
self.driver.back() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def press_back_button(self):\n self.driver.back()",
"def __call__(self, count = 1):\r\n assert type(count) == types.IntType and count >= 1, \\\r\n 'Invalid count value (%s) given!' % count\r\n\r\n self.phone.comment('back(%s)' % (count != 1 and str(count) or \"\"))\r\n\r\n self.phone._pressKey('KBD_KEY_BACK', repeat = count)\r\n self.phone._run('Press back key for back stepping')",
"def __select_back_btn(self):\n for _ in range(2):\n self.fc.select_back()\n if self.printers.verify_printers_screen(raise_e=False):\n return True\n raise AssertionError(\"printers screen didn't display after clicking back button 2 times\")",
"def press_back_navigation(self):\n back_navigation = self.driver.find_element_by_name(self.BACK_NAVIGATION_NAME)\n back_navigation.click()",
"def click_back_button(driver):\n driver.back()\n return PASSED",
"def back_click(self):\n self.controller.show_account_display_screen(self.us)",
"def go_back(self):\n app = App.get_running_app()\n app.sm.current = 'menu'",
"def skip(self):\n self.click_back_button()",
"def press_up_navigation_back_button(self):\n back_button = self.driver.find_element_by_id(self.UP_NAVIGATION_BACK_BUTTON_NAME)\n back_button.click()",
"def back(self):\n self.input_key_event(InputActions.BACK)",
"def back(self):\r\n if self.phone.isFullBlackBox():\r\n self.__navigateToIdle()\r\n return True\r\n\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if currentApplication != 'evo-home':\r\n self.phone.comment('exit.back()')\r\n if currentApplication == 'ntf-drawer':\r\n self.__backToIdleWithBackPress()\r\n else:\r\n self.__backToIdleWithSwipe()\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.comment('exit.back() is not done for %s' % currentApplication)",
"def back_click(self):\n self.controller.show_user_menu_screen(self.us)",
"def continue_shopping(self):\n self._back_btn.click()",
"def back(self):",
"def back(self,**kwargs):\n self.mm.loadPreviousMenu()",
"def back_button(self):\r\n self.update_settings()\r\n self.is_action = True\r\n if self.back_call is not None:\r\n self.back_call()",
"def page_back(self):\n self._npos = max(0, self._npos - 1)\n self.display()",
"def go_back(self):\n self.hide()",
"def go_back(self):\n self.hide()",
"def transition_back():\n SCREEN_MANAGER.current = MAIN_SCREEN_NAME",
"def page_back(self):\n self._pos = max(0, self._pos - 1)\n self._display()",
"def on_back_button(self):\n symbol_top = self.currency_value_top.text().split()[0]\n symbol_bottom = self.currency_value_bottom.text().split()[0] \n try:\n if len(self.arg_nums) == 1:\n self.currency_value_top.setText(\"{} 0.0\".format(symbol_top))\n self.currency_value_bottom.setText(\"{} 0.0\".format(symbol_bottom))\n self.arg_nums.pop()\n elif len(self.arg_nums) > 12: # max number displayed on screen\n self.arg_nums = self.arg_nums[:10]\n arg_string = \"\".join(self.arg_nums)\n self.currency_value_top.setText(\"{} {}\".format(symbol_top, arg_string))\n self.currency_value_bottom.setText(\"{} {}\".format(symbol_bottom, arg_string))\n else:\n self.arg_nums.pop()\n arg_string = \"\".join(self.arg_nums)\n self.currency_value_top.setText(\"{} {}\".format(symbol_top, arg_string))\n self.currency_value_bottom.setText(\"{} {}\".format(symbol_bottom, arg_string))\n except IndexError: # if the list is empty\n pass",
"def i_go_back(self):\n if not world.using_selenium:\n assert False, (\"this step needs to be implemented for the \"\n + \"django test client\")\n world.browser.back()",
"def back_clicked(self):\n self.close()",
"def goBack(self):\n self.hide()",
"def back(self):\n self.log_info(f\"Browser.back: Telling browser to return to previous page\")\n self.CORE.back()\n return",
"def OnBackView( self, event ):\n self.historyIndex -= 1\n try:\n self.RestoreHistory( self.history[ self.historyIndex ] )\n except IndexError, err:\n self.SetStatusText( _('No further history available'))",
"def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')",
"def back(self):\n self.clearScreen()\n from screen1 import Screen1\n Screen1(self.parent, self.store)",
"def call_q(self, _):\n return MENU_GO_BACK"
]
| [
"0.686586",
"0.6603821",
"0.6489235",
"0.62731624",
"0.6251216",
"0.60601574",
"0.6037806",
"0.60344905",
"0.5995057",
"0.5986757",
"0.59838754",
"0.59783083",
"0.59600383",
"0.5889037",
"0.5838801",
"0.58346796",
"0.57942283",
"0.57372975",
"0.57372975",
"0.5688485",
"0.5674434",
"0.5669783",
"0.5664236",
"0.5646192",
"0.5635029",
"0.56193066",
"0.5614176",
"0.5593647",
"0.5581287",
"0.5560368"
]
| 0.7851241 | 0 |
Return element according to element type given. | def return_element(self, el_type, text, bounds=False):
if el_type == 'access':
element = self.driver.find_element_by_accessibility_id(text)
elif el_type == 'id':
element = self.driver.find_element_by_id(text)
elif el_type == 'xpath' and bounds:
element = self.driver.find_element_by_xpath(text).get_attribute('bounds')
elif el_type == 'xpath':
element = self.driver.find_element_by_xpath(text)
else:
element = None
LOGGER.error('No match found for input parameters!')
return element | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def element_type(self) -> global___Type:",
"def element(selection, sel_type='id'):\n selector = get_selector_method(sel_type)\n return selector(selection)",
"def element_by_atom_type(atom_type, verbose=False):\n matched_element = None\n\n if matched_element is None and atom_type.mass:\n matched_element = element_by_mass(\n atom_type.mass, exact=False, verbose=verbose\n )\n if matched_element is None and atom_type.name:\n matched_element = element_by_symbol(atom_type.name, verbose=verbose)\n if matched_element is None and atom_type.definition:\n matched_element = element_by_smarts_string(\n atom_type.definition, verbose=verbose\n )\n\n if matched_element is None:\n raise GMSOError(\n f\"Failed to find an element from atom type\"\n \"{atom_type} with \"\n \"properties mass: {atom_type.mass}, name:\"\n \"{atom_type.name}, and \"\n \"definition: {atom_type.definition}\"\n )\n\n return matched_element",
"def _getElement(self, element, literal=False, local=False, namespaceURI=None):\r\n if not element.isElement():\r\n raise TypeError, 'Expecting an ElementDeclaration'\r\n\r\n tc = None\r\n elementName = element.getAttribute('name')\r\n tp = element.getTypeDefinition('type')\r\n\r\n typeObj = None\r\n if not (tp or element.content):\r\n nsuriType,localName = element.getAttribute('type')\r\n typeClass = self._getTypeClass(nsuriType,localName)\r\n \r\n typeObj = typeClass(elementName)\r\n elif not tp:\r\n tp = element.content\r\n\r\n if not typeObj:\r\n typeObj = self._getType(tp, elementName, literal, local, namespaceURI)\r\n\r\n minOccurs = int(element.getAttribute('minOccurs'))\r\n typeObj.optional = not minOccurs\r\n\r\n maxOccurs = element.getAttribute('maxOccurs')\r\n typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)\r\n\r\n return typeObj",
"def get_element_type(cls):\r\n return cls._type_name(cls.element_type)",
"def get_by_element(self, element):\n token_ct = ContentType.objects.get_for_model(element)\n try:\n return self.get(\n elements__content_type=token_ct,\n elements__object_id=element.pk,\n )\n except ObjectDoesNotExist:\n return None",
"def _get_content_by_type(self, elem, attr_name=None):\n if not attr_name:\n resp = elem.text\n else:\n resp = elem.get(attr_name, \"\")\n \n return resp.strip()",
"def get_element_by_tag_type(self, tag_type):\n elts = []\n if self is None:\n return elts\n if self.tag_type == tag_type:\n elts.append(self)\n\n for child in self.children:\n if child is None:\n continue\n sub_elts = child.get_element_by_tag_type(tag_type)\n if sub_elts is not None:\n for sub_elt in sub_elts:\n elts.append(sub_elt)\n\n return elts",
"def get_element(self, locator,type=None):\n self.smart_wait(locator)\n dom_element = None\n try:\n dom_element = self.driver.find_element(By.XPATH, locator)\n except Exception as e:\n print(str(e) + 'debug')\n print(\"Error\" % (locator))\n return dom_element",
"def extract_element(element,default_tag_type='regular'):\r\n if element.tag == 'node':\r\n return extract_node(element,NODE_FIELDS,PROBLEMCHARS,default_tag_type)\r\n if element.tag == 'way':\r\n return extract_way(element,WAY_FIELDS,PROBLEMCHARS,default_tag_type)",
"def element(_input):\n\n value = type_(_input)\n\n if value is int:\n return session.query(Element).filter_by(atomic=_input).first()\n\n if value is float:\n return session.query(Element).filter_by(mass=_input).first()\n\n if value is str:\n _input = _input.capitalize()\n\n if value is str and 0 < len(_input) <= 2:\n return session.query(Element).filter_by(symbol=_input).first()\n\n if value is str and len(_input) > 2:\n return session.query(Element).filter_by(name=_input).first()",
"def get_element_by_name(self, name):\n for e in self.E:\n if e.name == name:\n return e",
"def an_element(self):\n return self.a_realization().an_element()",
"def get_elements_collection(resource, type):\n\tif 'element_types' in resource and type in resource['element_types']:\n\t\treturn resource['element_types'][type]\n\telse:\n\t\treturn {}",
"def get_element_by_element_name(self, element_name):\n for element in self.iterate():\n if element.get_name() == element_name:\n return element",
"def get_element(self,p):\n self._validate(p)\n return p.element()",
"def get_element( self, element_name, base_element = None ):\n if base_element is not None:\n if not etree.iselement( base_element ):\n return None\n else:\n base_element = self.xml_root\n element = base_element.find( element_name )\n if element == 'None':\n return None\n return element",
"def _find_element(locator, timeout=1, type = By.XPATH):\n elements = _find_elements(locator, timeout, type)\n if elements:\n if len(elements) > 1:\n logger.warning(f\"There is more than one element matching the locator {locator}.\"\n \"Try a more specific locator, or use _find_elements if this is expected.\")\n return None\n return elements[0]\n else:\n logger.warning(\"Could not find element with the locator [%s]\"%(locator))\n return None",
"def _get_element_type(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n el_type = None\n for value in self._client.wsdl.schema.types.values():\n if (value.name == element):\n if ('Simple' in value.id):\n el_type = 'Simple'\n elif ('Complex' in value.id):\n el_type = 'Complex'\n break\n\n return el_type",
"def parse(el, typ):\n if not el:\n return typ()\n txt = text(el)\n if not txt:\n return typ()\n return typ(txt)",
"def _get_element(self, xpath=None, selenium_element=None, lxml_element=None):\n\n # Get the xpath associated with this element.\n if xpath is None and selenium_element is not None:\n xpath = self._get_xpath_for_selenium_element(selenium_element)\n if xpath is None and lxml_element is not None:\n xpath = self.get_state_data().get_xpath_for_lxml(lxml_element)\n if xpath is None:\n return None # We had no way to get the element xpath.\n \n # See if it's already cached.\n if xpath in self._elements:\n return self._elements[xpath]\n\n # If it wasn't in the cache, initalize a new element.\n element = self.Element()\n element.xpath = xpath\n element._selenium_element = selenium_element\n element._lxml_element = lxml_element or self.get_lxml_element(element)\n # Add it to the cache.\n self._elements[xpath] = element\n return element",
"def get_element_by_name(self, name):\n for element in self._elements:\n if element.get_name() == name:\n return element",
"def first(self) -> Element:\n return typing.cast(Element, self[0])",
"def findElement(self, element, elementType, timeout = 5):\n print \"Looking for element : {0} with type : {1}\".format(element,elementType)\n startTime = time.time()\n while time.time() - startTime < timeout:\n try:\n if elementType == \"id\" or elementType == \"resource-id\":\n el = self.driver.find_element_by_id(element)\n elif elementType == \"class\" or elementType == \"class-name\":\n el = self.driver.find_element_by_class_name(element)\n return el\n except Exception as exp:\n print \"Unable to locate element : {0} of type {1} : {2}. Retrying...\".format(element,elementType,exp)\n time.sleep(self.shortWait)\n print \"Failed to fine element {0} of type {1}\".format(element,elementType)\n return False",
"def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result",
"def find_element(self, element: WebElement) -> WebElement:\n return element",
"def get_element_from_id(self, identifier):\n classification, org, rel, com = classify_id(identifier)\n if classification == id_classification.org:\n return self.get_org_question(org)\n elif classification == id_classification.rel:\n return self.get_rel_question(org, rel)\n elif classification == id_classification.com:\n return self.get_rel_comment(org, rel, com)\n return None",
"def get_element(self):\n return self.element",
"def get_element(self):\n return self.element",
"def element(self, Z = None):\n if Z is not None:\n return Element(Z = Z)\n if self.is_element():\n return self\n if (self.is_isotope() or self.is_isomer()):\n return Element(Z = self.Z)\n return self.VOID"
]
| [
"0.7039218",
"0.6531085",
"0.65199196",
"0.6413907",
"0.6378142",
"0.63359797",
"0.6290994",
"0.6273379",
"0.62482876",
"0.62119603",
"0.6187754",
"0.61422116",
"0.60904515",
"0.6073134",
"0.60068643",
"0.5997687",
"0.59854156",
"0.59780675",
"0.59744245",
"0.59307486",
"0.5915906",
"0.58368415",
"0.5816414",
"0.58099335",
"0.5791793",
"0.57903236",
"0.5771782",
"0.57710266",
"0.57710266",
"0.57592016"
]
| 0.71218866 | 0 |
Return list of elements of class 'android.widget.TextView'. | def return_textview_elements(self):
return self.driver.find_elements_by_class_name('android.widget.TextView') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def texts(self):\n return [elem.text for elem in self.web_elements]",
"def get_elements_text(self, elements: Union[List[WebElement], Tuple[By, str]]) -> List[str]:\n elements = self.find_elements(elements)\n return [element.get_attribute('innerText') for element in elements]",
"def get_all_text_view_text(self, textview):\n\t\ttextbuffer = textview.get_buffer()\n\t\tstartiter, enditer = textbuffer.get_bounds()\n\t\ttext = startiter.get_slice(enditer)\n\t\treturn text",
"def get_text(self) -> List[str]:\n return self.__texts",
"def get_texts(self) -> List[str]:\n return self.texts",
"def div_text_list(self):\n return self.q(css='div.test').text",
"def get_text_data_list(self):\n return [self.name, str(self.type)]",
"def list_texts(self, start: int = None, end: int = None) -> List:\n return [str(i.text) for i in self.data[start:end]]",
"def _get_texts(locator, timeout=default_timeout, type = By.XPATH):\n logger.debug(\"Entered _get_text() method\")\n elts = _find_elements(locator, type = type, timeout = timeout)\n if elts:\n return [elt.text for elt in elts]\n return None",
"def getLines(self):\n lines = []\n for item in self:\n lines.extend(item.textLines)\n return lines",
"def item_texts(self, name, text_key=None, axis_edit=None):\n return self._get_itemmap(name, 'texts', text_key, axis_edit)",
"def _generateDisplayedText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n displayedText = self._script.utilities.displayedText(obj)\n if not displayedText:\n return []\n\n return [displayedText]",
"def get_all_elements_text(self, *locator):\n all_texts = []\n elements = self.__driver.find_elements(*locator)\n for element in elements:\n element_text = element.text\n all_texts.append(element_text)\n return \" \".join(all_texts).strip(\"[]\")",
"def get_text(parent, tag, plural = False):\n text = None\n for item in parent.findall(tag):\n t = item.text\n if not text:\n if plural:\n text = [t]\n else:\n text = t\n elif isinstance(text, list):\n text.append(t)\n else:\n text = [text, t]\n return text",
"def 取所有项目文本(self): # real signature unknown; restored from __doc__\n return self.GetStrings()",
"def text(self, just_text=False):\n lines = []\n for node, data in self.traverse():\n if just_text or data['has_text'] or data['pad']:\n lines += data['text']\n else:\n lines += [data['meta']] + data['title'] + data['text']\n return flatten(lines)",
"def tag_strings(self):\n return [tag.tag_text for tag in self.tags.all()]",
"def getItemsInDialog(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"control\"))\n return items",
"def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result",
"def trait_view_elements ( self ):\n return self.__class__.class_trait_view_elements()",
"def get_all_text(self):\n result = list()\n\n for path in ['./OrgQuestion/OrgQSubject',\n './OrgQuestion/OrgQBody',\n './OrgQuestion/Thread/RelQuestion/RelQSubject',\n './OrgQuestion/Thread/RelQuestion/RelQBody',\n './OrgQuestion/Thread/RelComment/']:\n result.extend([\n element.text if element.text is not None else '' for element in self.merged_root.findall(path)\n ]) # extract text from each element matching the path\n\n return result",
"def getRichTextFields(self):\n fields_list = []\n if hasattr(self, 'questionTextArea'):\n fields_list.append(self.questionTextArea)\n if hasattr(self, 'feedbackTextArea'):\n fields_list.append(self.feedbackTextArea)\n return fields_list",
"def getListOfTextGlyphs(self):\n return _libsbml.Layout_getListOfTextGlyphs(self)",
"def words(self):\n return self.text.split()",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def get_results(self) -> List[str]:\n output = []\n for row in self.row_layout.children():\n if self.possible_values is None:\n text = row.itemAt(0).widget().text()\n else:\n text = row.itemAt(0).widget().currentText()\n\n if text != \"\":\n output.append(text)\n return output",
"def get_tweet_text(self):\n return self.get_element_from_list(self.TWEETS_LIST, 0)",
"def used_text_keys(self):\n text_func = self._used_text_keys\n args = ()\n kwargs = {'tks': {'tks': []}}\n DataSet._apply_to_texts(text_func, self._meta, args, kwargs)\n return kwargs['tks']['tks']",
"def get_text(self):",
"def make_list(self):\n return list(self.widget_dict.values())"
]
| [
"0.6248276",
"0.60573405",
"0.60127634",
"0.5948078",
"0.5905251",
"0.577305",
"0.5733306",
"0.56923234",
"0.5615578",
"0.556697",
"0.5562241",
"0.5440046",
"0.5421704",
"0.53165835",
"0.53053695",
"0.5239165",
"0.52215254",
"0.5188379",
"0.51882476",
"0.517459",
"0.51421756",
"0.5128736",
"0.508898",
"0.50875926",
"0.5081238",
"0.50508523",
"0.50370914",
"0.50356317",
"0.50284237",
"0.50164926"
]
| 0.85841584 | 0 |
Return element according to 'text' or 'search text' and click it. | def click_using_class(self, text, search_text=None, delay=3, is_button=False):
if search_text:
class_name = 'android.widget.EditText'
button = self.return_button(search_text, class_name)
elif is_button:
class_name = 'android.widget.Button'
button = self.return_button(text, class_name)
else:
class_name = 'android.widget.TextView'
button = self.return_button(text, class_name)
if not button:
raise NoSuchElementException
if search_text:
button.send_keys(text)
else:
button.click()
time.sleep(delay) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_button(self, text, class_name='android.widget.TextView'):\n for button in self.driver.find_elements_by_class_name(class_name):\n if button.text == text:\n return button\n return None",
"def click_search_button(self):\n self.click_element(self.generic_search_button_locator)",
"def click_element(self, el_type, text, delay=3, handle_error=True):\n if el_type not in ['access', 'xpath']:\n LOGGER.error('Mentioned element does not exist!')\n button = None\n else:\n button = self.return_element(el_type=el_type, text=text)\n\n if handle_error:\n try:\n button.click()\n except NoSuchElementException:\n LOGGER.error('{ele} is not found: {err}'.format(ele=el_type, err=text))\n sys.exit(1)\n else:\n button.click()\n time.sleep(delay)",
"def doSelectByText(self, text, timeout=10.0, name=None, tagName=None, className=None, id=None, xpath=None, linkText=None, \n partialLinkText=None, cssSelector=None, location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = self.findElement(name=name, tagName=tagName, className=className, id=id, xpath=xpath, linkText=linkText, \n partialLinkText=partialLinkText, cssSelector=cssSelector, location=location)\n selectElement = self.hasElement(timeout=timeout, commandId=cmdId) \n if selectElement is None:\n ret = False\n else:\n selectValue= selectElement.get('GUI', 'value')\n selectId = selectValue.get('element-id')\n \n xpath = \".//option[normalize-space(.) = %s]\" % self._escapeString(text)\n cmdId = self.findChildElements(elementId=selectId, xpath=xpath)\n optionsSelect = self.hasChildElements(timeout=timeout, commandId=cmdId) \n if optionsSelect is None:\n ret = False\n else:\n optionsValue = optionsSelect.get('GUI', 'value')\n optionsIds = eval( optionsValue.get('value') )\n \n if not len(optionsIds): \n ret = False\n else:\n for opId in optionsIds:\n cmdId = self.clickElement(elementId=opId['ELEMENT'])\n elementClicked = self.isElementClicked(timeout=timeout, commandId=cmdId) \n if elementClicked is None:\n ret = False\n else:\n ret = True\n return ret",
"def find_element_by_text(self, text, wait_time=5):\n return self.wait_for_element_by_xpath(\"//*[contains(text(), '%s')]\" % text, wait_time)",
"def _click_pager_with_text(self, text, page_number):\r\n targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text]\r\n targets[0].click()\r\n EmptyPromise(\r\n lambda: self.get_current_page() == page_number,\r\n \"navigated to desired page\"\r\n ).fulfill()",
"def search_btn_clicked(self, widget, data=None):\n # Method to handle search here.\n search_text = self.get_text(\"txt_search\")\n print search_text",
"def get_element(\n driver: webdriver, locator_text: str, locator_type: str = \"id\", many: bool = None\n):\n\n locator_type = locator_type.upper()\n if hasattr(By, locator_type):\n try:\n locator = get_locator(locator_text, locator_type)\n is_multiple = \"s\" if many else \"\"\n func = getattr(driver, f\"find_element{is_multiple}\")\n return func(*locator)\n except NoSuchElementException:\n return None\n else:\n raise SToolException(\"INVALID_SELECTOR\")",
"def search(self, value):\n self.base_selenium.set_text(element='general:search', value=value)\n self.base_selenium.click(element='general:search')\n time.sleep(self.base_selenium.TIME_MEDIUM)\n return self.result_table()",
"def search_resources(self,searchtext):\n\n self.search.value = searchtext\n self.submit.click()",
"def return_element(self, el_type, text, bounds=False):\n if el_type == 'access':\n element = self.driver.find_element_by_accessibility_id(text)\n elif el_type == 'id':\n element = self.driver.find_element_by_id(text)\n elif el_type == 'xpath' and bounds:\n element = self.driver.find_element_by_xpath(text).get_attribute('bounds')\n elif el_type == 'xpath':\n element = self.driver.find_element_by_xpath(text)\n else:\n element = None\n LOGGER.error('No match found for input parameters!')\n return element",
"def enter_text_by_xpath(self, xpath, some_text):\n try:\n print(f\"xpath provided: {xpath}\")\n element = self.wwait.until(EC.element_to_be_clickable((By.XPATH, xpath)))\n\n utils.LOG.info(f\"entering the following text: {some_text}\")\n element.send_keys(some_text)\n except NoSuchElementException as err:\n utils.LOG.warning(f\"Entering Text failed by following xpath: {xpath}\")\n utils.LOG.error(err)\n self.take_screenshot('ErrorEnterText_')",
"def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()",
"def doSelectByValue(self, text, timeout=10.0, name=None, tagName=None, className=None, id=None, xpath=None, linkText=None, \n partialLinkText=None, cssSelector=None, location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = self.findElement(name=name, tagName=tagName, className=className, id=id, xpath=xpath, linkText=linkText, \n partialLinkText=partialLinkText, cssSelector=cssSelector, location=location)\n selectElement = self.hasElement(timeout=timeout, commandId=cmdId) \n if selectElement is None:\n ret = False\n else:\n selectValue= selectElement.get('GUI', 'value')\n selectId = selectValue.get('element-id')\n \n css = \"option[value =%s]\" % self._escapeString(text)\n cmdId = self.findChildElements(elementId=selectId, cssSelector=css)\n optionsSelect = self.hasChildElements(timeout=timeout, commandId=cmdId) \n if optionsSelect is None:\n ret = False\n else:\n optionsValue = optionsSelect.get('GUI', 'value')\n optionsIds = eval( optionsValue.get('value') )\n \n if not len(optionsIds): \n ret = False\n else:\n for opId in optionsIds:\n cmdId = self.clickElement(elementId=opId['ELEMENT'])\n elementClicked = self.isElementClicked(timeout=timeout, commandId=cmdId) \n if elementClicked is None:\n ret = False\n else:\n ret = True\n return ret",
"def search(self, keyword):\n from modules.pages.search_page import SearchPage\n self.button_click(self.SEARCH_BUTTON)\n search_item = self.get_element(self.SEARCH_INPUT)\n search_item.send_keys(keyword, Keys.RETURN)\n return SearchPage(self.driver)",
"def find_element_by_text(self,param,ignore_error_handle = False):\n message = {};\n step = 'find element by text:' + param['text'];\n try:\n text = param['text'];\n element = self.driver.find_element_by_name(text);\n message = self.feedback.feedback_action_ok(step);\n message['element'] = element;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;",
"def find_by_contains_text(self, text, element_tag=\"*\"):\n return self.wait_until_element_find(locator_type=By.XPATH,\n locator=f\".//{element_tag}[contains(text(), '{text}')]\")",
"def doFindText(self, expectedText, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n if expectedText is None: \n raise TestAdapterLib.ValueException(TestAdapterLib.caller(), \"expectedText argument cannot be equal to none\" )\n\n ret = True\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText,\n cssSelector=cssSelector, location=location)\n rsp = self.hasElement(timeout=timeout, commandId=cmdId) \n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n cmdId = self.getTextElement(elementId=elementId)\n rsp = self.hasTextElement(timeout=timeout, commandId=cmdId, expectedText=expectedText)\n if rsp is None:\n ret = False\n return True\n return ret",
"def showFind(self, txt=\"\"):\n self.__searchWidget.showFind(txt)",
"def elementClick(self,locator=\"\",locatorType='id',element=None):\n\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n element.click()\n self.logger.info(\"clicked on element with locator\"+locator+\" locatorType: \"+locatorType)\n\n except:\n self.logger.info('Cannot click on element with locator '+locator+\" locatorType: \"+locatorType)\n print_stack()",
"def search(text):\n s = Search()\n result = _search(s, text)\n _print_results(result)\n return result",
"def doTypeText(self, text, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n if text is None: \n raise TestAdapterLib.ValueException(TestAdapterLib.caller(), \"text argument cannot be equal to none\" )\n\n ret = True\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, \n cssSelector=cssSelector, location=location)\n rsp = self.hasElement(timeout=timeout, commandId=cmdId) \n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n\n cmdId = self.typeTextElement(elementId=elementId, text=str(text) )\n if self.hasTextEntered(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret",
"def select_by_visible_text(self, selector, text):\n el = self.locate_element(selector)\n Select(el).select_by_visible_text(text)",
"def test_search_720(self):\n self.driver.get(self.domain)\n self.assertTrue(u'XXXX' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n search.click()\n search_field = self.driver.find_element_by_css_selector(\"#XXXX\")\n search_field.send_keys(\"XXXX\")\n search_field.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))",
"def click(self, element):\n element.click()",
"def click(self, selector):\n el = self.locate_element(selector)\n el.click()",
"def click_document_search_button(self):\n self.click_element(self.document_search_button_locator, True)",
"def handle_text_search(self, text):\n log.debug(\"Handling text search: %s\", text)\n\n self.current_selected = 0\n self._refresh()",
"def doWaitClickElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector,\n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False\n return ret\n \n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n cmdId = self.clickElement(elementId=elementId)\n if self.isElementClicked(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret",
"def enable_search(self):\n html_element = self.find_element_by_xpath(\n '/html/body').get_attribute('outerHTML')\n soup = Scraper(html_element)\n\n elms_obj = soup.find_search_enable_btn()\n\n for tag, target in elms_obj.items():\n if len(target) > 0:\n for elem in target:\n for attr, value in elem.items():\n try:\n if str(attr) == 'class':\n for element in value:\n btn = self.find_elements_by_class_name(\n f'{element}')\n for e in btn:\n try:\n e.click()\n print(\n colored(':: The Searching is able ::', 'green'))\n return\n except:\n print(\n 'The searching isn\\'t able yet =(')\n except:\n pass\n btn = self.find_elements_by_css_selector(\n f'{tag}[{attr}=\"{value}\"]'\n )\n for element in btn:\n try:\n element.click()\n print(\n colored(':: The Searching is able ::', 'green'))\n return\n except:\n print('The searching isn\\'t able yet =(')"
]
| [
"0.661339",
"0.65298027",
"0.64589846",
"0.63079387",
"0.6279679",
"0.6225251",
"0.61082816",
"0.60499865",
"0.5844906",
"0.5839642",
"0.58275473",
"0.58186126",
"0.5792587",
"0.57632035",
"0.57194805",
"0.569059",
"0.568484",
"0.5681119",
"0.5595558",
"0.5590351",
"0.55692565",
"0.55624163",
"0.554438",
"0.55205745",
"0.551623",
"0.55135304",
"0.54787904",
"0.5475861",
"0.54727453",
"0.54540074"
]
| 0.72133875 | 0 |
Count letters in a name. | def counter(name):
count_name = list(name)
counter = 0
for letter in count_name:
counter += 1
print(f"There are {counter} letter in the name {name}.")
print(f"\tAnd btw... {name} backwards is {name[::-1].lower()}.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def char_count(self, _list, char):\n\t\tcont = 0\n\t\tfor i in _list:\n\t\t\tcont += i[\"name\"].count(char.upper())\n\t\t\tcont += i[\"name\"].count(char.lower())\n\t\treturn cont",
"def test_single_letter_count(self):\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"h\"), 1)\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"z\"), 0)\n self.assertEqual(functions.single_letter_count(\"HelLo World\", \"l\"), 3)",
"def count_letters(wordform: str):\n return len(re.findall('[a-z]', wordform))",
"def count_letters(word, letter):\n assert isinstance(word, str) == True, \"Please input a string!\"\n count = 0\n for char in word:\n if char == letter:\n count += 1\n\n return count",
"def letter_count( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\tfor char in chars:\n\t\tresult[char] = text.count(char)\n\treturn result",
"def count_letter(content, letter):\n if (not isinstance(letter, str)) or len(letter) != 1:\n raise ValueError('`letter` must be a single character string.')\n return len([char for char in content if char == letter])",
"def count_letter(content, letter):\n if (not isinstance(letter, str)) or len(letter) != 1:\n raise ValueError('`letter` must be a single character string.')\n return len([char for char in content if char == letter])",
"def count_letter(content, letter):\n if (not isinstance(letter, str)) or len(letter) != 1:\n raise ValueError('`letter` must be a single character string.')\n return len([char for char in content if char == letter])",
"def count_letter(content, letter):\n if (not isinstance(letter, str)) or len(letter) != 1:\n raise ValueError('`letter` must be a single character string.')\n return len([char for char in content if char == letter])",
"def count_letter(content, letter):\n if (not isinstance(letter, str)) or len(letter) != 1:\n raise ValueError('`letter` must be a single character string.')\n return len([char for char in content if char == letter])",
"def count_alpha(word):\n\n alpha = re.compile(r'[a-z]', re.IGNORECASE)\n return len(alpha.findall(word))",
"def tester(name):\n return len(name)",
"def letterSpace(text):\n\n count = 0\n alphabet = string.ascii_lowercase + string.ascii_uppercase\n for char in text:\n if char in alphabet:\n count += 1\n return count",
"def count_name(text, adj):\n for x in re.finditer(r'[A-Z][a-z]*[\\s][A-Z][a-z]*',text):\n adj[x.group()] += 1\n return",
"def getNameNum(name):\n dicto = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26}\n summ = 0\n for letter in name:\n summ += dicto.get(letter.lower())\n return summ",
"def test_multiple_letter_count(self):\n self.assertEqual(functions.multiple_letter_count(\"awesome\"), {\n 'a': 1, 'e': 2, 'm': 1, 'o': 1, 's': 1, 'w': 1})",
"def add_letter_count(self):\n self.dataframe['letter_count'] = self.dataframe['letter'].str.len()",
"def count_characters(text):\n counted_characters = initialize_dictionary_with_characters()\n for char in next_uppercase_character(text.upper()):\n counted_characters[char] += 1\n return counted_characters",
"def num_chars(word):\n return len(word)",
"def get_number_of_letters(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n \"\"\"Count number of lettes without digits, non letter characters, without xml tags\"\"\"\n data = file.read()\n data = re.sub('<.*?binary.*?>*<.*?binary.*?>',' ', data)\n data = re.sub('\\\\s\\\\s*', '', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', data))))\n let_count = len(data)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_letters', let_count)\n print(datetime.now(), '-', 'number_of_letters for', self.filename, 'calculated =', let_count)\n return None",
"def CountNames():\r\n return _hiew.HiewGate_Names_CountName()",
"def countLetters(inputtedString):\n\n counted = 0\n inBetween = list(inputtedString)\n i = 0\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n while i < len(inBetween):\n if inBetween[i] in alphabet:\n counted += 1\n i += 1\n\n return counted",
"def count(word):\n\n return len(word)",
"def fancy_count(sentence, alphabet):\n sentence = sentence.lower()\n\n # create dictionary of all letters set to 0\n letter_count = {}\n for char in alphabet:\n letter_count[char] = 0\n\n for char in sentence:\n if char in letter_count.keys():\n letter_count[char] += 1\n return letter_count",
"def value(name):\r\n return sum(alpha.index(str(l)) + 1 for l in name)",
"def count_letters_mod(word, letter, index=0):\n count = 0\n while index < len(word):\n result = word.find(letter, index)\n if result != -1:\n count += 1\n index = result + 1\n else:\n index += 1\n\n return count",
"def basic_count(sentence):\n letter_count = {}\n for char in sentence:\n if char not in letter_count:\n letter_count[char] = 0\n letter_count[char] += 1\n return letter_count",
"def word_char_count(word):\n\talphabet = alphabet_init()\n\tfor i in range(len(word)):\n\t\tchar_index = convert_letter_to_int(word[i])\n\t\talphabet[char_index] += 1 \n\treturn alphabet",
"def frequencyLetterDic(s):\n pass",
"def count(self, word):\n pass"
]
| [
"0.75467575",
"0.74104375",
"0.74040395",
"0.739459",
"0.73699015",
"0.7357591",
"0.7343402",
"0.7343402",
"0.7343402",
"0.7343402",
"0.731937",
"0.7260459",
"0.72601306",
"0.72523135",
"0.72024745",
"0.7005587",
"0.69186723",
"0.6886823",
"0.68676317",
"0.68437684",
"0.67980176",
"0.67924356",
"0.6752559",
"0.67480206",
"0.67263204",
"0.67036337",
"0.6677373",
"0.66684824",
"0.66530716",
"0.6608531"
]
| 0.74775517 | 1 |
Test case for delete_opening_balance_journals_key | def test_delete_opening_balance_journals_key(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_opening_balance_journals_key(self):\n pass",
"def test_get_opening_balance_journals(self):\n pass",
"def test_post_opening_balance_journals(self):\n pass",
"def test_client_risk_assessment_delete(self):\n pass",
"def test_deleteItinerary(self):\n event = dict(start = '2015-08-21T01:23:00.000Z',\n end = '2015-08-21T01:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n date = {'date': '2015-08-21T00:00:00.000Z'}\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n # Create sample itinerary for naina for the event day\n self.json_post('/createItinerary/naina', dict(\n name = 'New Day1',\n date = date['date']\n ))\n\n euid = str('alex_' + event['start'] + event['end'])\n naina_euid = str('naina_' + event['start'] + event['end'])\n iuid = str('alex_' + date['date'])\n naina_iuid = str('naina_' + date['date'])\n invuid = '00000000000000000000000'\n\n rv = self.json_post('/createEvent/alex', event)\n assert euid in str(rv.data)\n\n # Share event with naina\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = euid,\n invited = 'naina'\n ))\n assert euid in str(rv.data)\n\n rv = self.json_post('/createEvent/naina', dict(\n uid = euid\n ))\n assert euid in str(rv.data)\n\n rv = self.json_delete('/deleteItinerary/bbbb', {'uid': iuid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_delete('/deleteItinerary/alex', {'uid': invuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': iuid})\n assert iuid in str(rv.data)\n\n rv = self.json_delete('/deleteItinerary/alex', {'uid': iuid})\n assert iuid in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': iuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_delete('/deleteItinerary/naina', {'uid': naina_iuid})\n assert naina_iuid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': euid})\n assert \"Event not found\" in str(rv.data)\n\n rv = self.json_get('/getEventFromId/naina', {'uid': naina_euid})\n print(rv.data)\n assert \"Event not found\" in str(rv.data)",
"def test_meeting_delete(self):\n pass",
"def test_delete_case(self):\n pass",
"def test_client_bank_account_delete(self):\n pass",
"def test_delete7(self):\n pass",
"def test_delete_risk_profile_using_delete(self):\n pass",
"def test_delete_nveto_pmt_item(self):\n pass",
"def test_delete_goal(self):\n pass",
"def test_delete_api_key(self):\n pass",
"def test_kyc_delete_legal_board_member(self):\n pass",
"def test_delete_activity(self):\n pass",
"def test_workflows_id_delete(self):\n pass",
"def test_duo_account_delete(self):\n pass",
"def test_delete_api_key_from_org(self):\n pass",
"def test_delete_occurrence(self):\n pass",
"def test_delete_records(self):\n pass",
"def test_delete_boat(self):\n pass",
"def test_delete_company_props_using_delete(self):\n pass",
"def test_issue_delete_issue_reaction(self):\n pass",
"def test_delete_note(self):\n pass",
"def _testKeySubNsDel(self):\n if len(self._getKeyList()) == 0 and len(self._getSubNsList()) == 0:\n parent = self.parent()\n if parent:\n parent._delChild(self.path[-1])",
"def test_fax_inbound_automation_delete(self):\n pass",
"def test_client_nationlity_delete(self):\n pass",
"def test_delete_milestone(self):\n milestone1_startdate = timezone.datetime(2020, 9, 1).date()\n milestone1_targetdate = timezone.datetime(2020, 9, 20).date()\n kippomilestone_1 = KippoMilestone(\n project=self.project,\n title=\"test milestone 1\",\n start_date=milestone1_startdate,\n target_date=milestone1_targetdate,\n )\n kippomilestone_1.save()\n\n # assign milestone to tasks\n self.task1.milestone = kippomilestone_1\n self.task1.save()\n task1_id = self.task1.id\n\n # delete milestone\n kippomilestone_1.delete()\n\n # confirm task still exists\n self.assertTrue(KippoTask.objects.filter(id=task1_id).exists())",
"def test_issue_delete_time(self):\n pass",
"def test_delete_record(self):\n pass"
]
| [
"0.7443269",
"0.6647047",
"0.6586451",
"0.6173807",
"0.59971863",
"0.5906599",
"0.5883001",
"0.58747035",
"0.58452064",
"0.5810683",
"0.5803397",
"0.5792871",
"0.57869107",
"0.57812667",
"0.5751271",
"0.5725391",
"0.5719262",
"0.57173496",
"0.56513774",
"0.5649916",
"0.564739",
"0.56048936",
"0.5603146",
"0.55558133",
"0.55533075",
"0.5553021",
"0.55482036",
"0.55447954",
"0.55290306",
"0.5513557"
]
| 0.954454 | 0 |
Test case for get_opening_balance_journals | def test_get_opening_balance_journals(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_opening_balance_journals_key(self):\n pass",
"def test_post_opening_balance_journals(self):\n pass",
"def test_delete_opening_balance_journals_key(self):\n pass",
"def test_open_ru_ballance(self, ):\n if self.report_type == 'open.ru':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals()\n repo_deals = self.get_repo_deals()\n \n if self.open_ru_report_type == 'stock':\n comm = self.open_ru_get_micex_commission(deals, repo_deals)\n elif self.open_ru_report_type == 'future':\n atl = self.get_account_totally_line()\n comm = self.open_ru_get_forts_comm(atl)\n ballance = sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('price')) *\n float(d.getAttribute('quantity'))\n for d in deals])\n ballance += sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('deal_price')) *\n float(d.getAttribute('quantity'))\n for d in repo_deals])\n ballance += 10000 - comm # 10000 is the initial account amount\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n self.assertAlmostEqual(ballance, accs[0]['current_money'])",
"def test_journals_paged_fields(self, api_client):\n rv = api_client.get(\"/journals-paged\")\n json_data = rv.get_json()\n sample = next(\n (item for item in json_data[\"results\"] if item[\"issn_l\"] == \"1907-1760\"),\n None,\n )\n top_level_keys = [\n \"id\",\n \"issn_l\",\n \"issns\",\n \"title\",\n \"publisher\",\n \"previous_issn_ls\",\n \"other_titles\",\n \"journal_metadata\",\n \"total_dois\",\n \"dois_by_issued_year\",\n \"sample_dois\",\n \"subscription_pricing\",\n \"apc_pricing\",\n \"open_access\",\n \"status\",\n \"status_as_of\",\n ]\n\n i = 0\n for key in sample.keys():\n assert key == top_level_keys[i]\n i += 1",
"def test_create_payment_verify_amounts(self):\n test_shift, _, __ = self._make_shift(\n shiftkwargs={'status': 'OPEN', 'starting_at': timezone.now(),\n 'ending_at': timezone.now() + timedelta(hours=8), 'minimum_hourly_rate': 15,\n 'minimum_allowed_rating': 0, 'maximum_clockin_delta_minutes': 15,\n 'maximum_clockout_delay_minutes': 15, 'maximum_allowed_employees': 5,\n 'employees': self.test_employee},\n employer=self.test_employer)\n test_period = mixer.blend('api.PayrollPeriod', employer=self.test_employer)\n\n payload = {\n 'payroll_period': test_period.id,\n 'employee': self.test_employee.id,\n 'employer': self.test_employer.id,\n 'shift': test_shift.id,\n 'splited_payment': True,\n 'status': 'PENDING',\n 'breaktime_minutes': 5,\n 'regular_hours': 6,\n 'over_time': 2.42,\n 'hourly_rate': 8.4,\n 'total_amount': 13,\n }\n url = reverse_lazy('api:me-get-payroll-payments-employer')\n response = self.client.post(url, data=payload)\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('breaktime_minutes'), 5, response_json)\n self.assertEqual(Decimal(response_json.get('regular_hours')), Decimal(payload.get('regular_hours')), response_json)\n self.assertEqual(Decimal(response_json.get('over_time')), Decimal(str(payload.get('over_time'))), response_json)\n self.assertIsNotNone(response_json.get('hourly_rate'), response_json)\n self.assertIsNotNone(response_json.get('total_amount'), response_json)\n total_amount = Decimal(str(\n math.trunc((Decimal(response_json.get('regular_hours')) + Decimal(response_json.get('over_time')))\n * Decimal(response_json.get('hourly_rate')) * 100) / 100\n ))\n self.assertEqual(Decimal(response_json.get('total_amount')), total_amount, response_json)",
"def check_open():\n print(\"***** Check if Business is Open/Closed *****\")\n while True:\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n if business_object['is_open'] == 1:\n print(\"This business is open!\")\n else:\n print(\"This business is closed!\")\n\n print()\n\n print_business(business_object)",
"def test_get_active_cco(session):\n business = factory_business('BC1234567')\n filing_dict = copy.deepcopy(FILING_HEADER)\n filing_dict['filing']['consentContinuationOut'] = copy.deepcopy(CONSENT_CONTINUATION_OUT)\n filing = factory_completed_filing(business, filing_dict)\n\n expiry_date = get_cco_expiry_date(filing.effective_date)\n\n consent_continuation_out = ConsentContinuationOut()\n consent_continuation_out.foreign_jurisdiction = 'CA'\n consent_continuation_out.foreign_jurisdiction_region = 'AB'\n consent_continuation_out.expiry_date = expiry_date\n consent_continuation_out.business_id = business.id\n consent_continuation_out.filing_id = filing.id\n consent_continuation_out.save()\n\n cco = consent_continuation_out.get_active_cco(business.id, filing.effective_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date, 'CA', 'AB')\n assert cco\n\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date + datedelta.datedelta(days=1))\n assert not cco",
"def test_biweekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 700)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))",
"def test_debts_sorted_by_fee(self):\n card = CreditCard.objects.create(\n name='One',\n interest_rate=20.0,\n balance=1000_00,\n min_payment=10_00,\n min_payment_percent=10.0,\n annual_fee=100_00,\n user=self.user,\n )\n overdraft = Overdraft.objects.create(\n name='Over',\n interest_rate=20.0,\n balance=1000_00,\n monthly_fee=9_00,\n user=self.user,\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(\n response.content,\n [overdraft.to_JSON(), card.to_JSON()],\n )",
"def test_happy_path_scenario(self):\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)]) # progress invoice\n credit_jobs([(self.job, A(100), A(0), A(0))], D(100)) # progress payment\n debit_jobs(\n [(self.job, A(480), Entry.FLAT_DEBIT)], recognize_revenue=True\n ) # final invoice\n credit_jobs([(self.job, A(800), A(60), A(0))], D(800)) # final payment\n\n self.assert_balances(\n bank=A(900, 0, 0),\n invoiced=A(960),\n paid=A(-960),\n debited=A(480 * 2 + 380),\n credited=A(-480 * 2 - 380),\n income=A(960).net_amount,\n tax=A(900).tax_amount,\n discounts=A(-60).net_amount,\n )\n\n total_income = income_account().balance + discount_account().balance\n self.assertEqual(total_income, A(900).net_amount)",
"def test_open_ru_stock_commission(self, ):\n if self.report_type == 'open.ru' and self.open_ru_report_type == 'stock':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals() \n repo_deals = self.get_repo_deals()\n summcomm = self.open_ru_get_micex_commission(deals, repo_deals)\n self.assertAlmostEqual(summcomm, \n self.model._sqlite_connection.execute('select sum(commission) from deals').fetchone()[0])\n print('test stock commission passed')",
"def test_finalize_and_open_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period2.id})\n self.client.force_login(self.test_user_employer)\n # change from OPEN to FINALIZE\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 1)\n # change from FINALIZE to OPEN\n response = self.client.put(url, data={'status': 'OPEN'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'OPEN', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty)",
"def test_weekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 600)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 14, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 28, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))",
"def test_set_new_objects(self):\n\n name = 'Juan'\n\n partner = create_partner(name)\n\n amount = 5000000\n interest_rate = 1.5\n\n loan_capital_data = {\n 'amount': amount,\n 'interest_rate':interest_rate,\n 'partner_id': partner.id\n }\n\n loan_capital = create_loan_capital(loan_capital_data)\n\n client = APIClient()\n\n response = self.client.get('/loan_quotation/4000000')\n\n # Este request al endpoint cumplirá con lo solicitado y mostrará\n # el siguiente mensaje:\n self.assertEqual(json.loads(response.content), {\n 'Socio': 'Juan',\n 'Cuota_mensual': '171111.11',\n 'Pago_total_credito': '6160000.00',\n 'Tasa_interes_mensual': '1.50'\n })\n\n # Este request al endpoint no cumplirá con lo solicitado y mostrará\n # el siguiente mensaje:\n response = self.client.get('/loan_quotation/8000000')\n self.assertEqual(json.loads(response.content), {\n 'message': 'No hay socio disponible'\n })",
"def all_in():\r\n\r\n raise_bet(player.get_cash())",
"def test_open_ru_fut_commission(self, ):\n if self.report_type == 'open.ru' and self.open_ru_report_type == 'future':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n atl = self.get_account_totally_line()\n if len(atl) == 0:\n return\n summcomm = self.open_ru_get_forts_comm(atl)\n self.assertAlmostEqual(summcomm,\n sum([d['commission'] for d in self.model.list_deals(aid)]))\n print('open.ru commission for futures passed')",
"def test_get_all_upcoming_expenses(self):\n print()\n print(\"Get all expenses will still occur\")\n user = CustomUser.objects.get(username = \"Test User\")\n actual_result = get_all_upcoming_budget_expenses(user = user)\n for ele in actual_result:\n print(ele)\n expected_result = [ BudgetExpense.objects.get(id=100),\n BudgetExpense.objects.get(id=150), \n BudgetExpense.objects.get(id=200), \n BudgetExpense.objects.get(id=600), \n BudgetExpense.objects.get(id=700),\n BudgetExpense.objects.get(id=500),\n BudgetExpense.objects.get(id=800)]\n print(\"====================\")\n print()\n self.assertEquals(expected_result, list(actual_result))",
"def test_balance(self):\n\n self.assertEqual(self.cash_report.balance(), 150)",
"def test_balance_tracking(self):\n # TODO\n pass",
"def get_open_lost_cases(asof=None):\n if asof is None: asof = datetime.utcnow().date()\n cutoff = asof - timedelta(days=1)\n return PatientCase.view_with_patient(\"centralreports/open_ltfu_cases\", \n include_docs=True,\n startkey=cutoff.strftime(\"%Y-%m-%d\"), \n endkey=\"\", descending=True)",
"def test_get_all_boats(self):\n pass",
"def test_open_ru_ballance_after_make_position(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n before = accs[0]['current_money']\n self.model.tamake_positions_for_whole_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n after = accs[0]['current_money']\n self.assertAlmostEqual(before, after)",
"def test_companies_company_id_data_journal_entries_get(self):\n pass",
"def test_get_goals(self):\n pass",
"def _printOrderStatus(self, targetorders):\n result = list(self.getList(self.root))\n open_order = filter(lambda y: (y[1] % 2) != 0, result)\n close_order = filter(lambda y: ((y[1] % 2) == 0 and y[1] != 0), result)\n open = list(open_order)\n close = list(close_order)\n close_order_count = 0\n for x in close:\n result = x[1] // 2\n close_order_count += result\n open_order_count = 0\n for x in open:\n result = x[1] + 1 // 2\n open_order_count += result\n balance = targetorders - (open_order_count + close_order_count)\n print(f'Open Orders: {open_order_count}')\n print(f'Closed Orders: {close_order_count}')\n print(f'Yet to be fulfilled: {balance}')\n print('------------------------------------')",
"def test_under_one_week_lookback_window(self, mock_get_prices):\n class BuyBelow10(Moonshot):\n \"\"\"\n A basic test strategy that buys below 10.\n \"\"\"\n DB = 'test-db'\n LOOKBACK_WINDOW = 2\n\n def prices_to_signals(self, prices):\n signals = prices.loc[\"Close\"] < 10\n return signals.astype(int)\n\n def _mock_get_prices():\n\n dt_idx = pd.DatetimeIndex([\"2018-05-01\",\"2018-05-02\",\"2018-05-03\", \"2018-05-04\"])\n fields = [\"Close\",\"Volume\"]\n idx = pd.MultiIndex.from_product([fields, dt_idx], names=[\"Field\", \"Date\"])\n\n prices = pd.DataFrame(\n {\n \"FI12345\": [\n #Close\n 9,\n 11,\n 10.50,\n 9.99,\n # Volume\n 5000,\n 16000,\n 8800,\n 9900\n ],\n \"FI23456\": [\n # Close\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Volume\n 15000,\n 14000,\n 28800,\n 17000\n\n ],\n },\n index=idx\n )\n\n return prices\n\n def mock_download_master_file(f, *args, **kwargs):\n\n master_fields = [\"Timezone\", \"Symbol\", \"SecType\", \"Currency\", \"PriceMagnifier\", \"Multiplier\"]\n securities = pd.DataFrame(\n {\n \"FI12345\": [\n \"America/New_York\",\n \"ABC\",\n \"STK\",\n \"USD\",\n None,\n None\n ],\n \"FI23456\": [\n \"America/New_York\",\n \"DEF\",\n \"STK\",\n \"USD\",\n None,\n None,\n ]\n },\n index=master_fields\n )\n securities.columns.name = \"Sid\"\n securities.T.to_csv(f, index=True, header=True)\n f.seek(0)\n\n mock_get_prices.return_value = _mock_get_prices()\n\n with patch(\"moonshot.strategies.base.download_master_file\", new=mock_download_master_file):\n results = BuyBelow10().backtest(start_date=\"2018-05-01\", end_date=\"2018-05-04\")\n\n get_prices_call = mock_get_prices.mock_calls[0]\n _, args, kwargs = get_prices_call\n self.assertListEqual(kwargs[\"codes\"], [\"test-db\"])\n self.assertEqual(kwargs[\"start_date\"], \"2018-04-25\")\n self.assertEqual(kwargs[\"end_date\"], \"2018-05-04\")\n self.assertEqual(kwargs[\"fields\"], ['Open', 'Close', 'Volume'])\n self.assertIsNone(kwargs[\"timezone\"])\n self.assertTrue(kwargs[\"infer_timezone\"])",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def returnDepositsWithdrawals(self,\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass",
"def test_get_balance_between_stocktakings(self):\n # The data required to generate the test case can be reused from the\n # unit test \"test_helper_stocktakings\".\n TestHelpersStocktakingsTestCase().test_balance_between_stocktakings_two_stocktakings()\n\n # Prepare the request url and the request params.\n url = \"/stocktakingcollections/balance\"\n params = {\"start_id\": 1, \"end_id\": 2}\n\n # Do the API request.\n res = self.get(url, role=\"admin\", params=params)\n self.assertEqual(res.status_code, 200)\n balance = json.loads(res.data)\n\n # Check the data\n self.assertTrue(\"products\" in balance)\n products = balance[\"products\"]\n\n # Check if all products are in the balance\n self.assertEqual({\"1\", \"2\", \"3\", \"4\"}, set(products.keys()))\n\n # Check purchase count\n self.assertEqual(products[\"1\"][\"purchase_count\"], 3)\n self.assertEqual(products[\"2\"][\"purchase_count\"], 5)\n self.assertEqual(products[\"3\"][\"purchase_count\"], 8)\n self.assertEqual(products[\"4\"][\"purchase_count\"], 0)\n\n # Check purchase sum price\n self.assertEqual(products[\"1\"][\"purchase_sum_price\"], 900)\n self.assertEqual(products[\"2\"][\"purchase_sum_price\"], 250)\n self.assertEqual(products[\"3\"][\"purchase_sum_price\"], 800)\n self.assertEqual(products[\"4\"][\"purchase_sum_price\"], 0)\n\n # Check replenish count\n self.assertEqual(products[\"1\"][\"replenish_count\"], 10)\n self.assertEqual(products[\"2\"][\"replenish_count\"], 0)\n self.assertEqual(products[\"3\"][\"replenish_count\"], 5)\n self.assertEqual(products[\"4\"][\"replenish_count\"], 0)\n\n # Check differences\n self.assertEqual(products[\"1\"][\"difference\"], -57)\n self.assertEqual(products[\"2\"][\"difference\"], -20)\n self.assertEqual(products[\"3\"][\"difference\"], -10)\n self.assertEqual(products[\"4\"][\"difference\"], -30)\n\n # Check balance\n self.assertEqual(products[\"1\"][\"balance\"], -57 * 300)\n self.assertEqual(products[\"2\"][\"balance\"], -20 * 50)\n self.assertEqual(products[\"3\"][\"balance\"], -10 * 100)\n self.assertEqual(products[\"4\"][\"balance\"], -30 * 200)\n\n # Check overall balance\n self.assertEqual(balance[\"balance\"], -25100)\n self.assertEqual(balance[\"loss\"], 25100)\n self.assertEqual(balance[\"profit\"], 0)"
]
| [
"0.7997928",
"0.7800163",
"0.6249587",
"0.5947075",
"0.5603603",
"0.5390032",
"0.53497434",
"0.5313137",
"0.52879274",
"0.52468204",
"0.52310777",
"0.5218465",
"0.5213669",
"0.5169804",
"0.5147557",
"0.5146794",
"0.5145016",
"0.5138725",
"0.51375633",
"0.50891036",
"0.5077854",
"0.5075926",
"0.5071422",
"0.50606155",
"0.5039411",
"0.5038881",
"0.50368756",
"0.5030697",
"0.5024427",
"0.5023491"
]
| 0.93086916 | 0 |
Test case for get_opening_balance_journals_key | def test_get_opening_balance_journals_key(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_opening_balance_journals(self):\n pass",
"def test_delete_opening_balance_journals_key(self):\n pass",
"def test_post_opening_balance_journals(self):\n pass",
"def test_journals_paged_fields(self, api_client):\n rv = api_client.get(\"/journals-paged\")\n json_data = rv.get_json()\n sample = next(\n (item for item in json_data[\"results\"] if item[\"issn_l\"] == \"1907-1760\"),\n None,\n )\n top_level_keys = [\n \"id\",\n \"issn_l\",\n \"issns\",\n \"title\",\n \"publisher\",\n \"previous_issn_ls\",\n \"other_titles\",\n \"journal_metadata\",\n \"total_dois\",\n \"dois_by_issued_year\",\n \"sample_dois\",\n \"subscription_pricing\",\n \"apc_pricing\",\n \"open_access\",\n \"status\",\n \"status_as_of\",\n ]\n\n i = 0\n for key in sample.keys():\n assert key == top_level_keys[i]\n i += 1",
"def test_open_ru_ballance(self, ):\n if self.report_type == 'open.ru':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals()\n repo_deals = self.get_repo_deals()\n \n if self.open_ru_report_type == 'stock':\n comm = self.open_ru_get_micex_commission(deals, repo_deals)\n elif self.open_ru_report_type == 'future':\n atl = self.get_account_totally_line()\n comm = self.open_ru_get_forts_comm(atl)\n ballance = sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('price')) *\n float(d.getAttribute('quantity'))\n for d in deals])\n ballance += sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('deal_price')) *\n float(d.getAttribute('quantity'))\n for d in repo_deals])\n ballance += 10000 - comm # 10000 is the initial account amount\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n self.assertAlmostEqual(ballance, accs[0]['current_money'])",
"def test_wallets_get(self):\n pass",
"def test_companies_company_id_data_journal_entries_get(self):\n pass",
"def test_carryover(self):\n carryover = Money(100)\n available = {Decimal(0.5): carryover}\n self.forecast(available)\n self.assertEqual(\n self.forecast.carryover,\n carryover)",
"def test_api_key():\n assert gather_stock_returns('abc', 'AAPL', buy_date, sell_date) == msg3",
"def test_companies_company_id_data_bill_credit_notes_get(self):\n pass",
"def test_get_order_buyer_info(self):\n pass",
"def test_biweekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 700)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))",
"def test_get_pay_in_details(self):\n pass",
"def test_companies_company_id_data_bill_credit_notes_bill_credit_note_id_get(self):\n pass",
"def test_companies_company_id_data_journal_entries_journal_entry_id_get(self):\n pass",
"def test_get_state_comparison_stats_foreign_born(self):\n\n key = '% Foreign-born employees'\n\n us = CountryFactory(code2='US')\n germany = CountryFactory(name='Germany')\n\n # Mississippi: 0 employees born in Germany, 3 in US\n for _ in range(3):\n employee = EmployeeFactory(place_of_birth=PlaceFactory(country=us))\n employee.bureau_states.add(self.mississippi)\n\n # Texas: 2 employees born in Germany, 2 in US\n for _ in range(2):\n employee = EmployeeFactory(place_of_birth=PlaceFactory(country=germany))\n employee.bureau_states.add(self.texas)\n for _ in range(2):\n employee = EmployeeFactory(place_of_birth=PlaceFactory(country=us))\n employee.bureau_states.add(self.texas)\n\n # Kentucky: 3 employees born in Germany, 1 employee in US, 3 unknown\n for _ in range(3):\n employee = EmployeeFactory(place_of_birth=PlaceFactory(country=germany))\n employee.bureau_states.add(self.kentucky)\n for _ in range(1):\n employee = EmployeeFactory(place_of_birth=PlaceFactory(country=us))\n employee.bureau_states.add(self.kentucky)\n for _ in range(3):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.kentucky)\n\n expected_output = [('Kentucky', 75), ('Texas', 50)]\n\n stats = get_state_comparison_stats(number=2)\n top_states = self.get_state_stats_for_key(stats, key)\n\n self.assertListEqual(top_states, expected_output,\n \"'{key}' should contain states with the top x % foreign-born employees\")",
"def test_get_next_ops_meeting():\n result = schedule.get_next_workshop()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result['date'], arrow.Arrow), 'date is not a date'",
"def test_employees_by_salary_index(self):\n key=\"employees-by-salary\"\n emps_by_salary = {50000: ['5'], 75000: ['4'], 80000: ['3'], 120000: ['2'],\n 100000: ['1']}\n self.mapper.map(\"select id, salary from redmate.employees\") \\\n .to_sorted_set(key_pattern=key, score=\"salary\")\n self.mapper.run()\n\n for sal in emps_by_salary.items():\n self.assertEqual(sal[1],\n self.redis.zrangebyscore(key, sal[0] - 1, sal[0] + 1))",
"def test_companies_company_id_push_push_operation_key_get(self):\n pass",
"def test_get_party_by_business_id_success_with_collection_exercise_id(self):\n with responses.RequestsMock() as rsps:\n url = f\"{url_get_business_party}?collection_exercise_id={collection_exercise['id']}&verbose=True\"\n rsps.add(rsps.GET, url, json=business_party, status=200)\n with app.app_context():\n business = party_controller.get_party_by_business_id(\n business_party[\"id\"],\n self.app_config[\"PARTY_URL\"],\n self.app_config[\"BASIC_AUTH\"],\n collection_exercise[\"id\"],\n )\n self.assertEqual(business[\"id\"], business_party[\"id\"])\n self.assertEqual(business[\"name\"], business_party[\"name\"])",
"def test_get_max_key_entry(self):\n order_dict = OrderDict()\n\n order_2 = StockOrderWrapper(self.order_2)\n order_3 = StockOrderWrapper(self.order_3)\n order_5 = StockOrderWrapper(self.order_5)\n order_7 = StockOrderWrapper(self.order_7)\n\n order_2.stock_order.order_status = DEFINITIVE\n order_3.stock_order.order_status = DEFINITIVE\n order_5.stock_order.order_status = DEFINITIVE\n order_7.stock_order.order_status = DEFINITIVE\n\n order_dict.add_order(1.125, order_2)\n order_dict.add_order(10.321, order_3)\n order_dict.add_order(1.4, order_5)\n order_dict.add_order(9.321, order_7)\n\n # =================================================================\n # test: max_key is created\n # =================================================================\n\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_5]\n self.assertEqual(max_key_entry, exp_entry)\n\n # =================================================================\n # test: max_key is updated after remove order\n # =================================================================\n\n # order_dict.remove_order(key=.4, order=order_5)\n order_dict.remove_max_key()\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_3, order_7]\n self.assertEqual(max_key_entry, exp_entry)\n\n # =================================================================\n # test: max_key is updated after remove entry\n # =================================================================\n\n # order_dict.remove_entry(key=.321)\n order_dict.remove_max_key()\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_2]\n self.assertEqual(max_key_entry, exp_entry)",
"def test_get_party_by_business_id_success_without_collection_exercise_id(self):\n with responses.RequestsMock() as rsps:\n rsps.add(rsps.GET, url_get_business_party, json=business_party, status=200)\n with app.app_context():\n business = party_controller.get_party_by_business_id(\n business_party[\"id\"], self.app_config[\"PARTY_URL\"], self.app_config[\"BASIC_AUTH\"]\n )\n\n self.assertEqual(business[\"id\"], business_party[\"id\"])\n self.assertEqual(business[\"name\"], business_party[\"name\"])",
"def test_happy_path_scenario(self):\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)]) # progress invoice\n credit_jobs([(self.job, A(100), A(0), A(0))], D(100)) # progress payment\n debit_jobs(\n [(self.job, A(480), Entry.FLAT_DEBIT)], recognize_revenue=True\n ) # final invoice\n credit_jobs([(self.job, A(800), A(60), A(0))], D(800)) # final payment\n\n self.assert_balances(\n bank=A(900, 0, 0),\n invoiced=A(960),\n paid=A(-960),\n debited=A(480 * 2 + 380),\n credited=A(-480 * 2 - 380),\n income=A(960).net_amount,\n tax=A(900).tax_amount,\n discounts=A(-60).net_amount,\n )\n\n total_income = income_account().balance + discount_account().balance\n self.assertEqual(total_income, A(900).net_amount)",
"def test_create_payment_verify_amounts(self):\n test_shift, _, __ = self._make_shift(\n shiftkwargs={'status': 'OPEN', 'starting_at': timezone.now(),\n 'ending_at': timezone.now() + timedelta(hours=8), 'minimum_hourly_rate': 15,\n 'minimum_allowed_rating': 0, 'maximum_clockin_delta_minutes': 15,\n 'maximum_clockout_delay_minutes': 15, 'maximum_allowed_employees': 5,\n 'employees': self.test_employee},\n employer=self.test_employer)\n test_period = mixer.blend('api.PayrollPeriod', employer=self.test_employer)\n\n payload = {\n 'payroll_period': test_period.id,\n 'employee': self.test_employee.id,\n 'employer': self.test_employer.id,\n 'shift': test_shift.id,\n 'splited_payment': True,\n 'status': 'PENDING',\n 'breaktime_minutes': 5,\n 'regular_hours': 6,\n 'over_time': 2.42,\n 'hourly_rate': 8.4,\n 'total_amount': 13,\n }\n url = reverse_lazy('api:me-get-payroll-payments-employer')\n response = self.client.post(url, data=payload)\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('breaktime_minutes'), 5, response_json)\n self.assertEqual(Decimal(response_json.get('regular_hours')), Decimal(payload.get('regular_hours')), response_json)\n self.assertEqual(Decimal(response_json.get('over_time')), Decimal(str(payload.get('over_time'))), response_json)\n self.assertIsNotNone(response_json.get('hourly_rate'), response_json)\n self.assertIsNotNone(response_json.get('total_amount'), response_json)\n total_amount = Decimal(str(\n math.trunc((Decimal(response_json.get('regular_hours')) + Decimal(response_json.get('over_time')))\n * Decimal(response_json.get('hourly_rate')) * 100) / 100\n ))\n self.assertEqual(Decimal(response_json.get('total_amount')), total_amount, response_json)",
"def money_in_bank_given(initial, time_weeks):\n return initial * 2 ** time_weeks",
"def test_fetch_open_data(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n gwpy_data = TimeSeries.fetch_open_data(*args)\n np.testing.assert_almost_equal(pesummary_data.value, gwpy_data.value)\n np.testing.assert_almost_equal(\n pesummary_data.times.value, gwpy_data.times.value\n )\n assert isinstance(pesummary_data.gwpy, TimeSeries)\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.times.value, gwpy_data.times.value\n )\n assert pesummary_data.IFO == \"L1\"\n assert list(pesummary_data.strain_dict.keys()) == [\"L1\"]\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].times.value, gwpy_data.times.value\n )",
"def test_get_active_cco(session):\n business = factory_business('BC1234567')\n filing_dict = copy.deepcopy(FILING_HEADER)\n filing_dict['filing']['consentContinuationOut'] = copy.deepcopy(CONSENT_CONTINUATION_OUT)\n filing = factory_completed_filing(business, filing_dict)\n\n expiry_date = get_cco_expiry_date(filing.effective_date)\n\n consent_continuation_out = ConsentContinuationOut()\n consent_continuation_out.foreign_jurisdiction = 'CA'\n consent_continuation_out.foreign_jurisdiction_region = 'AB'\n consent_continuation_out.expiry_date = expiry_date\n consent_continuation_out.business_id = business.id\n consent_continuation_out.filing_id = filing.id\n consent_continuation_out.save()\n\n cco = consent_continuation_out.get_active_cco(business.id, filing.effective_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date, 'CA', 'AB')\n assert cco\n\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date + datedelta.datedelta(days=1))\n assert not cco",
"def test_weekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 600)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 14, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 28, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))",
"def test_update_available(self):\n available = {}\n self.forecast(available)\n # There should be at least 26 inflows for the 26 biweekly\n # pay periods:\n self.assertGreaterEqual(\n len(self.forecast.transactions[available]),\n 26)\n # Assuming there are no other inflows or outflows (and, since\n # this is the first year, there shouldn't be as there are no\n # carryovers), the sum of inflows should be 150*26=3900\n self.assertAlmostEqual(\n sum(self.forecast.transactions[available].values()),\n Money(3900),\n places=2)",
"def test_get_test_organization_api_key(self):\n pass"
]
| [
"0.814138",
"0.7337289",
"0.66358453",
"0.5097487",
"0.5004364",
"0.49763408",
"0.49549302",
"0.49017188",
"0.48923275",
"0.48898348",
"0.48454645",
"0.4810751",
"0.47996962",
"0.47848547",
"0.47577968",
"0.4739",
"0.47346452",
"0.47301468",
"0.4718948",
"0.471765",
"0.47082657",
"0.47074428",
"0.46992433",
"0.46948248",
"0.4661892",
"0.46535552",
"0.46395501",
"0.46369243",
"0.46207175",
"0.45761466"
]
| 0.9500056 | 0 |
Test case for post_opening_balance_journals | def test_post_opening_balance_journals(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_opening_balance_journals(self):\n pass",
"def test_get_opening_balance_journals_key(self):\n pass",
"def test_delete_opening_balance_journals_key(self):\n pass",
"def test_finalize_and_open_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period2.id})\n self.client.force_login(self.test_user_employer)\n # change from OPEN to FINALIZE\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 1)\n # change from FINALIZE to OPEN\n response = self.client.put(url, data={'status': 'OPEN'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'OPEN', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty)",
"def test_create_payment_verify_amounts(self):\n test_shift, _, __ = self._make_shift(\n shiftkwargs={'status': 'OPEN', 'starting_at': timezone.now(),\n 'ending_at': timezone.now() + timedelta(hours=8), 'minimum_hourly_rate': 15,\n 'minimum_allowed_rating': 0, 'maximum_clockin_delta_minutes': 15,\n 'maximum_clockout_delay_minutes': 15, 'maximum_allowed_employees': 5,\n 'employees': self.test_employee},\n employer=self.test_employer)\n test_period = mixer.blend('api.PayrollPeriod', employer=self.test_employer)\n\n payload = {\n 'payroll_period': test_period.id,\n 'employee': self.test_employee.id,\n 'employer': self.test_employer.id,\n 'shift': test_shift.id,\n 'splited_payment': True,\n 'status': 'PENDING',\n 'breaktime_minutes': 5,\n 'regular_hours': 6,\n 'over_time': 2.42,\n 'hourly_rate': 8.4,\n 'total_amount': 13,\n }\n url = reverse_lazy('api:me-get-payroll-payments-employer')\n response = self.client.post(url, data=payload)\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('breaktime_minutes'), 5, response_json)\n self.assertEqual(Decimal(response_json.get('regular_hours')), Decimal(payload.get('regular_hours')), response_json)\n self.assertEqual(Decimal(response_json.get('over_time')), Decimal(str(payload.get('over_time'))), response_json)\n self.assertIsNotNone(response_json.get('hourly_rate'), response_json)\n self.assertIsNotNone(response_json.get('total_amount'), response_json)\n total_amount = Decimal(str(\n math.trunc((Decimal(response_json.get('regular_hours')) + Decimal(response_json.get('over_time')))\n * Decimal(response_json.get('hourly_rate')) * 100) / 100\n ))\n self.assertEqual(Decimal(response_json.get('total_amount')), total_amount, response_json)",
"def test_balance_tracking(self):\n # TODO\n pass",
"def test_happy_path_scenario(self):\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)]) # progress invoice\n credit_jobs([(self.job, A(100), A(0), A(0))], D(100)) # progress payment\n debit_jobs(\n [(self.job, A(480), Entry.FLAT_DEBIT)], recognize_revenue=True\n ) # final invoice\n credit_jobs([(self.job, A(800), A(60), A(0))], D(800)) # final payment\n\n self.assert_balances(\n bank=A(900, 0, 0),\n invoiced=A(960),\n paid=A(-960),\n debited=A(480 * 2 + 380),\n credited=A(-480 * 2 - 380),\n income=A(960).net_amount,\n tax=A(900).tax_amount,\n discounts=A(-60).net_amount,\n )\n\n total_income = income_account().balance + discount_account().balance\n self.assertEqual(total_income, A(900).net_amount)",
"def test_open_ru_ballance(self, ):\n if self.report_type == 'open.ru':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals()\n repo_deals = self.get_repo_deals()\n \n if self.open_ru_report_type == 'stock':\n comm = self.open_ru_get_micex_commission(deals, repo_deals)\n elif self.open_ru_report_type == 'future':\n atl = self.get_account_totally_line()\n comm = self.open_ru_get_forts_comm(atl)\n ballance = sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('price')) *\n float(d.getAttribute('quantity'))\n for d in deals])\n ballance += sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('deal_price')) *\n float(d.getAttribute('quantity'))\n for d in repo_deals])\n ballance += 10000 - comm # 10000 is the initial account amount\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n self.assertAlmostEqual(ballance, accs[0]['current_money'])",
"def test_overpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(580), A(0), A(0))], D(580))\n diff = A(500) - A(580)\n self.assert_balances(\n bank=A(580, 0, 0),\n invoiced=A(500),\n paid=A(-580),\n partial=A(580).net_amount,\n tax=A(580).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment",
"def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment",
"def test_post_pay(self):\n freelancer = self.valid_freelancers[0]\n client = self.valid_clients[0]\n amount = 150\n\n client_id = self.create_object('api/clients',\n client)\n freelancer_id = self.create_object('api/freelancers',\n freelancer)\n\n client['id'] = client_id\n job1 = self.valid_jobs[0]\n\n new_job_id = self._register_job(job1, client)\n payment = {'freelancer_id': freelancer_id,\n 'job_id': new_job_id,\n 'amount': amount}\n\n fwallet, cwallet = self._get_wallets(freelancer_id, client_id)\n\n response = requests.post(self.BASE_API + self.ENDPOINT,\n data=json.dumps(payment))\n\n self.assertEqual(200, response.status_code,\n msg=(f'Error in payment transaction '\n f'Response code: {response.status_code}'))\n\n registered_fwallet, registered_cwallet = self._get_wallets(freelancer_id, client_id)\n\n expected_fwallet = fwallet + amount\n expected_cwallet = cwallet - amount\n\n self.assertEqual(expected_cwallet,\n registered_cwallet,\n msg=\"Inconsistent client's wallet after payment\")\n\n self.assertEqual(expected_fwallet,\n registered_fwallet,\n msg=\"Inconsistent freelancer's wallet after payment\")",
"def test_open_ru_ballance_after_make_position(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n before = accs[0]['current_money']\n self.model.tamake_positions_for_whole_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n after = accs[0]['current_money']\n self.assertAlmostEqual(before, after)",
"def test_finalize_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with no over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee2.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('300.00'), employee_payment.earnings)",
"def test_finalize_period_overtime(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('360.00') + Decimal('450.00'),\n employee_payment.earnings)",
"def test_charge_correct_for_fiction_after_close(self):\n rental = create_test_rental(\n book=self.book2,\n customer=self.user1,\n date_borrowed=\"2019-05-22 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"9.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)",
"def test_balance(self):\n\n self.assertEqual(self.cash_report.balance(), 150)",
"def test_payment(self):\n debit_jobs([(self.job, A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def backtest(self, strategy): \n \n # load data\n data = self.import_data(currency_ids = np.unique(self.signals.currency_id),\n start_date = self.start_date,\n end_date = self.end_date)\n \n # add cash and columns for currencies\n data[\"capital\"] = 0\n data[\"actions\"] = \"\" # should become a dictW \n data[\"description\"] = \"\"\n data[\"capital\"].iloc[0] = self.initial_capital\n data[\"worth_usd\"] = 0\n \n for currency_id in np.unique(self.signals.currency_id):\n data[currency_id + '_position'] = 0\n \n # a sell or buy can influence subsequent positions, so calculate iteratively\n for observation in range(1, len(data.index)):\n \n date = data.index[observation]\n \n # investment this period is zero\n investment_capital_period = 0\n \n # amount of currency_ids initially same as last period\n for currency_id in np.unique(self.signals.currency_id):\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] \n \n # at each point, compute size of each position (cash and currencies), and record actions\n if(data.index[observation] in self.signals.index):\n \n action_df = pd.DataFrame(columns=list([\"Currency\",\"NominalAmount\", \"CapitalAmount\"]))\n \n # could be multiple actions\n for index, action in self.signals.loc[date].iterrows(): \n currency_id = action['currency_id']\n signal = action['signal']\n \n # Buy\n if signal == 1:\n \n # buy for 10% currency_id\n investment_capital = data[\"capital\"].iloc[observation-1] * 0.10 \n\n # estimate how many coins\n investment_nominal = round(investment_capital / data[currency_id].iloc[observation])\n \n # calculate exact capital needed\n investment_capital_exact = investment_nominal * data[currency_id].iloc[observation]\n investment_capital_period = investment_capital_period + investment_capital_exact \n \n # change the amount of currency hold\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] + investment_nominal\n \n # report action by appending a Series to the (empty) dataframe\n action_df = action_df.append(pd.Series({\"Currency\": currency_id, \n \"NominalAmount\": investment_nominal, \n \"CapitalAmount\": investment_capital_exact}),ignore_index=True)\n \n # report description\n data[\"description\"].iloc[observation] = (data[\"actions\"].iloc[observation] + \"\\n Buy \" + \n str(investment_nominal) + \" \" + str(currency_id) + \n \" for \" + str(investment_capital_exact))\n \n # Sell\n if signal == -1:\n \n # sell currency_id for 10% of total capital\n investment_capital = data[\"capital\"].iloc[observation-1] * 0.10 \n \n # estimate how many coins\n investment_nominal = round(investment_capital / data[currency_id].iloc[observation])\n \n # calculate exact capital needed\n investment_capital_exact = investment_nominal * data[currency_id].iloc[observation]\n investment_capital_period = investment_capital_period - investment_capital_exact\n \n # change the amount of currency hold\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] - investment_nominal\n \n # report action\n action_df = action_df.append(pd.Series({\"Currency\": currency_id, \n \"NominalAmount\": investment_nominal, \n \"CapitalAmount\": investment_capital_exact}),ignore_index=True)\n \n # report description\n data[\"description\"].iloc[observation] = data[\"actions\"].iloc[observation] + \"Sell \" + str(investment_nominal) + \" \" + str(currency_id) + \" for \" + str(investment_capital_exact)\n \n # report actions\n 
data[\"actions\"].iloc[observation] = action_df.to_json()\n \n # calculate resulting cash capital\n data[\"capital\"].iloc[observation] = data[\"capital\"].iloc[observation-1] - investment_capital_period\n \n # calculate worth by capital (usd) and each currency * price\n data[\"worth_usd\"].iloc[observation] = data[\"capital\"].iloc[observation] \n return data",
"def test_issue_post_issue_reaction(self):\n pass",
"def test_preliminary(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n if self.deals_count >= 0:\n self.assertEqual(self.deals_count, self.model._sqlite_connection.execute('select count(*) from deals').fetchone()[0])\n print('deals count passed')\n if self.report_type == 'open.ru':\n if self.open_ru_report_type == 'stock':\n pt = self.model.get_paper_type('stock')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')\n elif self.open_ru_report_type == 'future':\n pt = self.model.get_paper_type('future')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')",
"def test_is_payday_positive6(self):\n # Overriding first_payday\n self.first_payday = date_class(2020,12,24)\n date_to_check = date_class(2021,1,8)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True",
"def test_amendment_court_order_success(session, client, jwt):\n # setup\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n rv1 = create_financing_test(session, client, jwt)\n assert rv1.status_code == HTTPStatus.CREATED\n assert rv1.json['baseRegistrationNumber']\n base_reg_num = rv1.json['baseRegistrationNumber']\n\n json_data = copy.deepcopy(SAMPLE_JSON)\n json_data['baseRegistrationNumber'] = base_reg_num\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n json_data['changeType'] = 'CO'\n del json_data['createDateTime']\n del json_data['amendmentRegistrationNumber']\n del json_data['payment']\n del json_data['removeTrustIndenture']\n del json_data['addTrustIndenture']\n del json_data['deleteDebtors']\n del json_data['documentId']\n json_data['deleteDebtors'] = rv1.json['debtors']\n del json_data['deleteSecuredParties']\n json_data['deleteSecuredParties'] = rv1.json['securedParties']\n del json_data['deleteGeneralCollateral']\n json_data['deleteGeneralCollateral'] = rv1.json['generalCollateral']\n del json_data['deleteVehicleCollateral']\n json_data['deleteVehicleCollateral'] = rv1.json['vehicleCollateral']\n# print(json_data)\n\n # test\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n\n # check\n# print(rv.json)\n assert rv.status_code == HTTPStatus.CREATED\n json_data = rv.json\n assert 'amendmentRegistrationNumber' in json_data\n assert len(json_data['changes']) >= 1\n assert 'amendmentRegistrationNumber' in json_data['changes'][0]\n assert json_data['baseRegistrationNumber'] == base_reg_num\n assert json_data['changes'][0]['baseRegistrationNumber'] == base_reg_num",
"def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def final_strategy_test():\r\n print('-- Testing final_strategy --')\r\n print('Win rate:', compare_strategies(final_strategy))",
"def test_overpayment_then_new_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(960), A(0), A(0))], D(960))\n self.assert_balances(\n bank=A(960, 0, 0),\n balance=A(-480),\n invoiced=A(480),\n paid=A(-960),\n income=A(480).net_amount,\n tax=A(480).tax_amount,\n )\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)])\n self.assert_balances(\n bank=A(960, 0, 0),\n invoiced=A(960),\n paid=A(-960),\n income=A(960).net_amount,\n tax=A(960).tax_amount,\n )",
"def test_create_routing_slip_usd_one_of_payments(session, staff_user_mock):\n routing_slip_payload: Dict[str, any] = {\n 'number': '206380834',\n 'routingSlipDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'paymentAccount': {\n 'accountName': 'TEST'\n },\n 'payments': [\n {\n 'paymentMethod': PaymentMethod.CHEQUE.value,\n 'paymentDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'chequeReceiptNumber': '123',\n 'paidAmount': 100\n },\n {\n 'paymentMethod': PaymentMethod.CHEQUE.value,\n 'paymentDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'chequeReceiptNumber': '123',\n 'paidAmount': 100,\n 'paidUsdAmount': 80\n }\n ]\n }\n\n rs = RoutingSlip_service.create(routing_slip_payload)\n assert rs\n assert rs.get('total_usd') == 80\n cfs_account_model: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(\n rs.get('payment_account').get('id'))\n assert cfs_account_model.status == CfsAccountStatus.PENDING.value",
"def test_bidding_round_handle_transactions(self):\n self.order_1.save()\n self.order_2.save()\n self.order_3.save()\n self.order_4.save()\n self.order_5.save()\n self.order_6.save()\n self.order_7.save()\n self.order_8.save()\n self.order_9.save()\n self.order_10.save()\n self.order_11.save()\n self.order_12.save()\n self.order_13.save()\n\n # =================================================================\n # test: sell order has more stocks then sell-person\n # =================================================================\n\n self.person_2.number_of_stocks = 0\n self.person_2.save()\n\n try:\n self.bidding_round_manager.handle_transactions(bidding_rounds=[self.bidding_round])\n raise AssertionError('ExceedMaxSellSharesException expected')\n except ExceedMaxSellSharesException:\n pass",
"def test_create_routing_slip_usd_both_payments(session, staff_user_mock):\n routing_slip_payload: Dict[str, any] = {\n 'number': '206380834',\n 'routingSlipDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'paymentAccount': {\n 'accountName': 'TEST'\n },\n 'payments': [\n {\n 'paymentMethod': PaymentMethod.CHEQUE.value,\n 'paymentDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'chequeReceiptNumber': '123',\n 'paidAmount': 120,\n 'paidUsdAmount': 100\n },\n {\n 'paymentMethod': PaymentMethod.CHEQUE.value,\n 'paymentDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'chequeReceiptNumber': '123',\n 'paidAmount': 100,\n 'paidUsdAmount': 80\n }\n ]\n }\n\n rs = RoutingSlip_service.create(routing_slip_payload)\n assert rs\n assert rs.get('total_usd') == 180\n cfs_account_model: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(\n rs.get('payment_account').get('id'))\n assert cfs_account_model.status == CfsAccountStatus.PENDING.value",
"def test_overlap(self):\r\n t = Expense(name = \"fake lunch\",\r\n amount = 1.,\r\n on = (WeeklyRecurring(FR,\r\n fromdt = self.fromdt,\r\n todt = self.todt),\r\n DailyRecurring(fromdt = self.fromdt, \r\n todt = self.todt)))\r\n\r\n self.m.addTransaction(t)\r\n self.assertEqual(self.m.totalSaved(self.fromdt, self.todt), -365.)",
"def test_charge_correct_for_regular_after_close_1_day(self):\n rental = create_test_rental(\n book=self.book3,\n customer=self.user1,\n date_borrowed=\"2019-05-24 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"2.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)"
]
| [
"0.84121585",
"0.69012296",
"0.649312",
"0.6370543",
"0.5991055",
"0.5923325",
"0.5892853",
"0.5802448",
"0.5717177",
"0.56970596",
"0.5671049",
"0.56420887",
"0.5641799",
"0.56363505",
"0.5616931",
"0.56020135",
"0.5594999",
"0.55920017",
"0.55864936",
"0.5533124",
"0.5510422",
"0.5487162",
"0.5479931",
"0.5462342",
"0.54474396",
"0.5440515",
"0.54302496",
"0.5422938",
"0.5415637",
"0.540302"
]
| 0.9369089 | 0 |
Go to the page 'url', find the next link to go to, then extract the JSON query result, find the desired train, and display the results. | def main(url, MY_OUTWARD_TIME_MINI, MY_OUTWARD_TIME_MAXI="23:59"):
MY_OUTWARD_TIME_MINI = MY_OUTWARD_TIME_MINI.replace("h", ":")
MY_OUTWARD_TIME_MAXI = MY_OUTWARD_TIME_MAXI.replace("h", ":")
# Create the web browser object
b = RB(history=True, allow_redirects=True)
# Open the page
b.open(url)
# Find the next page to go
res = str(b.select("#url_redirect_proposals")[0])
# # - First solution: manual search
# offset = 4 + res.index('hid=')
# length = 3
# key = res[offset: offset + length]
# print("key =", key)
# next_url = url1 + str(key)
# print("1. Next url =", next_url)
# - Second solution: search with a regexp
m = url_finder.search(res)
next_url = m.string[m.start() : m.end()]
print("Next url =", next_url, "...")
# Follow this url
b.open(next_url)
# Get the data.query part
script = b.select("#vsc-preloaded-data-snippet")[0]
content = script.contents[0]
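# the preloaded-data script embeds both data.query and data.searchResponse as escaped JSON strings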
# 1. Search for the query to display it nicely again
m = query_finder.search(content)
jsontext = m.string[m.start() : m.end()]
# print(jsontext)
beginning = "data.query = JSON.parse('"
end = "');"
query = jsontext[len(beginning) : -len(end)]
jsonrawstr = query.replace(r"\"", '"').replace(r"\'", "'") # \" > ", \' > '
# print(jsonrawstr)
jsonobj = json.loads(jsonrawstr)
# print(json.dumps(jsonobj, sort_keys=True, indent=4))
# 2. Search for the result
m = searchResponse_finder.search(content)
jsontext = m.string[m.start() : m.end()]
# print(jsontext)
beginning = "data.searchResponse = JSON.parse('"
end = "');"
searchResponse = jsontext[len(beginning) : -len(end)]
# print(searchResponse)
jsonrawstr = searchResponse.replace(r"\"", '"').replace(
r"\'", "'"
) # \" > ", \' > '
# print(jsonrawstr)
jsonobj = json.loads(jsonrawstr)
# print(json.dumps(jsonobj, sort_keys=True, indent=4))
"""
with open('output.json', 'w+') as f:
json.dump(jsonobj, f, sort_keys=True, indent=4)
"""
# 3. Display the schedules
print("\nDifferent departure times:")
horaires = [i["departureDate"] for i in jsonobj["trainProposals"]]
print(horaires)
for number, h in enumerate(horaires):
print("Pour un train partant a :", h)
prices = jsonobj["trainProposals"][number]["priceProposals"]
if len(prices) > 0:
prix = prices[0]["amount"]
print("\tPrix TGV minimum", "=", prix, "euros.")
else:
print("\tTrain complet.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_details_json(self, url):\n response = self.get_response(url)\n if response:\n html_soup = BeautifulSoup(response.text, 'html.parser')\n listings_json = html_soup.find('script', id='__NEXT_DATA__')\n if listings_json:\n listings_json = str(listings_json)\n listings_json = listings_json.replace(\"<script id=\\\"__NEXT_DATA__\\\" type=\\\"application/json\\\">\", \"\").replace(\"</script>\", \"\")\n listings = json.loads(listings_json)\n return listings\n else:\n skip_scraper(self.college, 'Trulia')",
"def query_and_fetch(query, top_n=12):\n global url_details, url_text\n print('Query: ' + query + '; Top N: ' + str(top_n))\n url_details = []\n url_text = []\n driver = None\n bad_request = False\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + query + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + query)\n Fetcher.close_selenium_driver(driver)\n Fetcher.search_driver = None\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(url_details) < top_n:\n urls = []\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n print(new_url)\n urls.append(new_url)\n\n fetched_result = Fetcher.fetch_multiple(urls, top_n)\n\n for fetched_data in fetched_result:\n if not fetched_data[1] or len(fetched_data[1].strip()) == 0:\n continue\n details = dict()\n details['url'] = fetched_data[0]\n details['html'] = fetched_data[1]\n details['title'] = fetched_data[2]\n details['label'] = predict(fetched_data[3])\n url_details.append(details)\n url_text.append(fetched_data[3])\n if len(url_details) == top_n:\n break\n\n # Infinite Scroll\n if len(url_details) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: '+ query + ' and fetching results')\n #finally:\n # if driver is not None:\n # Fetcher.close_selenium_driver(driver)\n setattr(flask.current_app, 'url_text', url_text)\n print('Search Completed')\n return url_details",
"def parse(self, response):\n print('爬取链接',response.url)\n self.logger.info('爬取链接{}'.format(response.url))\n pattern=re.compile('q=([\\u4e00-\\u9fa5_a-zA-Z0-9]{0,})')\n target_url=unquote(response.url)\n keyword=re.findall(pattern,target_url)\n self.logger.info('组合{}'.format(keyword))\n print('组合{}'.format(keyword))\n js = json.loads(response.body.decode('utf-8'))\n print(js)\n\n if js.get('code')!=501:\n if js.get('totalCount') and js.get('totalCount') !=0:\n #proceed to next page\n total_count = js['totalCount']\n current_url_id = js['q']\n\n yield self.parse_detail(response,js)\n else:\n yield Request(url=response.url, callback=self.parse, dont_filter=True)",
"def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the link is not present, then the search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1",
"def crawl(self) -> None:\n result = self.__exec_request(self.url)\n if result == \"failed\":\n raise InterruptedError(\"The server responded with status code: {}\".format(self._status_code))\n self.__save_relevants_in_results(result, total=True)\n self.total_nums = self.results[\"total_results\"]\n pbar = tqdm(total=self.total_nums / 100) if self.to_be_num > self.total_nums else tqdm(total=self.to_be_num/100)\n pbar.update(1)\n if len(self.results[\"documents\"]) != self.to_be_num:\n while self.num_res < self.total_nums:\n # print(\"Is: {} | To be: {}\".format(self.num_res, self.total_nums))\n for el in result['search-results']['link']:\n if el['@ref'] == 'next':\n next_url = el['@href']\n result = self.__exec_request(next_url)\n if result == \"failed\":\n print(\"Invalid request. Server responded with Statuscode 400 while crawling. \"\n \"The found articles will be saved further on...\")\n break\n self.__save_relevants_in_results(result)\n pbar.update(1)\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n pbar.close()",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)",
"def main(url):\n words = fetch_words(url)\n print_items(words)",
"def main(url):\n words = fetch_words(url)\n\n print_items(words)",
"def main(url):\n \n words = fetch_words(url)\n print_items(words)",
"def scrape(self):\n pass",
"def internallinks(url, number_of_pages):\n hotelslist = set()\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n page_load = 5\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n while number_of_pages > 1:\n url = 'https://www.tripadvisor.es' + str(next_page_url)\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n try:\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\n \"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n print(next_page_url)\n number_of_pages = number_of_pages - 1\n if page_load < 5:\n page_load = page_load + (5 - page_load)\n else:\n pass\n except:\n print(\n \"IndexError(Encontramos un error al extraer la {0} página volvemos a ejecutar el contenido de esa \"\n \"pagina)\".format(str(number_of_pages)))\n sleep(1)\n if page_load > 0:\n page_load = page_load - 1\n pass\n else:\n raise IndexError(\"Encontramos un error al extraer la {0} multiples fallos \"\n \"salimos \").format(str(number_of_pages))\n return hotelslist",
"def main():\n goods = '书包'\n # 爬取深度\n depth = 3\n start_url = 'https://s.taobao.com/search?q=' + goods\n # 输出结果的列表\n infoList = []\n # 使用for循环对每一个页面进行处理\n for i in range(depth):\n try:\n # 每个页面的URL链接\n url = start_url + '' + str(44*i)\n html = getHTMLText(url)\n parsePage(infoList, html)\n except:\n continue\n printGoodsList(infoList)",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.creators.com/comics/cat-seeall.html', session, res)\n save_result(res, json_file)",
"def fetch(url):\r\n PAGES = {\"http://SEARCH_QUERY_URL?&page=1\" : SEARCH_RESULT_PAGE1,\r\n \"http://SEARCH_QUERY_URL?&page=2\" : SEARCH_RESULT_PAGE2} \r\n return PAGES[url]",
"def retrieving_data():\n for x in range(1):\n page_number=random.randint(1,500)\n page_num=str(page_number)\n url = 'http://www.tastespotting.com/browse/'+page_num\n req = http.request('GET', url)\n data = BeautifulSoup(req.data,'html.parser')\n for each_div in data.find_all(\"div\", { \"class\": \"trendspotted-item\"}):\n for each_recipe in each_div.find_all('a', href=True):\n \"\"\"links starting with /clicks are the links of recipe to their original sites, so just retrieve those links\"\"\"\n if each_recipe['href'].startswith('/click'):\n retrieving_data.recipe_link=each_recipe['href'][16:-12]\n for each_img in each_recipe.find_all('img', alt=True):\n retrieving_data.recipe_image=each_img['src']\n for each_caption in each_div.find(\"p\", { \"class\": \"photo_caption\"}):\n retrieving_data.recipe_title=each_caption",
"def _scrape(self):",
"def parse_listing(keyword, place):\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}\".format(keyword, place)\n print(\"retrieving \", url)\n\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.paginegialle.it',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n }\n try:\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page\")\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n else:\n print(\"Failed to process page exit with no results exit code: 213\")\n return []\n except:\n print(\"Failed to process page exit with no results exit code: 222\")\n return []\n\n XPATH_RESULTS = \"//div[@class=' container containerListato ']//span[@class='searchResNum']//text()\"\n raw_RESULTS = listings[0].xpath(XPATH_RESULTS)\n resultsn = ''.join(raw_RESULTS).strip().replace(\"risultati\",\"\") if raw_RESULTS else None\n print(\"results found for query {0} {1} - {2}\".format(keyword,place,resultsn))\n page_number = int(int(resultsn)/20) #20 is the number of result for single web page\n print(\"number of web page to parse: {0}\".format(page_number))\n\n scraped_results = []\n if page_number == 1 or page_number == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n return 
scraped_results\n if page_number > 1: \n for retry in range(page_number):\n if retry == 0:\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n else:\n time.sleep(5)\n try:\n url = \"https://www.paginegialle.it/ricerca/{0}/{1}/p-{2}\".format(keyword,place,retry)\n response = requests.get(url, verify=False, headers=headers)\n print(\"parsing page {0}\".format(retry))\n if response.status_code == 200:\n parser = html.fromstring(response.text)\n # making links absolute\n base_url = \"https://www.paginegialle.it\"\n parser.make_links_absolute(base_url)\n\n XPATH_LISTINGS = \"//div[@class='pageContentWrapper active']//div[@class='col contentCol']\"\n listings = parser.xpath(XPATH_LISTINGS)\n for results in listings:\n XPATH_BUSINESS_NAME = \".//h2[@class='fn itemTitle ']//text()\"\n XPATH_BUSSINESS_PAGE = \".//h2[@class='fn itemTitle ']//@href\"\n XPATH_TELEPHONE = \".//span[@class='tel ']//span[@itemprop='telephone']//text()\"\n XPATH_STREET = \".//span[@itemprop='streetAddress']//text()\"\n XPATH_LOCALITY = \".//span[@class='locality']//text()\"\n XPATH_REGION = \".//span[@class='region']//text()\"\n XPATH_ZIP_CODE = \".//span[@class='postal-code']//text()\"\n XPATH_DESCRIPTION = \".//p[@itemprop='description']//text()\"\n XPATH_OPENTIME = \".//span[@class='label']//text()\"\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n raw_business_telephone = results.xpath(XPATH_TELEPHONE)\n raw_business_page = results.xpath(XPATH_BUSSINESS_PAGE)\n raw_street = results.xpath(XPATH_STREET)\n raw_locality = results.xpath(XPATH_LOCALITY)\n raw_region = results.xpath(XPATH_REGION)\n raw_zip_code = results.xpath(XPATH_ZIP_CODE)\n raw_opentime = results.xpath(XPATH_OPENTIME)\n raw_description = results.xpath(XPATH_DESCRIPTION)\n\n raw_data = [raw_business_name,raw_business_telephone,raw_business_page,raw_street,raw_locality,raw_region,raw_zip_code,raw_opentime,raw_description]\n\n cleaned = []\n for grezz in raw_data:\n 
cleaned.append(''.join(grezz).strip() if grezz else None)\n \n business_details = {\n 'business_name': cleaned[0],\n 'telephone': cleaned[1],\n 'business_page': cleaned[2],\n 'street': cleaned[3],\n 'locality': cleaned[4],\n 'region': cleaned[5],\n 'zipcode': cleaned[6],\n 'openingTime': cleaned[7],\n 'Description': cleaned[8],\n }\n scraped_results.append(business_details)\n\n elif response.status_code == 404:\n print(\"Could not find a location matching\", place)\n # no need to retry for non existing page\n break\n else:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results\n\n except:\n print(\"Failed to process page number: {0}\".format(retry))\n return scraped_results \n return scraped_results",
"def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1",
"def scrapping():\r\n\r\n data_cust = {}\r\n #token, latitude, longitude, name, place_id, types_places, vicinity = [],[],[],[],[],[], []\r\n\r\n apik = 'AIzaSyDiFSOQvPbWVh3voJPSSORT9TSfKAXMy7E'\r\n urls = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&key={}&type={}&keyword={}'.format(\r\n lat_ori, long_ori, radius, apik, types_user, keyword_user)\r\n r = requests.get(urls)\r\n data_cust['0'] = r.json()\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n CODE FOR NEXT PAGE TOKEN\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n for number in range(10):\r\n\r\n content = str(number)\r\n if 'next_page_token' in data_cust[content].keys():\r\n sleep(5)\r\n pagetoken = data_cust[content]['next_page_token']\r\n apik = 'AIzaSyDiFSOQvPbWVh3voJPSSORT9TSfKAXMy7E'\r\n urls = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&keyword={}&key={}{pagetoken}'.format(\r\n lat_ori, long_ori, radius, types_user, keyword_user, apik, pagetoken=\"&pagetoken=\"+pagetoken if pagetoken else \"\")\r\n r = requests.get(urls)\r\n get = requests.post(urls)\r\n print(get)\r\n new_id = str(number+1)\r\n data_cust[new_id] = r.json()\r\n else:\r\n print(\"Done\")\r\n break\r\n\r\n latitude, longitude, name, place_id, types_places, vicinity = [], [], [], [], [], []\r\n for i in range(number+1):\r\n content = str(i)\r\n for numbers in range(len(data_cust[content]['results'])):\r\n latitude.append(data_cust[content]['results']\r\n [numbers]['geometry']['location']['lat'])\r\n longitude.append(data_cust[content]['results']\r\n [numbers]['geometry']['location']['lng'])\r\n name.append(data_cust[content]['results'][numbers]['name'])\r\n place_id.append(data_cust[content]['results'][numbers]['place_id'])\r\n types_places.append(\r\n data_cust[content]['results'][numbers]['types'][0])\r\n vicinity.append(data_cust[content]['results'][numbers]['vicinity'])\r\n\r\n datacustype = pd.DataFrame({'customer_name': name, 'customer_type': types_places, 'place_id': place_id,\r\n 'keyword': keyword_user, 'radius': radius, 'latitude_origin': lat_ori, 'longitude_origin': long_ori, 'latitude_destination': latitude,\r\n 'longitude_destination': longitude})\r\n datacustype\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n PHONE NUMBER\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n data_number = {}\r\n for number in datacustype['place_id'].values:\r\n apik = 'AIzaSyDiFSOQvPbWVh3voJPSSORT9TSfKAXMy7E'\r\n urls = 'https://maps.googleapis.com/maps/api/place/details/json?place_id={}&fields=name,formatted_address,rating,formatted_phone_number&key={}'.format(\r\n number, apik)\r\n r = requests.get(urls)\r\n data_number[number] = r.json()\r\n\r\n data_number\r\n\r\n datanumb = pd.DataFrame.from_dict(data_number).T.reset_index()\r\n datanumb.columns = ['place_id', 'html_attributions', 'result', 'status']\r\n datanumb\r\n\r\n name, phone, alamat = [], [], []\r\n\r\n for number in range(len(datanumb)):\r\n if datanumb['status'][number] == 'NOT_FOUND':\r\n name.append('Unknown')\r\n phone.append(0)\r\n alamat.append('-')\r\n else:\r\n name.append(datanumb['result'][number]['name'])\r\n alamat.append(datanumb['result'][number]['formatted_address'])\r\n if 'formatted_phone_number' in (datanumb['result'][number].keys()):\r\n 
phone.append(datanumb['result'][number]\r\n ['formatted_phone_number'])\r\n else:\r\n phone.append(0)\r\n\r\n datanumb2 = pd.DataFrame(\r\n {'customer_name': name, 'customer_address': alamat, 'phone_number': phone})\r\n datanumb2['place_id'] = datanumb['place_id']\r\n datanumb2\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n DATA MERGE\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n datamerge = datacustype.merge(datanumb2, how='left', on='place_id')\r\n datamerge\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n DUMMY\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n datadummy = datamerge.copy()\r\n datadummy\r\n\r\n datadummydrop = datadummy.drop(['customer_name_y'], axis=1)\r\n datadummydrop.rename(\r\n columns={'customer_name_x': 'customer_name'}, inplace=True)\r\n datadummydrop2 = datadummydrop[['customer_name', 'customer_address', 'customer_type', 'keyword', 'radius',\r\n 'place_id', 'latitude_origin', 'longitude_origin', 'latitude_destination', 'longitude_destination', 'phone_number']]\r\n datadummydrop2\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n DISTANCE MATRIX\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n API_key = 'AIzaSyDiFSOQvPbWVh3voJPSSORT9TSfKAXMy7E' # enter Google Maps API key\r\n gmaps = googlemaps.Client(key=API_key)\r\n\r\n distancedrive, distancewalks = [], []\r\n\r\n # Loop through each row in the data frame using pairwise\r\n for number in range(datadummydrop2.shape[0]):\r\n # Assign latitude and longitude as origin/departure points\r\n LatOrigin = datadummydrop2['latitude_origin'][number]\r\n LongOrigin = datadummydrop2['longitude_origin'][number]\r\n origins = (LatOrigin, LongOrigin)\r\n\r\n # Assign latitude and longitude from the next row as the destination point\r\n # Save value as lat\r\n LatDest = datadummydrop2['latitude_destination'][number]\r\n # Save value as lat\r\n LongDest = datadummydrop2['longitude_destination'][number]\r\n destination = (LatDest, LongDest)\r\n\r\n # pass origin and destination variables to distance_matrix function# output in meters\r\n result = gmaps.distance_matrix(origins, destination, mode='driving', avoid='tolls',\r\n units='metric', departure_time=1703981100)[\"rows\"][0][\"elements\"][0][\"distance\"][\"value\"]\r\n # 1703981100 #1606867500\r\n # append result to list\r\n distancedrive.append(result)\r\n\r\n datadummydrop2['distance_driving'] = distancedrive\r\n datadummydrop3 = datadummydrop2.sort_values(\r\n by=['distance_driving'], ascending=True, ignore_index=True)\r\n datadummydrop3\r\n\r\n \"\"\"\r\n /////////////////////////////////////////////////////////////////////////////\r\n\r\n DATAFRAME TO POSTGRE\r\n\r\n /////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n database = psycopg2.connect(database=\"customerDB\",\r\n user=\"postgres\",\r\n password=\"1234\",\r\n host=\"localhost\")\r\n\r\n cursor = database.cursor()\r\n\r\n for i in datadummydrop3.index:\r\n c1 = datadummydrop3['customer_name'][i]\r\n c2 = datadummydrop3['customer_address'][i]\r\n c3 = datadummydrop3['customer_type'][i]\r\n c4 = datadummydrop3['keyword'][i]\r\n c5 = datadummydrop3['radius'][i]\r\n c6 = datadummydrop3['place_id'][i]\r\n c7 = 
datadummydrop3['latitude_origin'][i]\r\n c8 = datadummydrop3['longitude_origin'][i]\r\n c9 = datadummydrop3['latitude_destination'][i]\r\n c10 = datadummydrop3['longitude_destination'][i]\r\n c11 = datadummydrop3['phone_number'][i]\r\n c12 = datadummydrop3['distance_driving'][i]\r\n query = \"\"\"\r\n Insert into customertarget_customerpotential(customer_name, customer_address, customer_type, keyword, radius, place_id, latitude_origin, longitude_origin, latitude_destination, longitude_destination, phone_number, distance_driving) VALUES('%s','%s','%s','%s','%s','%s',%s,%s,%s,%s,'%s',%s);\r\n \"\"\" % (c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12)\r\n cursor.execute(query)\r\n cursor.close()\r\n\r\n database.commit()\r\n database.close()\r\n\r\n print(\"Data berhasil di upload\")",
"def scrape_callback(url, html):\r\n fields = ('area', 'population', 'iso', 'country', 'capital',\r\n 'continent', 'tld', 'currency_code', 'currency_name',\r\n 'phone', 'postal_code_format', 'postal_code_regex',\r\n 'languages', 'neighbours')\r\n if re.search('/view/', url):\r\n tree = fromstring(html)\r\n all_rows = [\r\n tree.xpath('//tr[@id=\"places_%s__row\"]/td[@class=\"w2p_fw\"]' % field)[0].text_content()\r\n for field in fields]\r\n print(url, all_rows)",
"def traverse_next(page, next, results):\n for link in page.extract_links(next['follow_link']):\n print(Back.YELLOW + Fore.BLUE + \"Loading page \", link.url + Back.RESET + Fore.RESET)\n r = results.copy()\n for attribute in next['scraping'].get('data'):\n if attribute['field'] != \"\":\n print(\"\\nExtracting\", attribute['field'], \"attribute\", sep=' ')\n r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr'], attribute['default'])\n if not next['scraping'].get('next'):\n yield r\n else:\n for next2 in next['scraping'].get('next'):\n for result in traverse_next(link, next2, r):\n yield result",
"def crawl_main_list(session, top_url, indicator):\n try:\n req = session.get(top_url)\n\n except requests.exceptions.RequestException:\n # In the case HTTP request failed.\n req_err = str(sys.exc_info()[0]) + ' : ' + str(sys.exc_info()[1])\n print('HTTP request error. ({})'.format(err))\n sbtk.show_errormessage(indicator.parent,\n 'HTTP request error. Program terminated.',\n req_err)\n raise\n\n # print('get return = {} --- {}'.format(req.url, req.reason))\n top_list = req.html.find('li.regular-search-result')\n\n # Take information of restaurants from Main Page\n for a_rest in top_list:\n time.sleep(5)\n try:\n # Get this restaurant's information.\n rest_name = a_rest.find('h3.search-result-title > span.indexed-biz-name > a.biz-name.js-analytics-click > span', first = True).text\n # Genre, Area, Address, Phone\n rest_genre_list = [ rest_genre.text for rest_genre in a_rest.find(\n 'div.price-category > span.category-str-list > a')]\n\n rest_secondattr = a_rest.find('div.secondary-attributes', first=True)\n # Some businesses don't have area.\n rest_area_elem = rest_secondattr.find('span.neighborhood-str-list', first=True)\n if not rest_area_elem:\n rest_area = ''\n else:\n rest_area = rest_area_elem.text\n # rest_area = rest_area_elem.text\n\n # Some businesses don't have <address> tag and\n # <div class=\"biz-parent-container\"> tag instead.\n rest_address_elem = rest_secondattr.find('address', first=True)\n if not rest_address_elem:\n rest_located = rest_secondattr.find('div.biz-parent-container', first=True)\n if rest_located:\n rest_address = rest_located.text.replace('\\n', ', ')\n else:\n rest_address = ''\n else:\n rest_address = rest_address_elem.text.replace('\\n', ', ')\n # Some businesses don't have phone number.\n rest_phone_elem = rest_secondattr.find('span.biz-phone', first=True)\n if not rest_phone_elem:\n rest_phone = ''\n else:\n rest_phone = rest_phone_elem.text\n\n # print(str(\"* {}\".format(rest_name).encode(encoding='cp932', errors='replace')), flush=True)\n\n # Go to the link to the individual restaurant page.\n # Get the restaurant's website, message, reservation values by Dict.\n rest_link = element_link(a_rest.find(\n 'h3.search-result-title > span.indexed-biz-name',\n first=True\n ).find('a.biz-name.js-analytics-click', first=True)\n )\n rest_page_info = each_rest_page(session, rest_link)\n\n list_num = len(rest_list)+1\n # Set information to Dict rest_list.\n rest_list[list_num] = {\n 'name' : rest_name,\n 'genre' : rest_genre_list,\n 'area' : rest_area,\n 'address' : rest_address,\n 'phone' : rest_phone,\n 'web' : rest_page_info['web'],\n 'message' : rest_page_info['message'],\n 'reservation' : rest_page_info['reservation'],\n 'takes_rsrv' : rest_page_info['takes_rsrv'],\n 'page' : rest_link\n }\n indicator.set_num_to_msg(list_num)\n # print('[{}] {} : {}'.format(len(rest_list), rest_name, rest_list[rest_name]), flush=True)\n\n except Exception:\n # When any program error occures...\n err = str(sys.exc_info()[0]) + ' : ' + str(sys.exc_info()[1])\n if not sbtk.choose_errormessage(indicator.parent, 'HTML Analysis Error.', err):\n raise # Program terminates. (otherwise go back to loop)\n\n except:\n # When a system error happens...\n raise # Profram terminates.\n\n # Return 'next page' link\n return element_link(req.html.find(\n 'a.u-decoration-none.next.pagination-links_anchor', first = True))",
"def parse_listing(keyword,place):\n\turl = \"https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}\".format(keyword,place)\n\tprint(\"retrieving \",url)\n\n\theaders = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n\t\t\t\t'Accept-Encoding':'gzip, deflate, br',\n\t\t\t\t'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n\t\t\t\t'Cache-Control':'max-age=0',\n\t\t\t\t'Connection':'keep-alive',\n\t\t\t\t'Host':'www.yellowpages.com',\n\t\t\t\t'Upgrade-Insecure-Requests':'1',\n\t\t\t\t'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n\t\t\t}\n\t# Adding retries\n\tfor retry in range(10):\n\t\ttry:\n\t\t\tresponse = requests.get(url,verify=False, headers = headers )\n\t\t\tprint(\"parsing page\")\n\t\t\tif response.status_code==200:\n\t\t\t\tparser = html.fromstring(response.text)\n\t\t\t\t#making links absolute\n\t\t\t\tbase_url = \"https://www.yellowpages.com\"\n\t\t\t\tparser.make_links_absolute(base_url)\n\n\t\t\t\tXPATH_LISTINGS = \"//div[@class='search-results organic']//div[@class='v-card']\"\n\t\t\t\tlistings = parser.xpath(XPATH_LISTINGS)\n\t\t\t\tscraped_results = []\n\n\t\t\t\tfor results in listings:\n\t\t\t\t\tXPATH_BUSINESS_NAME = \".//a[@class='business-name']//text()\"\n\n\t\t\t\t\tXPATH_WEBSITE = \".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href\"\n\n\t\t\t\t\traw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n\n\t\t\t\t\traw_website = results.xpath(XPATH_WEBSITE)\n\n\n\t\t\t\t\tbusiness_name = ''.join(raw_business_name).strip() if raw_business_name else None\n\n\t\t\t\t\twebsite = ''.join(raw_website).strip() if raw_website else None\n\n\n\n\n\n\t\t\t\t\tbusiness_details = {\n\t\t\t\t\t\t\t\t\t\t'business_name':business_name,\n\n\t\t\t\t\t\t\t\t\t\t'website':website\n\n\t\t\t\t\t}\n\t\t\t\t\tscraped_results.append(business_details)\n\t\t\t\t\tprint(scraped_results)\n\t\t\t\treturn scraped_results\n\n\t\t\telif response.status_code==404:\n\t\t\t\tprint(\"Could not find a location matching\",place)\n\t\t\t\t#no need to retry for non existing page\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Failed to process page\")\n\t\t\t\treturn []\n\n\t\texcept:\n\t\t\tprint(\"Failed to process page\")\n\t\t\treturn []",
"async def async_get(self, url):\n self.reset()\n self.next_link = url\n return await self.async_advance_page()",
"def fetch(self):\n self.genre = \"Review\"\n try:\n if not self.__setSoup():\n log.info(self.log_msg(\"Soup not set,returning false\"))\n return False\n #if not self._getParentPage():\n # log.info(self.log_msg(\"Parent page not found\"))\n while True:\n parent_page_soup = copy.copy(self.soup)\n # log.info(self.log_msg('current uri%s'%parent_page_soup))\n if not self.__addReviews():\n log.info(self.log_msg('fetched all reviews for the url %s'\\\n %self.task.instance_data['uri']))\n \n log.info(self.log_msg('Next page%s'%self.currenturi))\n try:\n \n # self.currenturi = self.task.instance_data['uri'].rsplit\\\n # ('/', 1)[0] + '/' + self.soup.find('a', \\\n # title='Go to the next page')['href']\n self.currenturi = 'http://www.phonedog.com' + parent_page_soup.find('a',title='Go to the next page')['href']\n \n if not self.__setSoup():\n log.info(self.log_msg('soup not set for the uri %s'%\\\n self.currenturi))\n break\n except:\n log.info(self.log_msg('Next page not found for the uri %s'%\\\n self.currenturi))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch\"))\n return False",
"def crawl(self, url):\n\n url = self.url_util.normalise_url(url)\n hostname = self.url_util.get_hostname(url)\n\n urlsToVisit = [url]\n urlsVisted = []\n output = []\n # Each iteration of this loop processes the next URL to visit.\n while (len(urlsToVisit) > 0):\n \n url = urlsToVisit.pop(0)\n urlsVisted.append(url)\n\n html = self.html_requester.get_html(url)\n links = self.html_parser.get_links(html)\n same_hostname_urls = self.html_parser.get_same_hostname_urls(hostname, links)\n assets = self.html_parser.get_assets(same_hostname_urls)\n web_pages = self.html_parser.get_web_pages(same_hostname_urls)\n \n output.append({\"url\":url,\"assets\":assets})\n print json.dumps({\"url\":url,\"assets\":assets}, indent=4)\n \n for web_page in web_pages:\n # Do not visit a page more than once\n if not web_page in urlsToVisit and web_page not in urlsVisted:\n urlsToVisit.append(web_page)\n \n return json.dumps(output, indent=4).splitlines()",
"def test_iterate_next_urls_html(self):\n self.mini_spider_thread.grab_url('http://example.com/iterate_next_urls/html_webpage')\n self.assertTrue(self.mini_spider_thread.grab_url_success)\n self.assertEqual(list(self.mini_spider_thread.iterate_next_urls(self.url_obj))[0],\n 'http://example.com/test/test1.html')",
"def getMNACGenerator():\n\n # 0 - 89 (something between 80 and 90\n searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?title_1=&title=&field_piece_inventory_number_value=&keys=&field_piece_type_value_i18n[0]=pintura&&&page=%s'\n # 0 - 48, for some reason not all paintings get returned in the main query\n # searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?field_piece_type_value_i18n[0]=pintura&field_piece_info_content_value[p.%%2019th]=p.%%2019th&field_piece_info_content_value[q.%%2020th]=q.%%2020th&&page=%s'\n htmlparser = HTMLParser.HTMLParser()\n\n foundit=True\n\n for i in range(0, 89):\n searchUrl = searchBaseUrl % (i,)\n print searchUrl\n searchPage = urllib2.urlopen(searchUrl)\n searchPageData = searchPage.read()\n\n searchRegex = u'\\<a href\\=\\\"(\\/en\\/colleccio\\/[^\\\"]+)\\\"\\>Read more\\<\\/a\\>'\n itemmatches = re.finditer(searchRegex, searchPageData)\n urllist = []\n #for match in matches:\n # try:\n # # #bla = unicode(match.group(1), u'utf-8')\n # urllist.append(u'http://www.dulwichpicturegallery.org.uk%s' % (match.group(1),))\n # except UnicodeDecodeError:\n # pywikibot.output(u'Found an url I cannot parse: %s' % (unicode(match.group(1), u'utf-8'),))#\n\n #print len(urllist)\n #urlset = set(urllist)\n #print len(urlset)\n\n\n for itemmatch in itemmatches:\n url = u'http://www.museunacional.cat%s' % (itemmatch.group(1),)\n print url\n\n if url==u'http://adsfasdfasdf':\n foundit=True\n if not foundit:\n continue\n metadata = {}\n\n metadata['collectionqid'] = u'Q861252'\n metadata['collectionshort'] = u'MNAC'\n metadata['locationqid'] = u'Q861252'\n metadata['instanceofqid'] = u'Q3305213'\n \n metadata['url'] = url\n\n itemPage = urllib2.urlopen(url)\n itemPageData = unicode(itemPage.read(), u'utf-8')\n \n #print itemPageEnData\n titleRegex = u'<li class=\"ca first\"><a href=\"/ca/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"ca\" title=\"([^\\\"]+)\">Català</a></li>[\\r\\n\\t\\s]*<li class=\"es\"><a href=\"/es/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"es\" title=\"([^\\\"]+)\">Español</a></li>[\\r\\n\\t\\s]*<li class=\"en last active\"><a href=\"/en/colleccio/[^\\\"]+\" class=\"language-link active\" xml:lang=\"en\" title=\"([^\\\"]+)\">English</a></li>'\n #titleEnRegex = u'<main class=\"main narrow\">[\\r\\n\\t\\s]+<h1>[\\r\\n\\t\\s]*([^<]+)[\\r\\n\\t\\s]*</h1>'\n creatorRegex = u'<div class=\"ds-author-piece\">([^<]+)</div>'\n dateRegex = u'Painting<div class=\"ds-feature\"><p>(\\d\\d\\d\\d)</p></div>' #FIXME: Only matches on real years\n invRegex = u'Inventory number: </div><p>([^<]+)</p>'\n\n # Could also get Dimensions, Materials, Acquisition\n \n matchTitle = re.search(titleRegex, itemPageData)\n if not matchTitle:\n pywikibot.output(u'The title data for this painting is BORKED!')\n continue\n\n #FIXME: Check encoding\n\n metadata['title'] = { u'ca' : htmlparser.unescape(matchTitle.group(1)),\n u'es' : htmlparser.unescape(matchTitle.group(2)),\n u'en' : htmlparser.unescape(matchTitle.group(3)),\n }\n \n #pywikibot.output(metadata.get('title'))\n\n creatorMatch = re.search(creatorRegex, itemPageData)\n if not creatorMatch:\n pywikibot.output(u'The creator data for this painting is BORKED!')\n continue\n\n #FIXME: Add some logic for work after and clean up\n\n name = htmlparser.unescape(creatorMatch.group(1))\n # We need to normalize the name\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n metadata['creatorname'] = 
name\n \n metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n u'ca' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n u'es' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n }\n\n\n invMatch = re.search(invRegex, itemPageData)\n\n if not invMatch:\n pywikibot.output(u'No inventory number found! Skipping')\n continue\n \n metadata['id'] = invMatch.group(1)\n metadata['idpid'] = u'P217'\n\n dateMatch = re.search(dateRegex, itemPageData)\n\n if dateMatch:\n metadata['inception'] = dateMatch.group(1)\n\n yield metadata",
"def _scrape_next_results_page_link(self, response):\n next_pages = response.xpath('//*[@id=\"pagnNextLink\"]/@href |'\n '//ul[contains(@class, \"a-pagination\")]'\n '/a[contains(text(), \"eiter\")]/@href').extract()\n next_page_url = None\n\n if len(next_pages) == 1:\n next_page_url = next_pages[0]\n elif len(next_pages) > 1:\n self.log(\"Found more than one 'next page' link.\", ERROR)\n\n return next_page_url",
"def parse(self, response):\n announcement_urls = response.css('#TD1 > table > tbody > tr > td.tdline2 > a::attr(href)').extract()\n for announcement_url in announcement_urls:\n yield Request(url=parse.urljoin(response.url, announcement_url), callback=self.parse_detail)\n\n # next page\n total_num_text = response.css('#Table1 > tbody > tr > td:nth-child(1)::text').extract()[-1]\n match_re = re.match('.*?共(\\d+)页', total_num_text)\n if not match_re:\n print('extract total page number error, please check the page source.')\n return\n total_num = int(match_re.group(1))\n if self.current_page <= total_num:\n form_request_text = re.match(\".*'(.*)?'\", response.css(\n '#Table1 > tbody > tr > td:nth-child(3) > input.cls-navigate-next::attr(onclick)').extract_first()).group(1)\n next_page_url = form_request_text.split('?')[0]\n form_data = form_request_text.split('?', 1)[1].split('&')\n yield scrapy.FormRequest(\n url=parse.urljoin(response.url, next_page_url),\n formdata={\n 'ISAJAXLOAD': form_data[0].split('=')[1],\n 'displayContentId': form_data[1].split('=')[1],\n 'SHOWTYPE': form_data[2].split('=')[1],\n 'CATALOGTYPE': form_data[3].split('=')[1],\n 'ORIGINAL_CATALOGID': form_data[4].split('=')[1],\n 'HEAD': '本所公告', # todo 第二页返回时发现乱码 经测试该字段是固定的 先这样处理\n 'CATALOGID': form_data[6].split('=')[1],\n 'TYPE': form_data[7].split('=')[1],\n 'COUNT': form_data[8].split('=')[1],\n 'ARTICLESOURCE': form_data[9].split('=')[1],\n 'LANGUAGE': form_data[10].split('=')[1],\n 'REPETITION': form_data[11].split('=')[1],\n 'DATESTYLE': form_data[12].split('=')[1],\n 'DATETYPE': form_data[13].split('=')[1],\n 'SEARCHBOXSHOWSTYLE': form_data[14].split('=')[1],\n 'INHERIT': form_data[15].split('=')[1],\n 'USESEARCHCATALOGID': form_data[16].split('=')[1],\n 'REPORT_ACTION': form_data[17].split('=')[1],\n 'PAGESIZE': form_data[18].split('=')[1],\n 'PAGECOUNT': form_data[19].split('=')[1],\n 'RECORDCOUNT': form_data[20].split('=')[1],\n 'PAGENO': form_data[21].split('=')[1],\n },\n callback=self.parse\n )\n self.current_page += 1"
]
| [
"0.640374",
"0.6311036",
"0.6293636",
"0.62394124",
"0.61887467",
"0.6168092",
"0.6126903",
"0.60940063",
"0.60837823",
"0.6080943",
"0.60407084",
"0.60225344",
"0.60035765",
"0.5994992",
"0.5961852",
"0.5961246",
"0.59560084",
"0.5912258",
"0.59046686",
"0.5901058",
"0.58722764",
"0.5869966",
"0.5867249",
"0.5858262",
"0.5857694",
"0.5855748",
"0.5838928",
"0.57991505",
"0.57631785",
"0.57538426"
]
| 0.7052188 | 0 |
generate the hashes for the passwords | def gen_hash(self, data):
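# hash the supplied password with the crypt helper and return the result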
password_gen = crypt.encrypt(data)
return password_gen | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)",
"def generate_hash(password, salt):\n # encode the password/salt in utf-8\n bytes_string = password.encode(encoding='utf-8')\n salt = salt.encode(encoding='utf-8')\n\n # creates hash objects\n hash_md5 = hashlib.md5()\n hash_sha256 = hashlib.sha256()\n\n # hashes salt and password in the 2 formats\n hash_md5.update(salt + bytes_string)\n hash_sha256.update(salt + bytes_string)\n\n # returns the hex-digest eg the format you most commonly see\n print(hash_md5.hexdigest())\n print(hash_sha256.hexdigest())\n\n return hash_sha256, hash_md5",
"def test_hash_verification(self):\n pw = generate_password(8)\n for hash_method in (ldap_des_crypt, ldap_sha512_crypt, ldap_md5,\n ldap_salted_sha1):\n encrypted = hash_method.hash(pw)\n self.assertTrue(verify_password(pw, encrypted),\n \"{}: '{}' should verify '{}'\"\n .format(hash_method.name, encrypted, pw))",
"def test_salt_generation(self):\n pw = generate_password(8)\n hashes = tuple(hash_password(pw) for i in range(10))\n self.assertEqual(len(hashes), len(set(hashes)),)",
"def generate_hash(passwd):\n return hashlib.sha512(passwd.encode(\"utf-8\")).hexdigest()",
"def generate_pw(self):\n\n chunks = []\n for chunk_no in range(self.CHUNKS):\n if chunk_no < self.chunk:\n chunks.append(self.verified_chunks[chunk_no])\n elif chunk_no == self.chunk:\n chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH /\n self.CHUNKS))\n else:\n chunks.append(\"000\")\n\n return \"\".join(chunks)",
"def secret_hash(data):\n\n passwords_hash = hashlib.md5(data.encode(\"UTF-8\")).hexdigest()\n \n return passwords_hash",
"def set_passwords(self, passwords):\n self.passwords = {}\n for user_name in passwords:\n self.passwords[user_name] = sha512_crypt.hash(\n passwords[user_name], rounds=5000)",
"def _hashPassword(password):\n charset = './' + ascii_letters + digits\n return crypt.crypt(password, ''.join(random.sample(charset, 2)))",
"def hash_all_password(db):\n passwords = []\n # Filter all users that needs update\n for security in db['security'].find():\n update = {}\n # Check if password is hashed\n if not security['password'].startswith('pbkdf2:'):\n update['password'] = generate_password_hash(security['password'])\n\n # Check if security answers are hashed\n if any(not ans.startswith('pbkdf2:') for ans in security['security_answers']):\n update['security_answers'] = [generate_password_hash(ans)\n for ans in security['security_answers']]\n\n if update:\n passwords.append((str(security['_id']), update))\n print(\"Will update these users: \", passwords)\n\n # Update these users\n for _id, update in passwords:\n db['security'].update_one({'_id': _id}, {'$set': update})",
"def __generate_hash(password):\n if password is None:\n return None\n return bcrypt.generate_password_hash(password, rounds=10).decode(\"utf8\")",
"def generate_hash(password):\n return pbkdf2_sha256.hash(password)",
"def hash_password(self, password):\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\n pwdhash = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), \n salt, 100000)\n pwdhash = binascii.hexlify(pwdhash)\n return (salt + pwdhash).decode('ascii')",
"def hash_passwd(password, hash_method=\"sha256\"):\n\n return generate_password_hash(password, hash_method)",
"def hashPassword(passwd):\r\n \r\n return hashlib.sha224(passwd).hexdigest()",
"def password_generator(self):\n password_list = []\n for generated in JugglerPassGen.generate(self.word): # call the function for permutations\n password_list.append(generated)\n return password_list",
"def make_hashed_password(cleartext,salt=None):\n \n if not salt:\n salt = make_salt(5)\n return \"%s|%s\" % (salt,hashlib.sha256(salt + cleartext).hexdigest())",
"def test_hash_type(self):\n expected_hash_method = ldap_sha512_crypt\n for pw in (generate_password(8) for i in range(10)):\n encrypted = hash_password(pw)\n self.assertTrue(expected_hash_method.identify(encrypted),\n \"Expected hashes for method {}, got: {}\"\n .format(expected_hash_method.name, encrypted))",
"def hash_functions(self):\n pass",
"def password(self, password):\n self.password_hash = generate_password_hash(password)",
"def hash_password(password):\r\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\r\n pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),salt,100000)\r\n pwdhash = binascii.hexlify(pwdhash)\r\n return (salt+pwdhash).decode('ascii')",
"def hash_pass(password, salt):\n return hashlib.pbkdf2_hmac('sha512', password.encode(), salt, 100000)",
"def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)",
"def hash_password(password):\n #return passlib.hash.pbkdf2_sha512.encrypt(password)\n return sha256_crypt.hash(password)",
"def HashAlgorithm(self) -> _n_7_t_0:",
"def generate_password_hash(event=None, user_id=None):\n\n suffix_key = f'password{event}'\n hexkey = str.encode(f'{user_id}{suffix_key}')\n\n # md5 value[1:10] + 1\n passwd = '{0}{1}'.format(hashlib.md5(hexkey).hexdigest()[1:10], 1)\n\n return passwd",
"def hash_functions(self):\n def hash_factory(n):\n return lambda x: hash(\"salt\" + str(n) + str(x) + \"salt\")\n return [ hash_factory(_) for _ in range(self.dim) ]",
"def make_pw_hash(password, salt=None):\n if not salt:\n salt = make_salt()\n h = hashlib.sha256(password+salt).hexdigest()\n return '%s,%s' % (salt, h)",
"def get_password_hash(self, username):\n raise NotImplementedError()",
"def hash(self) -> str:\r\n ..."
]
| [
"0.69298804",
"0.68461454",
"0.6780465",
"0.67700803",
"0.6745123",
"0.6678128",
"0.66269416",
"0.6591104",
"0.65296036",
"0.6519999",
"0.65092117",
"0.65037704",
"0.6464029",
"0.646137",
"0.6457473",
"0.6453947",
"0.64536256",
"0.64312357",
"0.64078385",
"0.63722223",
"0.63666165",
"0.6358728",
"0.6339339",
"0.63386446",
"0.63350576",
"0.63347846",
"0.6332392",
"0.632723",
"0.63245916",
"0.6320226"
]
| 0.6938553 | 0 |
Transfer model weights to target model with a factor of Tau | def transfer_weights(self):
W, target_W = self.model.get_weights(), self.target_model.get_weights()
for i in range(len(W)):
target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]
self.target_model.set_weights(target_W) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())",
"def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())",
"def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())",
"def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())",
"def soft_update_actor(self):\n local_weights = np.array(self.actor_local.model.get_weights())\n target_weights = np.array(self.actor_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.actor_target.model.set_weights(new_weights)",
"def get_soft_target_model_updates(target, source, tau):\n target_weights = target.get_weights()\n tau_values = np.ones(np.shape(target_weights)) * tau\n new_weights = (1 - tau_values) * target.get_weights() + tau_values * source.get_weights()\n target.set_weights(new_weights)\n return target",
"def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)",
"def soft_update(self, local_model, target_model, tau):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = tau * local_weights + (1 - tau) * target_weights\n target_model.set_weights(new_weights)",
"def soft_update(self, local_model, target_model):\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n assert len(local_weights) == len(target_weights), \"Local and target model parameters must have the same size.\"\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n target_model.set_weights(new_weights)",
"def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)",
"def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)",
"def get_hard_target_model_updates(target, source):\n target.set_weights(source.get_weights())\n\n return target",
"def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )",
"def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)",
"def load_weigths_into_target_network(self):\n logging.debug(\"Transfer Weight!\")\n self.network.save_weights(self._save_path)\n self.target_network.load_weights(self._save_path)",
"def _weights_for_target(self, target):\n\n self._update_global_transform(target)\n projected_target = self.global_transform.pseudoinverse().apply(target)\n # now we have the target in model space, project it to recover the\n # weights\n new_weights = self.model.project(projected_target)\n # TODO investigate the impact of this, could be problematic\n # the model can't perfectly reproduce the target we asked for -\n # reset the global_transform.target to what it CAN produce\n #refined_target = self._target_for_weights(new_weights)\n #self.global_transform.target = refined_target\n return new_weights",
"def apply_thermostat(self,target_temp):\n u_old = self.u\n tav = self.t.mean()\n scale_factor = target_temp / tav\n self.t = self.t * scale_factor\n self.u = get_vdw_u(self.t,self.rho)\n uenv = self.u.sum() - u_old.sum()",
"def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")",
"def sync_target_models(self):\n for model, target_model in self.model_groups:\n for var, target_var in zip(\n model.trainable_variables, target_model.trainable_variables\n ):\n target_var.assign((1 - self.tau) * target_var + self.tau * var)",
"def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)",
"def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network",
"def adjust_weights(weights, target, learn_rate):\r\n\r\n for w in range(0, len(target)):\r\n weights[w] += learn_rate * (target[w] - weights[w])",
"def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)",
"def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)",
"def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)",
"def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())",
"def SoftUpdate(self, local, target, tau):\n for target_param, local_param in zip(target.parameters(), local.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)"
]
| [
"0.73884064",
"0.73884064",
"0.73884064",
"0.7025803",
"0.6765796",
"0.67450416",
"0.6741832",
"0.67204714",
"0.6674984",
"0.6514898",
"0.65123016",
"0.6492347",
"0.6492347",
"0.6467089",
"0.6460493",
"0.64421093",
"0.64420146",
"0.6416018",
"0.6381238",
"0.6341845",
"0.6338286",
"0.63367003",
"0.6314336",
"0.6257472",
"0.62210643",
"0.6216442",
"0.6138488",
"0.61217344",
"0.61113465",
"0.5975208"
]
| 0.7874975 | 0 |
Output the company CIK listed (applicable only for stocks issues in the SEC, so no exchange name is required) | def get_company_cik(stock_ticker: str):
company_cik = sec_finance_functions.get_company_data_by_ticker(stock_ticker).company_cik
return company_cik | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result",
"def cikToName(CIK):\n\n # Form URL to search for CIK\n url = \"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=\" \\\n + str(CIK)\n\n # Get the results\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n\n # There should be a tag with the name in it\n tag = soup.find_all('span', attrs={\"class\": \"companyName\"})[0]\n name = tag.contents[0]\n\n return name",
"def tickerToCIK(ticker):\n\n # Form URL to search for ticker\n url = \"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=\" \\\n + str(ticker)\n\n # Get the results\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n\n # There should be a tag with the CIK in it, or else ticker is probably invalid\n try:\n tag = soup.find_all('input', attrs={\"name\": \"CIK\"})[0]\n except IndexError:\n return None\n\n CIK = tag['value']\n return CIK",
"def mapper_cik_company(\n apikey: str,\n ticker: str,\n) -> typing.Optional[typing.List[typing.Dict]]:\n path = f\"mapper-cik-company/{ticker}\"\n query_vars = {\"apikey\": apikey}\n return __return_json_v4(path=path, query_vars=query_vars)",
"def IC(stock):\n return Invested_Book_Capital(stock)",
"def ckn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ckn\")",
"def get_compo_list(self):\n super(self.__class__, self).get_compo_list()\n link = 'https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average'\n params={'Symbol':2, 'Name':0, 'Sector':3, 'Industry':3}\n self.components = get_index_components_from_wiki(link, params)\n # insert CIK\n ciks = self.update_ciks(updateall=True)\n self.components = self.components.join(ciks)\n return self.components",
"def get_company_info(company_no):\n in_ = 'curl -s -X GET -u yLwgnyHvwlYxkbOBAoLEwsaEfVQ_a7kAuCUTNtSt: https://api.companieshouse.gov.uk/company/{}/officers?q=Officers&items_per_page=100&start_index=0'.format(company_no).split()\n\n out = subprocess.check_output(in_)\n res = json.loads(out.decode('utf8'))\n ret = res['items']\n \n return ret",
"def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()",
"def get_company_info_for(symbol: str):\n baseurl = \"https://financialmodelingprep.com/api/v3/search\"\n params = {\"query\": symbol, \"apikey\": FMP_API_KEY, \"limit\": \"1\"}\n return make_request(baseurl=baseurl, params=params)",
"def customers(ticker: str, other_args: List[str]):\n parser = argparse.ArgumentParser(\n prog=\"customer\",\n add_help=False,\n description=\"List of customers from ticker provided. [Source: CSIMarket]\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n url_customer_chain = (\n f\"https://csimarket.com/stocks/custexNO.php?markets&code={ticker.upper()}\"\n )\n text_customer_chain = BeautifulSoup(\n requests.get(url_customer_chain).text, \"lxml\"\n )\n\n l_customer = list()\n for customer in text_customer_chain.findAll(\n \"td\", {\"class\": \"plava svjetlirub\"}\n ):\n l_customer.append(customer.text)\n\n if l_customer:\n print(\"List of Customers: \" + \", \".join(l_customer) + \"\\n\")\n else:\n print(\"No customers found.\\n\")\n\n except Exception as e:\n print(e, \"\\n\")",
"def get_companyName(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.name for x in groupCompanies]",
"def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')",
"def get_cit_data(ticker):\n log = logging.getLogger(__name__)\n # TODO: This is probably not working anymore (need to refresh files)\n file = os.path.join(c.cfg['default']['data'], 'COT', 'Supplemental', 'T{}.PRN'.format(ticker))\n columns = ['Non-Commercial Longs', 'Non-Commercial Shorts',\n 'Commercial Longs', 'Commercial Shorts',\n 'Non-Reportable Longs', 'Non-Reportable Shorts',\n 'Index Trader Longs', 'Index Trader Shorts']\n\n # Read Data from csv file\n cit = pd.read_csv(file, parse_dates=True, index_col=0, header=None, names=columns)\n \n\n return cit",
"def print_stock_info():\n for key, value in stock_info.iteritems():\n // your code here",
"def iam(self):\n print(\"I am company\", self.ticker)",
"def list(logger, client):\n logger.info('Retrieving Cloudify License')\n license = client.license.list()\n print_data(LICENSE_COLUMN, license, 'Cloudify License')",
"def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")",
"def get_company_info(company_name):\n\n # Fix formatting of name\n co = company_name.replace(\".\", \"\").replace(\" \", \"%20\")\n\n query = f\"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={co}\\\n ®ion=1&lang=en&callback=YAHOO.Finance.SymbolSuggest.ssCallback\"\n\n response = requests.get(query)\n\n fdata = response.text.split(\"(\", 1)[1]\n fdata = fdata.rsplit(\")\", 1)[0]\n data = json.loads(fdata)\n yahoo_json = data[\"ResultSet\"][\"Result\"]\n\n return yahoo_json",
"def list_my_contracts(self):\n self.get_my_contracts()\n try:\n for contract in self.my_contracts:\n print('------')\n print(contract.timestamp)\n print(contract.market)\n print(contract.ticker)\n print(contract.shares)\n print(contract.gain_or_loss)\n print(contract.average_price)\n print(contract.buy_price)\n print(contract.sell_price)\n print(contract.estimate_sale_of_current_shares)\n print(contract.implied_odds)\n print(contract.estimate_best_result)\n print('------')\n except TypeError:\n print('You don\\'t have any active contracts!')\n return",
"def test_cny(self):\n cash_accounts = self.port_values['cash_accounts']\n cash_account = self.extract_cash_account(cash_accounts, 'CNY')\n self.assertNotEqual(cash_account, {})\n\n self.assertEqual(cash_account['account_num'], '012-875-0-603962-0')\n self.assertEqual(cash_account['account_type'], 'Current Account')\n self.assertEqual(cash_account['bank'], 'Bank of China (Hong Kong) Ltd')\n self.assertEqual(cash_account['date'], datetime.datetime(2015,12,10))\n self.assertAlmostEqual(cash_account['balance'], 386920)\n self.assertAlmostEqual(cash_account['fx_rate'], 1.2037)\n self.assertAlmostEqual(cash_account['local_currency_equivalent'], 465735.604)",
"def all_companies(login_details):\n output = None\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL;'\n\n c, conn = connection(login_details)\n try:\n c.execute(sql)\n values = c.fetchall()\n if values is not None:\n output = values\n finally:\n conn_close(c, conn)\n\n return output",
"def make_companies():\n logging.info(\"Making CH\")\n companies_address = get_address()\n companies_sector = get_sector()\n\n companies = (\n companies_address[[\"company_number\", \"postcode\"]]\n .merge(\n companies_sector.query(\"rank==1\")[[\"company_number\", \"SIC4_code\"]],\n on=\"company_number\",\n )\n .assign(division=lambda x: [c[:2] for c in x[\"SIC4_code\"]])\n .assign(division_name=lambda x: x[\"division\"].map(_DIV_NAME_LOOKUP))\n .merge(nspl, left_on=\"postcode\", right_on=\"pcds\")\n )\n\n return companies",
"def getCompanyName(self, stockSymbol):\n return self.db.select_company_name(stockSymbol)",
"def test_get_subscribe_company(self):\n company = create_company()\n user_company = create_usercompany(self.user, company)\n res = self.client.get(COMPANY_STOCK_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def company_lists(self):\n return self.client.get('company/named-lists')",
"def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)",
"def display_cups(self):\n print('cups: ', end='')\n for cup in self.cups:\n if cup == self.current:\n print('(' + str(cup) + ') ', end='')\n else:\n print(cup, end=' ')\n print()",
"def get_kit_descr_collapse(self):\n self.ensure_one()\n if not self.is_kit_invoice_line:\n return \"\"\n comp_obj = self.env['account.invoice.line.comp']\n components = comp_obj.search([('invoice_line_id','=',self.id),('is_kit_invoice_comp','=',False)]) # get all comps that are not kits\n ir_model_data = self.env['ir.model.data']\n units_id = ir_model_data.get_object_reference('product','product_uom_unit')[1]\n res = []\n for comp in components:\n qty_int_val = int(comp.qty_total)\n if comp.uom_id.id == units_id: # uom is units, no need to print it\n qty = str(qty_int_val) # qty is an int because it's in units\n comp_str = comp.name + \": \" + qty\n else:\n if qty_int_val == comp.qty_total:\n qty = str(qty_int_val)\n else:\n qty = str(comp.qty_total)\n comp_str = comp.name + \": \" + qty + \" \" + comp.uom_id.name\n res.append(comp_str)\n res = \" (\" + \", \".join(res) + \")\"\n return res",
"def build_contracts_list():\n ns_getcontracts_filter = '''\n <nc:filter type=\"xpath\"\n xmlns:nc=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n xmlns:na=\"http://cisco.com/ns/yang/Cisco-IOS-XE-native\"\n xmlns:rb=\"http://cisco.com/ns/yang/Cisco-IOS-XE-bgp\"\n select=\"/na:native/ip/rb:extcommunity-list/standard\"\n />\n '''\n m = manager.connect( host='10.112.83.100',\n port=830,\n username='cisco',\n password='cisco',\n hostkey_verify=False)\n answer = m.get_config(source='running', filter=ns_getcontracts_filter).data_xml\n c = xmltodict.parse (answer)\n # build the list\n liste_contracts = [ { 'name': r['name'], 'id': r['permit']['rt']['name'][6:] } for r in c['data']['native']['ip']['extcommunity-list']['standard'] ]\n return liste_contracts"
]
| [
"0.62574416",
"0.6169119",
"0.6011516",
"0.5925151",
"0.56726855",
"0.5637131",
"0.5628796",
"0.56104475",
"0.5564428",
"0.54830617",
"0.5324723",
"0.5307937",
"0.5264544",
"0.52378327",
"0.5234159",
"0.52317774",
"0.51992476",
"0.5190561",
"0.5165406",
"0.5164627",
"0.5144692",
"0.5119741",
"0.51148576",
"0.5100981",
"0.5089207",
"0.50623477",
"0.5059274",
"0.5033407",
"0.5008972",
"0.49996108"
]
| 0.6787039 | 0 |
Get the number of fillings of a stock for the given year (or all fillings since the company listing) | def get_last_filling(stock_ticker: str,
filling_year: Optional[int] = None):
company_cik = sec_finance_functions.get_company_data_by_ticker(stock_ticker).company_cik
all_company_fillings = sec_finance_functions.get_all_company_filings_by_cik(company_cik)
all_company_fillings_by_year = sec_finance_functions.get_all_fillings_of_year(all_company_fillings, filling_year)
return all_company_fillings_by_year | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def numPostings(years):\n\tcount = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name = 'Filters')\n\t\tcount.append(DB.iloc[10][1])\n\treturn count",
"def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn",
"def get_counts_for_year(data, year):\r\n \r\n accidents_counts = list(map(list, zip(*list((filter(lambda x: x[0] == year, data))))))[2]\r\n return accidents_counts",
"def get_num_countries():\n num_countries = np.zeros(shape=(len(annual_files), 1))\n \n for year in annual_files:\n df = get_runners_data(year)\n country_count = df['countryCode'].value_counts()\n num_countries[annual_files.index(\n year)] = len(country_count.index)\n return num_countries",
"def calculate_total_fees_collected(year=None, month=None):\n total = 0\n fees = load_fees()\n for fee in fees:\n if year and month:\n date = datetime.datetime.fromtimestamp(fee[\"created\"])\n if date.year == year and date.month == month:\n total += fee[\"amount\"]\n else:\n total += fee[\"amount\"]\n return total",
"def get_num_of_tracks(year):\n track_results = sp.search('year:' + str(year), type='track', limit=1, offset=0)\n return track_results['tracks']['total']",
"def inventoryByYear(self):\n\n\t\tcurrentYear = date.today().year\n\t\tinventory = {}\n\n\t\tfor bottle in self.bottles:\n\t\t\tif bottle.consumption == None:\n\t\t\t\tholdYear = max(currentYear, bottle.hold_until)\n\t\t\t\tif holdYear not in inventory:\n\t\t\t\t\tinventory[holdYear] = 1\n\n\t\t\t\telse:\n\t\t\t\t\tinventory[holdYear] += 1\n\n\t\treturn inventory",
"def is_there_new_filling(stock_ticker: str):\n return {f\"Function is not implemented yet\"}",
"def get_labcount_dict(year):\r\n return common.get_dict_all(get_labcount_filename(year), int)",
"def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter",
"def stock_count(self):\n return self.stock_level_records.aggregate(models.Sum(\"stock_level\"))[\n \"stock_level__sum\"\n ]",
"def get_drugcount_dict(year):\r\n return common.get_dict_all(get_drugcount_filename(year), int)",
"def get_portf_pnl(portf_symbol, date_last_year_str, connection):\n ret = 0\n portf_pnl = 0\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n sql = \"SELECT price_instruments_data.pnl, portfolios.quantity, \"+\\\n \"instruments.pip, \"+\\\n \"price_instruments_data.pnl_long, price_instruments_data.pnl_short, \"+\\\n \"portfolios.strategy_order_type \" +\\\n \"FROM portfolios JOIN price_instruments_data ON \"+\\\n \"portfolios.symbol = price_instruments_data.symbol \"+\\\n \"JOIN instruments ON portfolios.symbol = instruments.symbol \"+\\\n \"WHERE portfolios.portf_symbol = '\"+ portf_symbol +\"' AND date=\"+\\\n date_last_year_str +\" ORDER BY portfolios.portf_symbol\"\n debug(sql)\n cursor.execute(sql)\n res = cursor.fetchall()\n for row in res:\n pnl_c = row[0]\n quantity_c = row[1]\n pip_c = row[2]\n pnl_long_c = row[3]\n pnl_short_c = row[4]\n strategy_order_type_c = row[5]\n if strategy_order_type_c == 'long/short':\n portf_pnl = portf_pnl + (pnl_c * quantity_c * pip_c)\n if strategy_order_type_c == 'long' and pnl_long_c != 999:\n portf_pnl = portf_pnl + (pnl_long_c * quantity_c * pip_c)\n if strategy_order_type_c == 'short' and pnl_short_c != 999:\n portf_pnl = portf_pnl + (pnl_short_c * quantity_c * pip_c)\n ret = portf_pnl\n cursor.close()\n return ret",
"def wordsForYear(year, yearlist):\r\n for el in yearlist:\r\n if el.year==year:\r\n return el.count\r\n return 0",
"def most_frequent_days(year):\n if isleap(year):\n return [i.strftime('%A') for i in\n sorted([date(year, 1, 1),\n date(year, 1, 2)],\n key=lambda x: x.weekday())]\n else:\n return [date(year, 1, 1).strftime('%A')]",
"def WeekCount(year):\n weekday = DayOfWeek(year, 1, 1)\n if weekday == 4:\n return 53\n elif weekday == 3 and LeapYear(year):\n return 53\n else:\n return 52",
"def getFreiePlaetze(self):\n frei = 0\n for reihe in self.belegung:\n for platz in reihe:\n if not platz.belegt(): frei += 1\n return frei",
"def count_by_year(all_articles):\n all_dates = get_all_dates(all_articles)\n year_count = {}\n\n for date in all_dates:\n if date.year in year_count:\n year_count[date.year] += 1\n else:\n year_count[date.year] = 1\n\n print_all_items_in_dict(year_count)",
"def get_favourite_from_year_with_count(self, year):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies where release_year = %d;''' % (year))\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count",
"def get_pcg_counts_dict(year):\r\n return common.get_dict_all(get_pcg_filename(year), int)",
"def cumulative_socemissions_for_20years(years_usingtec, factor_20years, soil_c_stock):\n\n if years_usingtec < 20:\n if factor_20years == 1:\n Cumulative_considering_20_years = 0\n else:\n Cumulative_considering_20_years = (-1 * soil_c_stock * (factor_20years - 1)) * (44 / 12)\n annual_change = Cumulative_considering_20_years / 20\n else:\n Cumulative_considering_20_years = 0\n annual_change = 0\n\n return [annual_change,\n Cumulative_considering_20_years]",
"def createYearCount(fields):\n year = int(fields[1])\n count = int(fields[2])\n return YearCount(year,count)",
"def get_physician_counts_year_lastyear(physician):\n try:\n today = datetime.today()\n day_of_year = today.timetuple().tm_yday\n last_year = today.year - 1\n start_date = date(last_year, 1, 1)\n end_date = start_date + timedelta(day_of_year - 2)\n\n all_referls = Referral.objects.filter(physician=physician,\n visit_date__range=(start_date,end_date))\n\n count = 0\n for refs in all_referls:\n count = count + refs.visit_count\n return count\n except Exception:\n return 0",
"def get_fcst_len(store, dept, data, data_test):\n def weeks_between(start_date, end_date):\n weeks = rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date)\n return weeks.count()\n \n\n c = data_test['Store'] == store\n c2 = data_test['Dept'] == dept\n start = pivot_df(data, dept).index[-1] + relativedelta(weeks=1)\n end = data_test[c&c2]['Date'].iloc[-1]\n fcst_len = weeks_between(start, end)\n return fcst_len",
"def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)",
"def bins_per_year(self):\n # Load the vector version #\n df = self.grouped_bins.reset_index()\n # Add year and remove TimeStep #\n df['year'] = self.country.timestep_to_year(df['time_step'])\n df = df.drop('time_step', axis=1)\n # Only if we are in the calibration scenario #\n if self.parent.parent.scenario.short_name == 'calibration':\n # Patch the harvest data frame to stop at the simulation year #\n selector = df['year'] <= self.parent.parent.country.base_year\n df = df.loc[selector].copy()\n # Return #\n return df",
"def books_by_year(year):\n lst = []\n for book in root.findall('Book'):\n published_year = book.find('Year_of_publishing').text\n if int(published_year) == year:\n lst.append(book)\n return len(lst)",
"def Friday_occur_from_2000(day, month, year):\r\n Day_on_13 = [Day_of_week(13, s, t) \\\r\n for s in xrange(1, 13) for t in xrange(2000, year)]\r\n count_from_2000 = Day_on_13.count(\"Friday\")\r\n \r\n if day >= 13:\r\n count_from_2000 += \\\r\n [Day_of_week(13, s, year) for s in xrange(1, month+1)].count(\"Friday\")\r\n else:\r\n count_from_2000 += \\\r\n [Day_of_week(13, s, year) for s in xrange(1, month)].count(\"Friday\")\r\n return count_from_2000",
"def _get_docks_available(sta):\n return sta['num_docks_available']",
"def total_feet_of_ribbon(packages):\n return sum(feet_of_ribbon(d) for d in dd)"
]
| [
"0.60557085",
"0.5479599",
"0.5406363",
"0.53464955",
"0.53220004",
"0.5278154",
"0.517318",
"0.51207525",
"0.51200336",
"0.4972167",
"0.49510524",
"0.49407512",
"0.49316242",
"0.49237838",
"0.49139115",
"0.49106917",
"0.49084038",
"0.48991558",
"0.48983836",
"0.48923317",
"0.48742566",
"0.48704678",
"0.48580095",
"0.48512894",
"0.48503944",
"0.4834996",
"0.4826243",
"0.48208758",
"0.48176998",
"0.4815076"
]
| 0.6672505 | 0 |
Call to inform that any concurrent ripping process is finished.
if self.is_ripping:
self.is_ripping.clear() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def syncDone (self) :\r\n self.ongoing_sync_count -= 1",
"def _notify_end(self):\n to_scan = len(self._files_to_scan)\n event.notify(ScanProgressEvent(to_scan, to_scan, True))",
"def finished(self):",
"def finished(self):\n pass",
"def finish(self) -> None:",
"def finish(self) -> None:",
"def finish():",
"def finish():",
"def finish():",
"def finish():",
"def _finished(self) -> None:",
"def finish(self):\r\n\r\n self._is_finished = True",
"def finished(self):\n\t\telog(\"finished\")",
"def finish(self):",
"def finish(self):",
"def finish(self):\n pass",
"def finish(self):\n pass",
"def finish():\n pass",
"def finished(self):\n raise NotImplementedError()",
"def finished(self):\r\n raise NotImplementedError",
"def finish(self):\n with self._lock: # just to be tidy; lock not really needed to set a boolean\n self._done = True",
"def _done(self):\n self._doneFlag = True\n self._executionCompletedNotifier.notify(self)",
"def complete(self):\n pass",
"def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))",
"def task_done(self) -> None:\n pass",
"def complete(self):\n calls = []\n while self[1:] not in calls:\n calls.append(self[1:])\n self.complete_once()",
"def done(self):",
"def done(self):",
"def finish(self):\r\n self.start_finish()\r\n self.wait_finish()",
"def completion() -> None:"
]
| [
"0.6337645",
"0.62002885",
"0.6101757",
"0.60869145",
"0.60562354",
"0.60562354",
"0.6047451",
"0.6047451",
"0.6047451",
"0.6047451",
"0.6016429",
"0.59790313",
"0.59760773",
"0.5959373",
"0.5959373",
"0.5951485",
"0.5951485",
"0.5918788",
"0.59124225",
"0.5900939",
"0.58990365",
"0.5891454",
"0.5882296",
"0.5843184",
"0.58057266",
"0.5778317",
"0.575882",
"0.575882",
"0.57588184",
"0.5747729"
]
| 0.67650646 | 0 |
Iterate over DISC, splitting it into packets starting at TRACK_NUMBER index 1. This call will ensure that no packets cross a track or pregap boundary, and will also obey any edits to the disc. It will not, however, read any samples from disc, just tell the calling code what to read. | def iterate(cls, disc, track_number):
assert track_number >= 0 and track_number < len(disc.tracks)
track = disc.tracks[track_number]
packet_frame_size = (
disc.audio_format.rate / cls.PACKETS_PER_SECOND)
# Mock up a packet that ends at the start of index 1, so the
# first packet generated starts at that position
p = cls(disc, track, track_number, track.pregap_offset, 0)
while True:
# Calculate offsets of next packet
abs_pos = p.abs_pos + p.length
if abs_pos < track.pregap_offset:
length = min(track.pregap_offset - abs_pos, packet_frame_size)
else:
length = min(track.length - abs_pos, packet_frame_size)
assert length >= 0
if length == 0:
# Reached end of track, switch to next. Simplify this
# code by generating a dummy packet for the next
# iteration to work on (but don't yield it!)
track_number += 1
try:
track = disc.tracks[track_number]
except IndexError:
# That was the last track, no more packets
return
p = cls(disc, track, track_number, 0, 0)
else:
# Generate next packet
flags = 0
if (track.pause_after
and abs_pos + length == track.length
and track_number + 1 < len(disc.tracks)):
flags |= p.PAUSE_AFTER
p = cls(disc, track, track_number, abs_pos, length, flags)
yield p | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_packets(self, reader: Par2FileReader):\n start_count = len(self)\n pointers = reader.get_pointers()\n # Create RecoverySets if needed\n for set_id, pointer_set in packets.by_set_id(pointers).items():\n print(set_id.hex(), pointer_set)\n if set_id not in self.recovery_sets.keys():\n # Create a RecoverySet if needed\n self.recovery_sets[set_id] = RecoverySet(set_id)\n for pointer in pointer_set:\n self.recovery_sets[set_id].packets.add(pointer)\n logger.info(\"Added {} new packets\".format(len(self) - start_count))",
"def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p",
"def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,\n max_data_length:int=10000):\n # NOTE: This has code duplication with the async version, think about how to solve this\n\n psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))\n packets_captured = 0\n\n data = b\"\"\n try:\n while self.is_open.value:\n try:\n packet, data = self.eventloop.run_until_complete(\n self._get_packet_from_stream(tshark_process.stdout, \n data,\n psml_structure=psml_structure,\n got_first_packet=packets_captured > 0, \n timeout=timeout))\n except EOFError:\n echo(\"Caught EOF\", file=Interceptor.stdout)\n self._log.debug(\"EOF reached (sync)\")\n break\n\n if(packet is False): continue\n\n if packet:\n packets_captured += 1\n yield packet\n if packet_count and packets_captured >= packet_count:\n break\n if len(data) > max_data_length:\n data = b''\n finally:\n if tshark_process in self._running_processes:\n self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))",
"def snips_to_split(snip_dir, data_dir, test_recs=2, min_recs=6,\n bird_list_path='bird_list.txt',\n labels_file_path='labels.txt'):\n train_dir = os.path.join(data_dir, 'train')\n test_dir = os.path.join(data_dir, 'test')\n\n if os.path.exists(train_dir):\n shutil.rmtree(train_dir)\n os.makedirs(train_dir)\n if os.path.exists(test_dir):\n shutil.rmtree(test_dir)\n os.makedirs(test_dir)\n\n for subd in os.listdir(snip_dir):\n bird_path = os.path.join(snip_dir, subd)\n if not os.path.isdir(bird_path):\n continue\n rec_folders = [sd for sd in os.listdir(bird_path) if\n (os.path.isdir(os.path.join(bird_path, sd))\n and sd != 'samples')]\n num_recordings = len(rec_folders)\n if num_recordings < min_recs:\n continue\n counts = [len([f for f in os.listdir(os.path.join(bird_path, sd))])\n for sd in rec_folders]\n remaining = num_recordings\n while remaining > 0:\n max_index = counts.index(max(counts))\n # Send all recordings except test_recs=2 to train split.\n if remaining > test_recs:\n add_to_split(os.path.join(bird_path, rec_folders[max_index]),\n train_dir, int(subd))\n # Send remaining recordings to test split.\n else:\n add_to_split(os.path.join(bird_path, rec_folders[max_index]),\n test_dir, int(subd))\n counts[max_index] = 0\n remaining -= 1\n\n # Calculate and print the number of train and test images.\n subs = [sd for sd in os.listdir(train_dir) if\n os.path.isdir(os.path.join(train_dir, sd))]\n num = sum([len([f for f in os.listdir(os.path.join(train_dir, sd))])\n for sd in subs])\n logging.info(\"Number of images in train split: %d\" % num)\n subs = [sd for sd in os.listdir(test_dir) if\n os.path.isdir(os.path.join(test_dir, sd))]\n num = sum([len([f for f in os.listdir(os.path.join(test_dir, sd))])\n for sd in subs])\n logging.info(\"Number of images in test split: %d\" % num)\n\n # Get 'bird_list' and use it to write the labels file for training.\n with open(bird_list_path) as f:\n lines = f.readlines()\n bird_list = [line.rstrip('\\n') for line in lines]\n labels_to_class_names = {label: bird for label, bird in\n enumerate(bird_list) if label <= cutoff}\n write_labels_file(labels_to_class_names, data_dir,\n filename=labels_file_path)",
"def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)",
"def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)",
"def record_chunk(self):\n data = self.stream.read(nFFT)\n data_array = bytearray(data)\n self.cur_input = []\n for i in range(nFFT):\n amp = struct.unpack('H', data_array[:2])\n for _ in range(2):\n data_array.pop(0)\n self.cur_input.append(amp)",
"def _convert_packets_into_batch(self, packets):\n assert isinstance(packets, (tuple, list))\n assert len(packets) > 0\n assert all(isinstance(packet, tuple) for packet in packets)\n assert all(len(packet) == 2 for packet in packets)\n assert all(isinstance(packet[0], Candidate) for packet in packets)\n assert all(isinstance(packet[1], str) for packet in packets)\n\n for candidate, packet in packets:\n # find associated community\n try:\n community = self.get_community(packet[2:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown community) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown community\")\n self._statistics.drop_count += 1\n continue\n\n # find associated conversion\n try:\n conversion = community.get_conversion(packet[:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown conversion) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown conversion\")\n self._statistics.drop_count += 1\n continue\n\n try:\n # convert binary data into the meta message\n yield conversion.decode_meta_message(packet), candidate, packet, conversion\n\n except DropPacket, exception:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (\", exception,\") from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:decode_meta_message:%s\" % exception)\n self._statistics.drop_count += 1",
"def start_loop_recording(self, track):\n pass",
"def ExtractTrackInformation(lines):\n\n # The starting line should be something like ' TRACK 01 AUDIO'\n # and we want to create ``data = {'track': '1'}``\n # NB: Cue format has a 99 track limit\n data = {\"track\": CueMetadata.ExtractProperty(lines[0], \"TRACK\")[0:2].lstrip(\"0\")}\n\n # Parse the remaining lines for this track to find the track starting time\n # which is typically, but not necessarily, a line starting with ' INDEX 01'\n # Also want to pick up any extra tags in the block and store it in ``data``,\n # eg, the 'TITLE' field. Since not all fields are valid but remarks are\n # it's necessary to \"un-remark\" the lines starting with 'REM '\n times = {}\n for line in lines[1:]:\n if not line.startswith(' ' * 4):\n break\n line = line.strip()\n # Don't consider multi-artist albums\n if line.startswith(\"PERFORMER\"):\n continue\n line = line.replace(\"INDEX \", \"INDEX\") # Turn 'INDEX 01' into 'INDEX01', etc.\n line = line.replace(\"REM \", \"\") # Make remarks appear as valid tags\n name = line.split(\" \")[0]\n info = CueMetadata.ExtractProperty(line, name)\n if not info:\n continue\n name = name.lower()\n if \"INDEX\" in line:\n # Handle these time codes separately since there may be more than one\n times[name] = time.CueTimeToMKATime(info)\n else:\n data[name] = info\n # In CUE files, 'INDEX 00' is (typically) used for pre-gap and 'INDEX 01' denotes\n # the start of the actual track. Higher indices are possible, but rarely used,\n # typically for access to portions of songs. Here we want to prefer 'INDEX 01'\n # and use 'INDEX 00' if there is no 'INDEX 01' while ignoring higher indices.\n for idx in [\"index01\", \"index00\"]:\n if idx in times:\n time_code = idx\n break\n else:\n raise CueFormatError(f\"No valid time codes found for track {data['track']}\")\n data[\"start_time\"] = times[time_code]\n return data",
"def processBuffer(self, noteBuffer):\n\n\t\tmeasureBySixteenths = Util.chunks(noteBuffer, \n\t\t\t\t\t\t\t\t\t\t int(self.sixteenthTimerTicks))\n\n\t\t# Are the samples for each 16th note chunk predominantly \n\t\t# None (rest) or predominantly a note?\n\t\tmeasureByMaxNote = [max(Util.counter(note16th)) \n\t\t\t\t\t\t\tfor note16th in measureBySixteenths]\n\t\t# Get each note with its corresponding length in terms of \n\t\t# 16th beats.\t\t\t\t\t\n\t\tmeasureByNoteGroup = [ ( elem, len(list(grouper)) ) \n\t\t\t\t\t\t for elem, grouper in groupby(measureByMaxNote) ]\n\t\tmeasure = stream.Measure()\t\t\t\t\t\n\t\t# Build the new measure\n\t\tfor (elem, noteLen) in measureByNoteGroup:\n\t\t\tif ( isinstance(elem, note.Note) ):\n\t\t\t\t# a 16th note is 1/4 a quarter note\n\t\t\t\tif ( self.heavyFiltering.get() ):\n\t\t\t\t\t# smooth to nearest eighth\n\t\t\t\t\tnoteLen = Util.stepround(noteLen, 2)\n\t\t\t\telem.quarterLength = (1.0/4.0) * noteLen\t\t\t\t\t\n\t\t\t\tmeasure.insert(elem)\n\t\t\telse:\n\t\t\t\trest = note.Rest(quarterLength=noteLen)\n\t\t\t\tmeasure.insert(rest)\t\t\n\t\t\t\t\n\t\tmeasureLength = len(measure)\t\t\t\n\t\tif ( measureLength > 1 and self.heavyFiltering.get() ):\n\t\t\t# Filter out octave jumps\n\t\t\tself.listener.windowLength = 5\n\t\t\tpleasePop = []\n\t\t\tfor i in xrange(measureLength):\n\t\t\t\tcurrent = measure[i]\n\t\t\t\tif ( i == 0 ):\n\t\t\t\t\tnext = measure[1]\n\t\t\t\t\tprev = None\n\t\t\t\telif ( i == measureLength - 1 )\t:\n\t\t\t\t\tprev = measure[i-1]\n\t\t\t\t\tnext = None\n\t\t\t\telse:\n\t\t\t\t\tprev = measure[i-1]\t\n\t\t\t\t\tnext = measure[i+1]\n\t\t\t\tnoteSequence = (isinstance(current, note.Note) \n\t\t\t\t\t\t\t\tand isinstance(next, note.Note)\n\t\t\t\t\t\t \t\tand isinstance(prev, note.Note) )\n\n\t\t\t\tif ( noteSequence ):\n\t\t\t\t\toctaveJump = ((prev.octave == next.octave) and \n\t\t\t\t\t\t\t\t\t(current.octave != prev.octave))\n\t\t\t\t\tif ( octaveJump ): pleasePop.append(i)\n\t\t\tfor i in pleasePop: measure.pop(i)\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t# You don't want too many ledger lines...\n\t\toctaves = [n.octave for n in measure if isinstance(n, note.Note)]\n\t\tif ( len(octaves) > 0 and min(octaves) < 4 ): \n\t\t\tmeasure.insert(clef.BassClef())\n\t\telse:\n\t\t\tmeasure.insert(clef.TrebleClef())\t\n\t\t\t\n\t\tself.transcribedPart.insert(measure)\t\n\t\tself.updateSheetDisplay()",
"def _read_data_into_packet(self, p):\n\n length = p.length * self.disc.audio_format.bytes_per_frame\n\n if p.file_pos is None:\n # Silence, so send on null bytes to player\n p.data = '\\0' * length\n\n else:\n file_pos = p.file_pos * self.disc.audio_format.bytes_per_frame\n self.audio_file.seek(file_pos)\n\n p.data = self.audio_file.read(length)\n length -= len(p.data)\n file_pos += len(p.data)\n\n # If we didn't get all data, iterate with a timeout until\n # it's all been read or the ripping process has stopped.\n # This is not very efficient, and there's a small race\n # condition at the end of the disc, but this should be\n # very rare so keep it unoptimised for now.\n\n while length > 0 and self.is_ripping and self.is_ripping.is_set():\n time.sleep(1)\n\n self.audio_file.seek(file_pos)\n d = self.audio_file.read(length)\n\n length -= len(d)\n file_pos += len(d)\n\n p.data += d\n\n # Still didn't get all data, treat it as an exception\n if length > 0:\n raise SourceError('unexpected end of file, expected at least {0} bytes'\n .format(length))",
"def save_disc_tracking_segments(self):\r\n\r\n print '\\n\\n### Saving discarded tracking segments ###\\n'\r\n logger.debug('\\n\\n### Saving discarded tracking segments ###\\n')\r\n\r\n segments_path = os.path.join(\r\n self.track_path, c.FACE_TRACKING_SEGMENTS_DIR)\r\n\r\n segments_path += '_discarded'\r\n\r\n # Delete already saved files\r\n if os.path.exists(segments_path):\r\n\r\n images_dirs = os.listdir(segments_path)\r\n\r\n for images_dir in images_dirs:\r\n images_dir_path = os.path.join(segments_path, images_dir)\r\n shutil.rmtree(images_dir_path)\r\n\r\n disc_tracked_faces_nr = float(len(self.disc_tracked_faces))\r\n\r\n segment_counter = 0\r\n\r\n for segment_dict in self.disc_tracked_faces:\r\n\r\n self.progress = 100 * (segment_counter / disc_tracked_faces_nr)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n segment_frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n segment_path = os.path.join(\r\n segments_path, str(segment_counter))\r\n\r\n if not (os.path.exists(segment_path)):\r\n os.makedirs(segment_path)\r\n\r\n image_counter = 0\r\n\r\n for segment_frame_dict in segment_frame_list:\r\n\r\n frame_name = segment_frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n frame_path = os.path.join(self.frames_path, frame_name)\r\n\r\n image = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Add tracking window to image as red rectangle\r\n track_bbox = segment_frame_dict[c.TRACKING_BBOX_KEY]\r\n\r\n x0 = track_bbox[0]\r\n x1 = x0 + track_bbox[2]\r\n y0 = track_bbox[1]\r\n y1 = y0 + track_bbox[3]\r\n\r\n cv2.rectangle(\r\n image, (x0, y0), (x1, y1), (0, 0, 255), 3, 8, 0)\r\n\r\n # Add detection bbox to image as blue rectangle\r\n det_bbox = segment_frame_dict[c.DETECTION_BBOX_KEY]\r\n\r\n if det_bbox is not None:\r\n x0 = det_bbox[0]\r\n x1 = x0 + det_bbox[2]\r\n y0 = det_bbox[1]\r\n y1 = y0 + det_bbox[3]\r\n\r\n cv2.rectangle(\r\n image, (x0, y0), (x1, y1), (255, 0, 0), 3, 8, 0)\r\n\r\n file_name = '%07d.png' % image_counter\r\n\r\n face_path = os.path.join(segment_path, file_name)\r\n\r\n cv2.imwrite(face_path, image,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n del image\r\n\r\n image_counter += 1\r\n\r\n segment_counter += 1",
"def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break",
"def genLoopPackets(self):\n\n for p in self.get_observations():\n ts = int(time.time() + 0.5)\n packet = pywws2weewx(p, ts,\n self._last_rain_loop, self._last_rain_ts_loop,\n self.max_rain_rate)\n self._last_rain_loop = packet['rainTotal']\n self._last_rain_ts_loop = ts\n if packet['status'] != self._last_status:\n log.info('station status %s (%s)' % \n (decode_status(packet['status']), packet['status']))\n self._last_status = packet['status']\n yield packet",
"def parse_chunks(self):\n logger.info('parse_chunks()')\n\n while (self.replay.pos < len(self.replay)):\n chunk_type = self.replay.read_uint32()\n chunk_size = self.replay.read_int32()\n offset = self.replay.bytepos\n\n if chunk_type == ChunkTypes.CHECKPOINT.value:\n self.parse_checkpoint()\n\n elif chunk_type == ChunkTypes.EVENT.value:\n self.parse_event()\n\n elif chunk_type == ChunkTypes.REPLAYDATA.value:\n self.parse_replaydata()\n\n elif chunk_type == ChunkTypes.HEADER.value:\n self.parse_header(chunk_size)\n\n self.replay.bytepos = offset + chunk_size",
"def _convert_packets_into_batch(self, packets):\n def filter_non_bootstrap_nodes():\n for candidate, packet in packets:\n cid = packet[2:22]\n\n if not cid in self._communities and False: # candidate.sock_addr[0] in self._non_autoload:\n if __debug__:\n logger.warn(\"drop a %d byte packet (received from non-autoload node) from %s\", len(packet), candidate)\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:from bootstrap node for unloaded community\")\n continue\n\n yield candidate, packet\n\n packets = list(filter_non_bootstrap_nodes())\n if packets:\n return super(TrackerDispersy, self)._convert_packets_into_batch(packets)\n\n else:\n return []",
"def _track_create_block(self, msg, src):\n \n try:\n main_track = int(msg[2],10)\n except:\n main_track = self.track_id_from_name(msg[2])\n \n new_tracks = {\n 'midi' : 0,\n 'audio' : 0\n }\n #track_types = ['midi','audio']\n for track_type in new_tracks: \n new_tracks[track_type] = 0\n if str(msg[3]).lower() == track_type.lower():\n new_tracks[track_type] = msg[4]\n if len(msg) >= 7:\n if str(msg[5]).lower() == track_type.lower():\n new_tracks[track_type] = msg[6]\n\n try:\n names = msg[msg.index('names') + 1:] #case sensitive?\n except:\n names = []\n changed = False \n \n if main_track == -1:\n self.song().create_audio_track(0) #maybe at end?\n main_track = 0\n self.song().tracks[main_track].name = msg[2]\n changed = True\n \n #self.song().tracks[main_track].is_foldable = 1 #come on ableton, no creating groups in the api?\n\n self.log_message(\"names\",names)\n if self.song().tracks[main_track].is_foldable: #count the tracks under this group\n i = 0\n while main_track+i+1 < len(self.song().tracks) and self.song().tracks[main_track+i+1].is_grouped:\n i+=1\n group_tracks = i\n #total_tracks = len(self.song().visible_tracks)\n #self.song().tracks[main_track].fold_state = 1\n #group_tracks = total_tracks - len(self.song().visible_tracks)\n #self.song().tracks[main_track].fold_state = 0\n self.log_message(\"group tracks:\",group_tracks)\n #count tracks\n self.log_message(\"checking midi under\",self.song().tracks[main_track].name,new_tracks['midi'])\n count = 0\n for i in range(new_tracks['midi']):\n if self.song().tracks[main_track+i+1].has_audio_input:\n changed = True\n self.log_message(\"inserting midi at:\",main_track+i+1)\n self.song().create_midi_track(main_track + 1)\n count += 1\n \n for i in range(group_tracks - new_tracks['midi']): #kill any extra midis\n self.log_message(\"midi at:\",main_track,main_track+new_tracks['midi']+i)\n if not self.song().tracks[main_track+new_tracks['midi']+1].has_audio_input:\n self.log_message(\"extra midi at:\",main_track+new_tracks['midi']+i+1)\n changed = True\n self.song().delete_track(main_track+new_tracks['midi'] + 1)\n group_tracks -= 1\n else:\n break\n \n if (group_tracks + count) == new_tracks['midi'] :\n self.song().create_midi_track(main_track + 1) #workaround: if only midi tracks are present, add one more to extend group and delete it later\n changed = True\n \n for i in range(new_tracks['audio']):\n self.log_message(\"checking audio under\",self.song().tracks[main_track].name,new_tracks['audio'],i,main_track+i+1+new_tracks['midi'])\n if (main_track+i+1+new_tracks['midi'] < len(self.song().tracks)) and (not self.song().tracks[main_track+i+1+new_tracks['midi']].has_audio_input) or (main_track+i+1+new_tracks['midi']) > main_track+group_tracks:\n self.log_message(\"inserting audio at:\",main_track+i+1+new_tracks['midi'])\n self.song().create_audio_track(main_track + new_tracks['midi'] + 1)\n changed = True\n\n self.log_message(\"updating names\")\n for i in range(len(names)):\n #self.log_message(\"updating names\",i,len(names),self.song().tracks[main_track + i + 1].name.lower() , names[i].lower())\n if i < new_tracks['audio'] + new_tracks['midi'] and self.song().tracks[main_track + i + 1].name.lower() != names[i].lower():\n self.song().tracks[main_track + i + 1].name = names[i]\n changed = True\n \n \n if self.song().tracks[main_track].is_foldable:\n #cleanup extra tracks if the main track was a group track\n i = 0\n while main_track+i+1 < len(self.song().tracks) and self.song().tracks[main_track+i+1].is_grouped:\n i+=1\n group_tracks = i\n 
excess_tracks = group_tracks - (new_tracks['audio'] + new_tracks['midi'])\n self.log_message(\"cleaning up:\",excess_tracks)\n if excess_tracks > 0:\n changed = True\n for i in range(excess_tracks):\n self.song().delete_track(main_track + group_tracks - i)\n \n \n for i in range(new_tracks['audio'] + new_tracks['midi'] + 1): #reset volumes & mutes\n if self.song().tracks[main_track+i].has_audio_input:\n self.song().tracks[main_track+i].mixer_device.volume.value = .85\n self.song().tracks[main_track+i].mute = 0\n\n #if changed:\n # self._reassign_tracks() #i think listeners will handle this just fine",
"def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)",
"def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))",
"def start_tracker(self):\n while self.tracker_enabled:\n # Sleep a bit to leave more time to other threads\n time.sleep(Tracker.LOOP_SLEEP)\n # self._log_queue_lengths()\n if not self.frames_to_track:\n continue\n frame_to_track = self.frames_to_track.popleft()\n frame_tracked = self.track_controllers_in_frame(\n frame_to_track, self.drum_set.controllers)\n self.frames_tracked.append(frame_tracked)\n self.drum_set.play()",
"def new_loop_packet(self, event):\n # packet has traveled back in time\n if self.end_ts > event.packet['dateTime']:\n self.logger.error(\"Service ignoring packet has dateTime of %f which is prior to previous packet %f\"\n %(event.packet['dateTime'], self.end_ts))\n else:\n start_ts = self.end_ts\n self.end_ts = event.packet['dateTime']\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. therefore use subscribed?\n self.logger.debug(\"Service packet prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))\n target_data = self.subscriber.get_accumulated_data(topic,\n start_ts, self.end_ts, event.packet['usUnits'])\n event.packet.update(target_data)\n self.logger.debug(\"Service packet after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.packet['dateTime']),\n to_sorted_string(event.packet)))",
"def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']",
"def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame",
"def _generate_tracks(self):\n # Need to use more than 1 thread, and lose reproducibility of FSR\n # numbering, in order to have temporary tracks and segments array of\n # the correct size for the multi-threaded solver.\n self.track_generator.setNumThreads(self.num_threads)\n self.track_generator.generateTracks()",
"def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()",
"def __readCONTINoutput(self):\n\n titleline = 'OBJ. FCTN. VARIANCE STD. DEV.'\n chunkTitle = re.compile('OBJ. FCTN. VARIANCE STD. DEV. ')\n\n alldata = []\n\n with open(self.outputfile, 'r') as f:\n\n for line in f:\n if chunkTitle.search(line) is not None:\n\n alphadic = {}\n\n # gets the header\n alphaLine = next(f)\n if '*' in alphaLine:\n alphadic['marked'] = True\n\n alphaLine = alphaLine.replace('*', '')\n alphaParam = np.fromstring(alphaLine, sep=' ')\n\n # reduce the header line to string seperated text\n line = re.sub('\\s\\s\\s+', ' ', line).strip()\n for key, value in zip(line.split(' '), alphaParam):\n alphadic[key] = value\n # skip a line then get the data\n next(f)\n # alldata.append((alphadic, readblock(f)))\n alldata.append(\n (alphadic, readblock(f), readSummaryData(f)))\n\n # skip a line then get the data\n # print(next(f))\n\n return alldata",
"def new_loop_packet(self, event):\n global _data\n print \"LOOP: \", weeutil.weeutil.timestamp_to_string(event.packet['dateTime']), StdPrint.sort(event.packet)\n ## Save data in managed shared dict\n _data[\"t\"] = event.packet['dateTime']\t\t\t# timestamp (UNIX, int)\n _data[\"r\"] = StdPrint.sort(event.packet)\t\t# data only"
]
| [
"0.5243465",
"0.50652975",
"0.503824",
"0.49788526",
"0.49385667",
"0.48881853",
"0.48847038",
"0.4879093",
"0.48679477",
"0.4829947",
"0.48023844",
"0.4800166",
"0.47898296",
"0.47791988",
"0.47784358",
"0.47733098",
"0.4772302",
"0.4743291",
"0.4735822",
"0.4723006",
"0.4693958",
"0.46356636",
"0.46182832",
"0.46181124",
"0.4603398",
"0.458763",
"0.4578864",
"0.45684206",
"0.4555681",
"0.4554562"
]
| 0.73285556 | 0 |
Takes base paths to specs and resources. | def set_paths(self, specs, resources):
self.install = 'install.xml'
self.specs_path = path_format(specs)
self.root = path_format(dirname(dirname(self.specs_path)) + '/')
self.res_path = path_format(resources)
self.resources['BASE'] = self.res_path
self.specs['BASE'] = self.specs_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())",
"def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()",
"def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)",
"def setup_paths(self):\n # TODO: separate out paths based on android, json patch, server_gen.\n app_base_path = os.path.join(self.base_dir, self.app_name)\n build_dir = os.path.join(app_base_path, \"build\")\n api_spec_dir = os.path.join(app_base_path, \"api_specs\")\n api_spec_migrations_dir = os.path.join(api_spec_dir, \"migrations\")\n api_specs_json = os.path.join(api_spec_dir, \"api_spec.json\")\n request_response_dir = os.path.join(build_dir, \"request_response\")\n decorator_options_file = os.path.join(request_response_dir, \"decorator_options.py\")\n security_definitions_file = os.path.join(request_response_dir, \"security_definitions.py\")\n serializers_base_dir = os.path.join(build_dir, \"serializers\")\n definitions_serializers_base_dir = os.path.join(serializers_base_dir, \"definitions\")\n global_parameters_dir = os.path.join(build_dir, \"parameters\")\n global_response_dir = os.path.join(build_dir, \"responses\")\n url_file = os.path.join(build_dir, \"urls.py\")\n mobx_base_dir = os.path.join(build_dir, \"mobx_classes\")\n mobx_base_dir_models = os.path.join(mobx_base_dir, 'models')\n mobx_base_dir_responses = os.path.join(mobx_base_dir, 'responses')\n mobx_base_dir_endpoints = os.path.join(mobx_base_dir, 'endpoints')\n mobx_base_dir_parameters = os.path.join(mobx_base_dir, 'parameters')\n view_environments_dir = os.path.join(build_dir, \"view_environments\")\n sample_json_dir = os.path.join(app_base_path, \"conf\", \"responses\")\n settings_file = os.path.join(app_base_path, \"conf\", \"settings.py\")\n mock_views_dir = os.path.join(build_dir, \"mock_views\")\n views_dir = os.path.join(app_base_path, \"views\")\n api_environment_file = os.path.join(api_spec_dir, \"api_environment.py\")\n android_base_dir = os.path.join(build_dir, \"android_%s\" % self.app_name)\n api_doc_dir = os.path.join(build_dir, \"docs\")\n tests_dir = os.path.join(app_base_path, \"tests\")\n global_jars_dir = os.path.join(self.base_dir, \"android_jars\")\n zappa_settings = os.path.join(self.base_dir, \"zappa_settings.json\")\n apidoc = os.path.join(self.base_dir, \"apidoc.json\")\n docs = os.path.join(self.base_dir, \"docs\")\n static = os.path.join(self.base_dir, \"static\")\n static_docs = os.path.join(static, \"docs\")\n interface_dir = os.path.join(app_base_path, 'interfaces')\n package_json = os.path.join(self.base_dir, \"package.json\")\n self.paths = {\n \"base_dir\": self.base_dir,\n \"app_base_path\": app_base_path,\n \"build_dir\": build_dir,\n \"api_spec_dir\": api_spec_dir,\n \"api_spec_migrations_dir\": api_spec_migrations_dir,\n \"api_specs_json\": api_specs_json,\n \"request_response_dir\": request_response_dir,\n \"decorator_options_file\": decorator_options_file,\n \"security_definitions_file\": security_definitions_file,\n \"serializers_base_dir\": serializers_base_dir,\n \"definitions_serializers_base_dir\": definitions_serializers_base_dir,\n \"global_parameters_dir\": global_parameters_dir,\n \"global_response_dir\": global_response_dir,\n \"url_file\": url_file,\n \"view_environments_dir\": view_environments_dir,\n \"sample_json_dir\": sample_json_dir,\n \"settings_file\": settings_file,\n \"mock_views_dir\": mock_views_dir,\n \"views_dir\": views_dir,\n \"api_environment_file\": api_environment_file,\n \"android_base_dir\": android_base_dir,\n \"api_doc_dir\": api_doc_dir,\n \"tests_dir\": tests_dir,\n \"global_jars_dir\": global_jars_dir,\n \"zappa_settings\": zappa_settings,\n \"apidoc\": apidoc,\n \"static\": static,\n \"static_docs\": static_docs,\n \"docs\": docs,\n 
\"interface_dir\": interface_dir,\n \"mobx_base_dir\": mobx_base_dir,\n 'mobx_base_dir_models': mobx_base_dir_models,\n 'mobx_base_dir_responses': mobx_base_dir_responses,\n 'mobx_base_dir_endpoints': mobx_base_dir_endpoints,\n 'mobx_base_dir_parameters': mobx_base_dir_parameters,\n \"package_json\": package_json\n }",
"def test_docs_paths():\n assert os.path.exists('test/examples/docs/paths-root-api.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')",
"def create_tester_paths():\n config.config_tester()\n _create_paths(vmcheckerpaths.tester_paths())",
"def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")",
"def add_base(paths):\r\n\r\n return [os.path.join(BASEDIR, x) for x in paths]",
"def path_helper(self, operations, resource, base_path=None, suffix=None, **kwargs):\n resource_uri_mapping = self._generate_resource_uri_mapping(self._app, resource, suffix)\n\n if not resource_uri_mapping:\n raise APISpecError(\"Could not find endpoint for resource {0}\".format(resource))\n\n operations.update(yaml_utils.load_operations_from_docstring(resource.__doc__) or {})\n\n # In case multiple uri were found, keep the only one that has methods\n try:\n path = next(uri for uri, methods in resource_uri_mapping.items() if methods)\n except StopIteration:\n path = next(iter(resource_uri_mapping))\n\n methods = resource_uri_mapping[path]\n\n if base_path is not None:\n # make sure base_path accept either with or without leading slash\n # swagger 2 usually come with leading slash but not in openapi 3.x.x\n base_path = '/' + base_path.strip('/')\n path = re.sub(base_path, \"\", path, 1)\n\n for method_name, method_handler in methods.items():\n docstring_yaml = yaml_utils.load_yaml_from_docstring(method_handler.__doc__)\n operations[method_name] = docstring_yaml or dict()\n return path",
"def test_get_test_assets(self):\n pass",
"def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))",
"def resources_path(*args):\n path = os.path.dirname(__file__)\n path = os.path.abspath(\n os.path.join(path, os.path.pardir, 'resources'))\n for item in args:\n path = os.path.abspath(os.path.join(path, item))\n\n return path",
"def resources(self):",
"def example(*paths):\n\n return normpath(join(dirname(__file__), '..', 'examples', *paths))",
"def assemble_resources(base_dir, resource_path, resources, type_restrictions=None):\n for f in resources:\n if type_restrictions and f.kind not in type_restrictions:\n continue\n target_dir = os.path.abspath(os.path.join(base_dir, resource_path, ResourceFile.DIR_MAP[f.kind]))\n f.copy_all_variants_to_dir(target_dir)",
"def tester_paths():\n return [dir_queue(), dir_tester_unzip_tmp()]",
"def base(path1, *paths):\r\n return BASE_DIR.relpathto(path1.joinpath(*paths))",
"def test_get_versions(self):\n # Arrange\n base_paths = ['base_path', 'base/path']\n\n for base_path in base_paths:\n with self.subTest(f'Base Path = {base_path}'):\n # Valid paths should look like {base_path}/{version}/{file}.\n files = [\n f'{base_path}/v0.0.1/file1.csv',\n f'{base_path}/v0.0.1/file2.csv',\n f'{base_path}/v0.0.2/file2.csv',\n f'{base_path}/file2.csv',\n f'{base_path}/another_path/v0.0.3/file2.csv',\n 'another_path/v0.0.4/file1.csv',\n f'v0.0.5/{base_path}/file2.csv']\n\n # Act\n results = get_versions(base_path, files)\n\n # Assert\n expected_results = set(['v0.0.2', 'v0.0.1'])\n self.assertSetEqual(expected_results, results)",
"def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])",
"def test_get_api_resources(self):\n pass",
"def merge_spec(self):\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n merge_spec.merge()",
"def test_get_all(self, mock_helio_finder, mock_exists):\n static_paths = self.finder.find('path/to/component/component.ext', all=True)\n self.assertEqual(static_paths, ['MOCK_BASE_DIR/path/to/component/static/component.ext',\n 'MOCK_BASE_DIR_2/path/to/component/static/component.ext'])",
"def ResourcePath(self, name):\n pass",
"def examples_paths():\n data_dir = utils.get_data_filename(os.path.join('tests', 'data'))\n p_xylene_dir = os.path.join(data_dir, 'p-xylene-implicit')\n p_xylene_gro_dir = os.path.join(data_dir, 'p-xylene-gromacs-example')\n ben_tol_dir = os.path.join(data_dir, 'benzene-toluene-explicit')\n abl_imatinib_dir = os.path.join(data_dir, 'abl-imatinib-explicit')\n tol_dir = os.path.join(data_dir, 'toluene-explicit')\n benz_tol_dir = os.path.join(data_dir, 'benzene-toluene-standard-state')\n\n paths = dict()\n paths['lysozyme'] = os.path.join(p_xylene_dir, '181L-pdbfixer.pdb')\n paths['p-xylene'] = os.path.join(p_xylene_dir, 'p-xylene.mol2')\n paths['benzene'] = os.path.join(ben_tol_dir, 'benzene.tripos.mol2')\n paths['toluene'] = os.path.join(ben_tol_dir, 'toluene.tripos.mol2')\n paths['abl'] = os.path.join(abl_imatinib_dir, '2HYY-pdbfixer.pdb')\n paths['imatinib'] = os.path.join(abl_imatinib_dir, 'STI02.mol2')\n paths['bentol-complex'] = [os.path.join(ben_tol_dir, 'complex.prmtop'),\n os.path.join(ben_tol_dir, 'complex.inpcrd')]\n paths['bentol-solvent'] = [os.path.join(ben_tol_dir, 'solvent.prmtop'),\n os.path.join(ben_tol_dir, 'solvent.inpcrd')]\n paths['pxylene-complex'] = [os.path.join(p_xylene_gro_dir, 'complex.top'),\n os.path.join(p_xylene_gro_dir, 'complex.gro')]\n paths['pxylene-solvent'] = [os.path.join(p_xylene_gro_dir, 'solvent.top'),\n os.path.join(p_xylene_gro_dir, 'solvent.gro')]\n paths['pxylene-gro-include'] = os.path.join(p_xylene_gro_dir, 'top')\n paths['toluene-solvent'] = [os.path.join(tol_dir, 'solvent.pdb'),\n os.path.join(tol_dir, 'solvent.xml')]\n paths['toluene-vacuum'] = [os.path.join(tol_dir, 'vacuum.pdb'),\n os.path.join(tol_dir, 'vacuum.xml')]\n paths['benzene-toluene-boxless'] = [os.path.join(benz_tol_dir, 'standard_state_complex_boxless.inpcrd'),\n os.path.join(benz_tol_dir, 'standard_state_complex.prmtop')]\n paths['benzene-toluene-nan'] = [os.path.join(benz_tol_dir, 'standard_state_complex_nan.inpcrd'),\n os.path.join(benz_tol_dir, 'standard_state_complex.prmtop')]\n return paths",
"def parse_paths(self):\n self.soup = BeautifulSoup(open(self.get_path('install')))\n for spec in list(self.specs.keys()):\n spec_file = self.find_specs_path(spec)\n if spec_file:\n # If spec file exists\n self.specs[spec] = path_format(spec_file)\n else:\n # If specs are held inside install.xml\n self.specs[spec] = self.install",
"def test_paths(self):\n # minify and combine\n js_source = javascript_link('/deep/a.js', '/b.js', combined=True, minified=True)\n css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True)\n self.assert_('\"/a.b.COMBINED.min.css\"' in css_source)\n self.assert_('\"/a.b.COMBINED.min.js\"' in js_source)\n\n # combine\n js_source = javascript_link('/deep/a.js', '/b.js', combined=True)\n css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True)\n self.assert_('\"/a.b.COMBINED.css\"' in css_source)\n self.assert_('\"/a.b.COMBINED.js\"' in js_source)\n\n # minify\n js_source = javascript_link('/deep/a.js', '/b.js', minified=True)\n css_source = stylesheet_link('/deep/a.css', '/b.css', minified=True)\n self.assert_('\"/deep/a.min.css\"' in css_source)\n self.assert_('\"/b.min.css\"' in css_source)\n self.assert_('\"/deep/a.min.js\"' in js_source)\n self.assert_('\"/b.min.js\"' in js_source)\n\n # root minify and combined\n js_source = javascript_link('/c.js', '/b.js', combined=True, minified=True)\n css_source = stylesheet_link('/c.css', '/b.css', combined=True, minified=True)\n self.assert_('\"/c.b.COMBINED.min.css\"' in css_source)\n self.assert_('\"/c.b.COMBINED.min.js\"' in js_source)\n\n # root minify\n js_source = javascript_link('/c.js', '/b.js', minified=True)\n css_source = stylesheet_link('/c.css', '/b.css', minified=True)\n self.assert_('\"/b.min.css\"' in css_source)\n self.assert_('\"/b.min.js\"' in js_source)\n self.assert_('\"/c.min.js\"' in js_source)\n self.assert_('\"/c.min.js\"' in js_source)\n\n # both root minify and combined\n js_source = javascript_link('/deep/a.js', '/deep/d.js', combined=True, minified=True)\n css_source = stylesheet_link('/deep/a.css', '/deep/d.css', combined=True, minified=True)\n self.assert_('\"/deep/a.d.COMBINED.min.css\"' in css_source)\n self.assert_('\"/deep/a.d.COMBINED.min.js\"' in js_source)",
"def test_get_deployment_resources(self):\n pass",
"def resolve_specs(paths):\n specs = []\n for path in paths:\n if os.path.isdir(path):\n _, _, files = os.walk(path).next()\n specs.extend(os.path.join(path, fname) for fname in files)\n else:\n specs.append(path)\n return specs",
"def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep",
"def test_walk_component_base_dir(self, mock_walk):\n static_dirs = [static_dir for static_dir in walk_component_base_dir('mock')]\n self.assertEqual(len(static_dirs), 1)\n self.assertEqual(static_dirs[0][0], 'component/static')"
]
| [
"0.6165541",
"0.6127959",
"0.611318",
"0.6023412",
"0.59037876",
"0.576045",
"0.575814",
"0.5617551",
"0.56028676",
"0.55873394",
"0.5575436",
"0.5532464",
"0.5520052",
"0.54906976",
"0.54781777",
"0.5471999",
"0.54613155",
"0.54188824",
"0.53953713",
"0.539372",
"0.5368863",
"0.53228784",
"0.53095824",
"0.5289808",
"0.5260423",
"0.52365625",
"0.5221117",
"0.5201276",
"0.52006006",
"0.51996684"
]
| 0.7049754 | 0 |
Extracts paths to available IzPack resources and spec files from the installer's install.xml spec. |

def parse_paths(self):
self.soup = BeautifulSoup(open(self.get_path('install')))
for spec in list(self.specs.keys()):
spec_file = self.find_specs_path(spec)
if spec_file:
# If spec file exists
self.specs[spec] = path_format(spec_file)
else:
# If specs are held inside install.xml
self.specs[spec] = self.install | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path",
"def get_installation_paths(versions=None):\n\n pass",
"def get_htdocs_dirs(self):\n from pkg_resources import resource_filename\n return [('inieditorpanel', resource_filename(__name__, 'htdocs'))]\n #return []",
"def _get_package_files(self) -> Dict[str, 'BinPackageFile']:\n\n if self._package_files is None:\n manifest_path = f\"{self.path}/packagemanifest\"\n manifest_urlpath = f\"{self.path}/packages/files/packagemanifest\"\n self.project.storage.download(manifest_urlpath, manifest_path)\n files = BinPackageFile.from_package_manifest(self.project.storage.fspath(manifest_path))\n self._package_files = {pf.extract_path: pf for pf in files}\n return self._package_files",
"def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")",
"def getImportedPathes():\n # Based on instance path, construct import pathes \n cfg = getConfiguration()\n instance_ipath = osp.join(cfg.instancehome, \"import\")\n product_ipath = osp.join(package_home(GLOBALS), \"import\")\n # Check presence of Product import directory\n if not osp.isdir(product_ipath): \n raise BadRequest, \"Skin Product's import directory '%s' - does not exist or is'nt direcory\" % product_ipath\n # Check presence of Instance import directory\n if not osp.isdir(instance_ipath):\n raise BadRequest, \"Instance import directory '%s' - does not exist or isn't direcory\" % instance_ipath\n return [instance_ipath, product_ipath]",
"def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()",
"def _get_base_files(self):\n setup_file = path.join(self.PyCogentDirectory, 'setup.py')\n #reqs_file = path.join(self.PyCogentDirectory, 'cogent-requirements.txt')\n #return [(setup_file, 'Python'), (reqs_file, 'Properties')]\n return [(setup_file, 'Python')]",
"def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []",
"def parse_tool_path(self):",
"def check_extract_from_egg(pth, todir=None):\n rv = []\n if os.path.altsep:\n pth = pth.replace(os.path.altsep, os.path.sep)\n components = pth.split(os.path.sep)\n for i, name in enumerate(components):\n if name.lower().endswith(\".egg\"):\n eggpth = os.path.sep.join(components[:i + 1])\n if os.path.isfile(eggpth):\n # eggs can also be directories!\n try:\n egg = zipfile.ZipFile(eggpth)\n except zipfile.BadZipfile as e:\n raise SystemExit(\"Error: %s %s\" % (eggpth, e))\n if todir is None:\n # Use the same directory as setuptools/pkg_resources. So,\n # if the specific egg was accessed before (not necessarily\n # by pyinstaller), the extracted contents already exist\n # (pkg_resources puts them there) and can be used.\n todir = os.path.join(pkg_resources_get_default_cache(),\n name + \"-tmp\")\n if components[i + 1:]:\n members = [\"/\".join(components[i + 1:])]\n else:\n members = egg.namelist()\n for member in members:\n pth = os.path.join(todir, member)\n if not os.path.isfile(pth):\n dirname = os.path.dirname(pth)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(pth, \"wb\") as f:\n f.write(egg.read(member))\n rv.append((pth, eggpth, member))\n return rv\n return [(pth, None, None)]",
"def GetFilesForTool(self):\n raise NotImplementedError()",
"def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))",
"def install_location(self):\r\n return self._content_at_path('/template/os/install/%s' % self.install_type)",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())",
"def get_installed_images(self):\n raise NotImplementedError",
"def get_installed_files(packagename, venv_pip, temp_dir):\n result = check_output(venv_pip + ['show', '-f', packagename])\n result = (result.decode()).split('\\n')\n files = []\n\n for line in result:\n # this line contains path to venv directory\n if line.startswith('Location:'):\n line = line[len('Location: '):]\n prefix = '/' + line.replace(temp_dir, 'usr') + '/'\n if line.startswith(' '*2):\n path = os.path.abspath(prefix + line.strip())\n if os.path.isdir(path):\n path += \"/\"\n files.append(path)\n return files",
"def get_htdocs_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [('hw', resource_filename(__name__, 'htdocs'))]",
"def get_data_files():\n return [\n ('share/jupyter/nbextensions/{}'.format(PY_PACKAGE), TARGETS),\n ('share/jupyter/lab/extensions', [\n os.path.relpath(f, '.') for f in glob.glob(TAR_PATH)\n ])\n ]",
"def get_package_resources(self):\n resources = []\n for pkg in self.packages:\n resource_data = self.get_resource_data()\n resources.extend(pkg.get_resources(resource_data))\n return resources",
"def test_get_deployment_resources(self):\n pass",
"def install_locations():\n \n main_folder=Workflow.base_environment_variable.lower()\n options=[os.path.join(os.path.sep,\"opt\",main_folder)]\n home_dir=get_home_directory()\n if home_dir:\n options+=[os.path.join(home_dir,main_folder)]\n \n return options",
"def test_apps(self):\n ## List the dirs in PATH\n apps = []\n for path in self.paths:\n apps.extend(os.listdir(path))\n \n for app in self.expected_executables:\n assert app in apps",
"def install(self, egg, dir_path):",
"def find_specs_path(self, spec):\n element = self.soup.find(spec)\n if element:\n child = element.find('xi:include')\n if child: # if xi:include exists, specs are external.\n path = self.properties.substitute(child['href'])\n else:\n # Internal specs.\n path = None\n else:\n # No spec defined in file, assume default location.\n path = self.specs[spec]\n return path",
"def get_setup_file():\n repo_fs()\n return SETUP_FILES",
"def get_filepaths_and_exts(self):\n filepaths = [prod.filepath for prod in self.products]\n exts = [prod.ext for prod in self.products]\n\n return filepaths, exts",
"def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()",
"def parse_resources(self, soup):\n for res in soup.find_all('res'):\n if 'customlangpack' in res['id'].lower():\n self.find_langpack_path(res)\n else:\n rid = remove_xml(res['id'])\n self.resources[rid] = path_format(self.properties.substitute(res['src']))"
]
| [
"0.5985074",
"0.5818984",
"0.55800974",
"0.5558382",
"0.5510714",
"0.5509265",
"0.54902023",
"0.5484745",
"0.54695016",
"0.543011",
"0.5405252",
"0.53903776",
"0.5373021",
"0.5365647",
"0.5341147",
"0.5305645",
"0.53018004",
"0.52990454",
"0.52964294",
"0.52779734",
"0.5271428",
"0.5258163",
"0.52270097",
"0.52224356",
"0.5218311",
"0.52166414",
"0.52064323",
"0.52010536",
"0.5193531",
"0.516517"
]
| 0.6754402 | 0 |
Find the path for the spec in the install.xml file. | def find_specs_path(self, spec):
element = self.soup.find(spec)
if element:
child = element.find('xi:include')
if child: # if xi:include exists, specs are external.
path = self.properties.substitute(child['href'])
else:
# Internal specs.
path = None
else:
# No spec defined in file, assume default location.
path = self.specs[spec]
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_paths(self):\n self.soup = BeautifulSoup(open(self.get_path('install')))\n for spec in list(self.specs.keys()):\n spec_file = self.find_specs_path(spec)\n if spec_file:\n # If spec file exists\n self.specs[spec] = path_format(spec_file)\n else:\n # If specs are held inside install.xml\n self.specs[spec] = self.install",
"def install_location(self):\r\n return self._content_at_path('/template/os/install/%s' % self.install_type)",
"def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]",
"def cfgInstallPath( *args ):\n return cfgPath( cfgInstallSection, *args )",
"def _find(self, spec):\n if spec.template_path is not None:\n return spec.template_path\n\n dir_path, file_name = self._find_relative(spec)\n\n locator = self.loader._make_locator()\n\n if dir_path is None:\n # Then we need to search for the path.\n path = locator.find_object(spec, self.loader.search_dirs, file_name=file_name)\n else:\n obj_dir = locator.get_object_directory(spec)\n path = os.path.join(obj_dir, dir_path, file_name)\n\n return path",
"def get_path_spec(self):\n raise NotImplementedError, \"ConfigElement get_path_spec unimplemented\"",
"def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None",
"def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret",
"def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if os.path.isfile(file_path):\n return file_path\n\n return None",
"def get_specfile_path_from_repo(project: GitProject, ref: str = None) -> Optional[str]:\n spec_files = project.get_files(ref=ref, filter_regex=r\".+\\.spec$\")\n\n if not spec_files:\n logger.debug(f\"No spec file found in {project.full_repo_name!r}\")\n return None\n return spec_files[0]",
"def _get_configspec():\n files = sorted(pkg_resources.resource_listdir(__name__, \"\"))\n # NOTE:\n # Explicit convert the filter results to a list, since the returned\n # iterator can ONLY be used ONCE.\n specfiles = list(filter(lambda fn: fn.endswith(\".conf.spec\"), files))\n if os.environ.get(\"DEBUG_FG21SIM\"):\n print(\"DEBUG: Found config specifications: %s\" % \", \".join(specfiles),\n file=sys.stderr)\n # NOTE:\n # `resource_string()` returns the resource in *binary/bytes* string\n configspec = \"\\n\".join([\n pkg_resources.resource_string(__name__, fn).decode(\"utf-8\")\n for fn in specfiles\n ]).split(\"\\n\")\n return configspec",
"def find_spec(self, fullname, path, target=None):\n if not path:\n path = [os.getcwd()]\n if \".\" in fullname:\n name = fullname.split(\".\")[-1]\n else:\n name = fullname\n for entry in path:\n if os.path.isdir(os.path.join(entry, name)):\n # this module has child modules\n filename = os.path.join(entry, name, \"__init__.py\")\n submodule_locations = [os.path.join(entry, name)]\n else:\n filename = os.path.join(entry, name + \".\" + config.FILE_EXT)\n submodule_locations = None\n\n if not os.path.exists(filename):\n continue\n\n return spec_from_file_location(\n fullname,\n filename,\n loader=ExtensionLoader(filename),\n submodule_search_locations=submodule_locations,\n )\n return None # we don't know how to import this",
"def get_versioned_path_spec(self):\n raise NotImplementedError, \"ConfigElement get_versioned_path_spec unimplemented\"",
"def findPkgPath(self, pkg):\r\n try:\r\n return self._rp.get_path(pkg)\r\n except rospkg.ResourceNotFound:\r\n raise ResourceNotFound('Can not find ROS package '\r\n '\"{0}\".'.format(pkg))",
"def workflow_spec_path(self) -> str:\n if self._spec:\n spec_path = os.path.abspath(os.path.join(self._output_dir, self._spec))\n if not self._is_path_inside_output_dir(spec_path):\n raise REANAFetcherError(\"Invalid path to the workflow specification\")\n if not os.path.isfile(spec_path):\n raise REANAFetcherError(\n \"Cannot find the provided workflow specification\"\n )\n return spec_path\n\n specs = [os.path.abspath(path) for path in self._discover_workflow_specs()]\n unique_specs = list(set(specs))\n if not unique_specs:\n raise REANAFetcherError(\"Workflow specification was not found\")\n if len(unique_specs) > 1:\n raise REANAFetcherError(\"Multiple workflow specifications found\")\n return unique_specs[0]",
"def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path",
"def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path",
"def findInstallDir( dir=\"TrigValTools\",\n searchpath=\"DATAPATH\" ):\n\n if not os.environ.has_key(searchpath): return None\n\n dirs = os.environ[searchpath].split(\":\")\n if not dirs: return None\n for d in dirs:\n path = os.path.join(d,dir)\n if os.path.exists(path): return path\n\n return None",
"def find_pkg(self, pkg):\n pass",
"def get_requirements_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-13]\n req_path = os.path.join(root, 'requirements.txt')\n\n return req_path",
"def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'",
"def parse_tool_path(self):",
"def test_finder_installs_dev_releases(data: TestData) -> None:\n\n req = install_req_from_line(\"bar\")\n\n # using a local index (that has dev releases)\n finder = make_test_finder(\n index_urls=[data.index_url(\"dev\")],\n allow_all_prereleases=True,\n )\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"bar-2.0.dev1.tar.gz\"), found.link.url",
"def get_nuspec_at_path(path):\n if NuSpec.path_is_nuspec_file(path):\n return path\n elif os.path.isdir(path):\n return utils.dir_find_file_with_extension(path, '.nuspec')\n return None",
"def _findConfigPath(self, name):\n for path in reversed(self._makeAllConfigPaths(name)):\n if os.path.exists(path):\n return path",
"def find_spec(cls, fullname: str, path: Optional[List[str]] = None,\n target: Optional[str] = None) -> Optional[ModuleSpec]:\n if path is None:\n path = sys.path\n\n sorocospec = None\n\n for p in path:\n sorocospec = XPYCEFileFinder(p).find_spec(fullname, target)\n\n if sorocospec is None:\n continue\n if sorocospec.origin is None:\n sorocospec = None\n break\n\n # This line is important for Python's internal libraries (like\n # warnings) to work. Setting has_location to True can break\n # introspection because Python will assume the entire source code\n # is there, but it is encrypted\n sorocospec.has_location = False\n\n if sorocospec is not None:\n break\n return sorocospec",
"def check_configure_scan(project_path):\n for file_name in CONFIGURE_AC_NAMES:\n file_path = os.path.join(project_path, file_name)\n if os.path.exists(file_path):\n return file_path\n return None",
"def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]"
]
| [
"0.66329956",
"0.6390044",
"0.6344499",
"0.62348706",
"0.61632514",
"0.6048204",
"0.5858314",
"0.5840221",
"0.580385",
"0.5725794",
"0.5686409",
"0.5641364",
"0.56244683",
"0.5608649",
"0.55820984",
"0.5533073",
"0.5533073",
"0.5516132",
"0.5502403",
"0.5439248",
"0.54391897",
"0.54295826",
"0.5426992",
"0.5421032",
"0.53961706",
"0.5388083",
"0.53756875",
"0.53735596",
"0.5370982",
"0.53474766"
]
| 0.7470482 | 0 |
Parse the install.xml (or resources.xml) soup to find all available resources. | def parse_resources(self, soup):
for res in soup.find_all('res'):
if 'customlangpack' in res['id'].lower():
self.find_langpack_path(res)
else:
rid = remove_xml(res['id'])
self.resources[rid] = path_format(self.properties.substitute(res['src'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_resources(self):\n path = self.get_path('resources')\n\n if not path:\n rsoup = self.soup\n else:\n rsoup = BeautifulSoup(open(path))\n\n self.parse_resources(rsoup)",
"def parse_paths(self):\n self.soup = BeautifulSoup(open(self.get_path('install')))\n for spec in list(self.specs.keys()):\n spec_file = self.find_specs_path(spec)\n if spec_file:\n # If spec file exists\n self.specs[spec] = path_format(spec_file)\n else:\n # If specs are held inside install.xml\n self.specs[spec] = self.install",
"def get_packages(xml, ns):\r\n execs = []\r\n for x in xml.iter():\r\n if x.xpath('./SSIS:PackageMetaData', namespaces = ns):\r\n for x_exec in x:\r\n execs.append(x_exec)\r\n\r\n return execs",
"def all_installation(self):\n\t\tself.db = DB()\n\t\tinstallation_all = self.db.select_all_from(\"installations\")\n\t\ttmpl = lookup.get_template(\"installation.html\")\n\t\treturn (tmpl.render(installation=installation_all))",
"def get_package_resources(self):\n resources = []\n for pkg in self.packages:\n resource_data = self.get_resource_data()\n resources.extend(pkg.get_resources(resource_data))\n return resources",
"def listup_all_programs(self):\n with urllib.request.urlopen(self._request) as response:\n XmlData = response.read()\n\n root = ET.fromstring(XmlData)\n\n for child in root:\n for title in child.iter(\"title\"):\n yield title.text.replace(\"\\u3000\",\"\")\n return None",
"def parse_xmls(user, application, complete_path, init_es, tool, scan_name, user_host, to_name):\n process_files(user, application, complete_path, init_es, tool, scan_name, user_host, to_name)\n info_debug_log(event='Parse xmls',status='success')",
"def read_manifest(self): # -> None:\n ...",
"def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")",
"def get_resource_urls(soup: BeautifulSoup) -> set:\n return {\n tag[TAGS_ATTR[tag.name]]\n for tag in soup.findAll(name=list(TAGS_ATTR.keys()))\n if tag.has_attr(TAGS_ATTR[tag.name])\n }",
"def fetch_registry_content(self):\n for registry_name, registry in self.registries.items():\n if not registry.source:\n continue\n registry.get_repositories()",
"def checkCatalogs():\n url = CHECKBASE % 'catalogs'\n catalogs = []\n try:\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n data = fh.read()\n dom = minidom.parseString(data)\n fh.close()\n catalog_elements = dom.getElementsByTagName('Catalog')\n for catel in catalog_elements:\n if catel.firstChild is None:\n continue\n catalog = catel.firstChild.data.strip()\n if len(catalog):\n catalogs.append(str(catalog))\n except:\n raise Exception,\"Could not open %s to search for list of catalogs\" % url\n return catalogs",
"def get_resources(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_resources(body)",
"def parse_data(self):\n\n try:\n if self.is_bytes:\n self.data = etree.XML(self.manifest)\n else:\n with open(self.manifest) as fh:\n self.data = etree.XML(fh.read().encode())\n except etree.XMLSyntaxError:\n raise InvalidManifest('Not an XML file')\n\n self.tree = etree.ElementTree(self.data)\n\n self.find_remotes()\n self.find_defaults()\n self.find_projects()\n\n return self.generate_manifest_dict()",
"def discover(self):\n\n # Get the Huge Page configuration\n self.get_hugepages()\n\n # Get the device configuration\n self.get_devices_per_node()\n\n # Get the CPU configuration\n self.get_cpu()\n\n # Get the current grub cmdline\n self.get_grub()",
"def resources():\n check_resources()",
"def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))",
"def get_rss(self):\r\n rssfiles = []\r\n \r\n rssfiles.append(feedparser.parse(self.url))\r\n return rssfiles",
"def read_manifest_xml(cls, document):\n manifest = []\n with zipfile.ZipFile(document, 'a') as open_document:\n for line in open_document.open(DOCUMENT_MANIFEST_PATH):\n manifest.append(line.decode('utf-8'))\n return manifest",
"def get_resources(self):\n return []",
"def query_project(self):\n\n # Find stylesheets.\n found = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found = True\n print(filename)\n if not found:\n print(\"not found!\")",
"def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())",
"def _parse_extensions(self):\n for root in self.roots:\n for extensions in root.iter('extensions'):\n for extension in extensions.iter('extension'):\n extension_name = extension.attrib.get('name', '')\n #print(f'Extension: {extension_name}')\n self.extensions.append(extension_name)\n\n extension_apis = extension.attrib.get('supported', '')\n extension_api_list = set(extension_apis.split('|'))\n\n # filter by api\n if 'gl' not in extension_apis:\n continue\n\n for require in extension.iter('require'):\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_extension[enum_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_extension[command_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})",
"def initializeCatalog():\n libxml2mod.xmlInitializeCatalog()",
"def read_xml(self):\n pass",
"def load_installed_file_list(self):\n listpath = os.path.join(self._build_root, 'src', 'gromacs', 'installed-headers.txt')\n with open(listpath, 'r') as installedfp:\n for line in installedfp:\n path = line.strip()\n if not os.path.isabs(path):\n self._reporter.input_error(\n \"installed file not specified with absolute path: {0}\"\n .format(path))\n continue\n relpath = self._get_rel_path(path)\n if relpath not in self._files:\n self._reporter.input_error(\n \"installed file not in source tree: {0}\".format(path))\n continue\n self._files[relpath].set_installed()",
"def get_items(xml):\r\n try:\r\n from bs4 import BeautifulSoup\r\n except ImportError:\r\n error = ('Missing dependency '\r\n '\"BeautifulSoup4\" and \"lxml\" required to import Wordpress XML files.')\r\n sys.exit(error)\r\n with open(xml, encoding='utf-8') as infile:\r\n xmlfile = infile.read()\r\n soup = BeautifulSoup(xmlfile, \"xml\")\r\n items = soup.rss.channel.findAll('item')\r\n return items",
"def resources(self):",
"def parse_xml(path):\r\n ns = {'SSIS': \"www.microsoft.com/SqlServer/SSIS\",}\r\n proj_xml = et.parse(path)\r\n proj_packages = get_packages(proj_xml, ns)\r\n\r\n packages = [Package(*package_properties(package, ns))\r\n for package in proj_packages]\r\n \r\n #package_props = {}\r\n #for package in packages:\r\n # name, version = package_properties(package, ns)\r\n # package_props[name] = version\r\n\r\n return packages",
"def get_files_to_download(self):\n\n self.logger.logMsg(\"Getting Files to Download\")\n\n download_links = []\n try:\n with open(self.main_xml) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n xml_file.close()\n\n for docs in data_dict.get('response').get('result').get('doc'):\n for doc in docs.get('str'):\n\n if doc.get('@name') == 'download_link':\n link = doc.get('#text', None)\n if link is not None:\n download_links.append(link)\n except Exception as e:\n self.logger.logMsg(\"Error Getting Files to Download {}\".format(str(e)))\n raise Exception('Error in Getting Files For Download')\n\n self.logger.logMsg(\"Finished Getting Files to Download\")\n\n return download_links"
]
| [
"0.7189946",
"0.59733474",
"0.5380635",
"0.53306276",
"0.52788985",
"0.5263637",
"0.5235712",
"0.5170429",
"0.5113374",
"0.50949967",
"0.5075004",
"0.50734943",
"0.5058489",
"0.5009926",
"0.4998898",
"0.4996702",
"0.4971879",
"0.49711636",
"0.49570197",
"0.49357328",
"0.49281958",
"0.4918891",
"0.49066952",
"0.48818445",
"0.4873365",
"0.48695016",
"0.48582104",
"0.48507297",
"0.48504502",
"0.48421955"
]
| 0.66038096 | 1 |
Finds a langpack path from the given XML langpack element. |

def find_langpack_path(self, langpack):
lid = langpack['id']
src = path_format(self.properties.substitute(langpack['src']))
if '.xml_' in lid:
self.langpacks[lid[-3:]] = src
if not 'default' in self.langpacks:
self.langpacks['default'] = src
self.resources['strings'] = src | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_langpack_path(self, lid='default'):\n path = self.langpacks[lid]\n return force_absolute(self.res_path, path)",
"def _safe_get_element(path, root):\n elements = path.split('.')\n parent = root\n for element in elements[:-1]:\n parent = getattr(parent, element, None)\n if parent is None:\n return None\n return getattr(parent, elements[-1], None)",
"def _find_in_xml(self, pattern, element=None, namespace=Xmlns_path):\n el = self._xml if element is None else element\n return el.find('.//' + namespace + pattern)",
"def _find(path, xp, tag_class={}, **kwargs):\n request = Osc.get_osc().get_reqobj()\n xpath = xp\n if hasattr(xp, 'tostring'):\n xpath = xp.tostring()\n f = request.get(path, match=xpath, **kwargs)\n return fromstring(f.read(), **tag_class)",
"def xmlin_path_to_xmlout(relpath, context=None):\n\t\t\n\t\tmarkup_xpath = '__inconnu__'\n\t\ttry:\n\t\t\t# for this we use the global var : STRUCT_TO_BIBL\n\t\t\tmarkup_xpath=XTokinfo.STRUCT_TO_BIBL[relpath]\n\t\texcept KeyError as ke:\n\t\t\tprint(\"KeyError: '%s' n'est pas dans ma liste...\" % relpath)\n\t\t\n\t\treturn markup_xpath",
"def domFindElementByPath( node, astrElementPathName ):\n \"\"\"eg: [\"starting-condition\", \"condition\", \"script_type\"] \"\"\"\n element = node;\n for name in astrElementPathName:\n element = domFindElement( element, name );\n if( element == None ):\n return None;\n return element;",
"def search_path():\n next_part = relative_to\n for node in name_path:\n # Look for attribute first.\n attribute = getattr(next_part, node, None)\n\n if attribute is not None:\n next_part = attribute\n else:\n # If module, look for sub-module.\n if (next_part is None or\n isinstance(next_part, types.ModuleType)):\n if next_part is None:\n module_name = node\n else:\n module_name = '%s.%s' % (next_part.__name__, node)\n\n try:\n fromitem = module_name.split('.')[-1]\n next_part = importer(module_name, '', '',\n [str(fromitem)])\n except ImportError:\n return None\n else:\n return None\n\n if not isinstance(next_part, types.ModuleType):\n if not (isinstance(next_part, type) and\n issubclass(next_part, (Message, Enum))):\n return None\n\n return next_part",
"def _look_in_package(tree: dict, module_path: str, name: str, level: Optional[int] = None) -> Union[str, None]:\n parent_path = os.path.dirname(module_path)\n if level is not None:\n for _ in range(level - 1):\n parent_path = os.path.dirname(parent_path)\n parent = find_tree(tree, lambda x, p: x[\"path\"] in [p, os.path.join(p, \"__init__.py\")], args=(parent_path,))\n if parent:\n if parent[\"fullname\"] in [name, \"{}.__init__\".format(name)]:\n return parent[\"path\"]\n for child in parent[\"children\"].values():\n if child[\"name\"] == name:\n return child[\"path\"]\n target = find_tree(tree, lambda x, f: x[\"fullname\"] == f, args=(\"{}.{}\".format(parent[\"fullname\"], name),))\n if target:\n return target[\"path\"]\n return None",
"def locationFromElement(self, element):\n elementLocation = None\n for locationElement in element.findall('.location'):\n elementLocation = self.readLocationElement(locationElement)\n break\n return elementLocation",
"def xpath (entry, path):\n if isinstance (path, str):\n path = path.split('/')\n result=entry\n for key in path: result=result[key]\n return result",
"def LoadSubElementByPath(self, path):\n curr = self\n for part in path:\n curr = curr.LoadSubElement(part)\n if curr is None:\n return None\n return curr",
"def get_short_path(content):",
"def _find_element(source: ET.Element, name: str) -> ET.Element:\n element = source.find(name)\n if element is None:\n raise ValueError(f\"Could not find {name} element in annotation file\")\n return element",
"def lookup_element(self, name: str) -> ElementNSEntry:\n for i, scope in enumerate(reversed(self.element_ns_stack)):\n if name in scope:\n el, parent_def = scope[name]\n if i == 0:\n # Return anything from local namespace\n return (el, parent_def)\n elif isinstance(el, comp.Signal):\n # Signals are allowed to be found in parent namespaces\n return (el, parent_def)\n elif self.parent_parameters_visible and isinstance(el, Parameter):\n # Parameters are allowed to be found in parent namespaces,\n # except in some contexts\n return (el, parent_def)\n return (None, None)",
"def parse(k):\r\n return xml_object.xpath(k)[0]",
"def parse(k):\r\n return xml_object.xpath(k)[0]",
"def get_pol_from_path(path):\n matches = re.findall(r'\\.([xy][xy])\\.', path)\n if not len(matches):\n return None\n return matches[-1]",
"def lookup(self, path):\n if path == '/':\n path = ''\n best_fit = self.retrieve_catalog_for_path(path)\n return best_fit.find_directory_entry(path)",
"def get_location_from_id(id):\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n if id == item[0].text:\n return item[1].text",
"def find_elm(root, elm):\n if root == elm:\n return \"\"\n for o in root:\n path = find_elm(o, elm)\n if path is not None:\n step = step_elm(o)\n return step + path\n return None",
"def get_celex_path(path, lemma, language):\n return path + language + \"/{lang}p{lem}/{lang}p{lem}.cd\".format(lang=language[0], lem=lemma[0])",
"def search_element(doc, xpath, line=None):\n for elem in doc.xpath(xpath):\n if line is None:\n return elem\n\n elif elem.sourceline == line:\n return elem\n\n else:\n continue\n\n # raise ValueError if the element could not be located.\n LOGGER.info('could not find element \"%s\"', xpath)\n raise ValueError('could not find element \"%s\"' % xpath)",
"def findPlug(node, attr):\n\n pass",
"def find(root, path, default_value=None):\n if root is None or path is None:\n return default_value\n\n assert isinstance(path, str)\n tokens = path.split('.')\n for token in tokens:\n root = root.get(token)\n if root is None:\n return default_value\n\n return root",
"def parse_tool_path(self):",
"def get_element(self, el_name, params={}):\n\t\tself.cfg_root.find(el_name)",
"def find_specs_path(self, spec):\n element = self.soup.find(spec)\n if element:\n child = element.find('xi:include')\n if child: # if xi:include exists, specs are external.\n path = self.properties.substitute(child['href'])\n else:\n # Internal specs.\n path = None\n else:\n # No spec defined in file, assume default location.\n path = self.specs[spec]\n return path",
"def get_url_from_xml(name):\n url_list = []\n url_path = os.path.join(proDir, 'testFile', 'interfaceURL.xml')\n tree = ElementTree.parse(url_path)\n for u in tree.findall('url'):\n url_name = u.get('name')\n if url_name == name:\n for c in u.getchildren():\n url_list.append(c.text)\n\n url ='/'.join(url_list)\n return url",
"def find_asset(path, root=None):\n if root is None:\n root = 'z:\\\\Leif\\\\Dropbox\\\\mugen\\\\testing-build\\\\'\n\n check = ('', 'data', 'stages', 'sound')\n for folder in (join(root, i) for i in check):\n candidate = join(folder, path)\n if exists(candidate):\n return candidate\n\n return \"<NO PATH TO FILE>\"",
"def parse_resources(self, soup):\n for res in soup.find_all('res'):\n if 'customlangpack' in res['id'].lower():\n self.find_langpack_path(res)\n else:\n rid = remove_xml(res['id'])\n self.resources[rid] = path_format(self.properties.substitute(res['src']))"
]
| [
"0.60459447",
"0.542366",
"0.54062265",
"0.53140926",
"0.5283099",
"0.5267232",
"0.52111304",
"0.52110153",
"0.5112656",
"0.5109639",
"0.50798786",
"0.503845",
"0.5029484",
"0.5024299",
"0.50155646",
"0.50155646",
"0.5005777",
"0.49973974",
"0.49784917",
"0.49735188",
"0.49698043",
"0.49553552",
"0.49473807",
"0.49269286",
"0.49130672",
"0.48869792",
"0.48864698",
"0.4865612",
"0.48572177",
"0.4840364"
]
| 0.71990365 | 0 |
Returns the path to the langpack with the given localization id. | def get_langpack_path(self, lid='default'):
path = self.langpacks[lid]
return force_absolute(self.res_path, path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_langpack_path(self, langpack):\n lid = langpack['id']\n src = path_format(self.properties.substitute(langpack['src']))\n if '.xml_' in lid:\n self.langpacks[lid[-3:]] = src\n\n if not 'default' in self.langpacks:\n self.langpacks['default'] = src\n self.resources['strings'] = src",
"def id_to_relative_raw_path(self, id):\n return osp.join(self.id_to_base_id(id) + '.ply')",
"def GetResourcePath(self, resource_id):\n\n return self._GetPathFromLabel(resource_id)",
"def getLocalizedString(self, stringId):\r\n langPath = 'special://home/addons/' + self.addonId + '/resources/language/English/strings.xml' \r\n langPath = xbmc.translatePath(langPath)\r\n if os.path.exists(langPath):\r\n with open(langPath, 'r') as langFile:\r\n langStr = langFile.read()\r\n strToSearch = '<string id=\"' + str(int(stringId)) + '\">'\r\n limInf = langStr.find(strToSearch)\r\n if limInf == -1: return ''\r\n limInf += len(strToSearch)\r\n limSup = langStr.find('</string>', limInf)\r\n return langStr[limInf:limSup]\r\n return ''",
"def getNamespacePath(self, id: long) -> unicode:\n ...",
"def get_translated_id(id, lang, event=True):",
"def _GetPath(self, kind, id):\n\n return { Database.RESOURCE : self.GetResourcePath,\n Database.TEST : self.GetTestPath,\n Database.SUITE : self.GetSuitePath } [kind] (id)",
"def get_localised_dir(self, language):\n return os.path.join(\n self.base_path,\n to_locale(language),\n self.content_path\n )",
"def get_full_path(self, reg_path: str, reg_id: str) -> str:\n return '{}.{}'.format(reg_path, reg_id)",
"def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"path\", language, fallback, version_id, force_reload)",
"def abspath(self, fileid):\n # Find the directory, relative from the corpus root.\n name = fileid.split('.')[0]\n category = fileid.split('_')[0]\n # Create the pickle file extension\n basename = name + '.pickle'\n\n # Return the path to the file relative to the target.\n return os.path.normpath(os.path.join(self.target, category, basename))",
"def deduce_path(self, id, ns, path):\n locations = self.id_lookups[ns][id]\n if path != '':\n if path not in locations.keys():\n print \"** Error\"\n print \"Specified path '%s' not in name space '%s' locations for id '%s'\" % (path, ns, id)\n traceback.print_stack()\n sys.exit(1)\n else:\n if len(locations) > 1:\n print \"** Error\"\n print \"Path not specified for '%s', but must be since\" %id\n print \" there are multiple locations:\" + \", \".join(locations.keys())\n traceback.print_stack()\n sys.exit(1)\n path = locations.keys()[0]\n return path",
"def get_path(self, path_id):\n\t\tpass",
"def _GetPathFromLabel(self, label):\n\n return os.path.join(self.GetRoot(),\n self._GetRelativeLabelPath(label))",
"def one_translation_path(self, api_path):\n try:\n from jupyterlab_server.translation_utils import (\n get_language_pack,\n get_language_packs,\n )\n\n all_packs, _ = get_language_packs()\n packs = {\n locale: {\"data\": get_language_pack(locale)[0], \"message\": \"\"}\n for locale in all_packs.keys()\n }\n metadata = {\"data\": all_packs, \"message\": \"\"}\n except ImportError as err: # pragma: no cover\n self.log.warning(\n f\"[lite] [translation] `jupyterlab_server` was not importable, \"\n f\"cannot create translation data {err}\"\n )\n\n metadata = {\n \"data\": {\n \"en\": {\"displayName\": \"English\", \"nativeName\": \"English\"},\n },\n \"message\": \"\",\n }\n packs = {\"en\": {\"data\": {}, \"message\": \"Language pack 'en' not installed!\"}}\n\n # save the metadata about available packs\n api_path.parent.mkdir(parents=True, exist_ok=True)\n api_path.write_text(\n json.dumps(metadata, indent=2, sort_keys=True),\n encoding=\"utf-8\",\n )\n\n for locale, data in packs.items():\n language_pack_file = self.api_dir / f\"{locale}.json\"\n language_pack_file.write_text(\n json.dumps(data, indent=2, sort_keys=True),\n encoding=\"utf-8\",\n )\n self.maybe_timestamp(language_pack_file)",
"def get_level_name(self, level_id):\n for (english_name, level_package) in self.levels[self.game]:\n if level_package.lower() == level_id.lower():\n return english_name\n return None",
"def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")",
"def subs_filename(subs_id, lang='en'):\r\n if lang == 'en':\r\n return u'subs_{0}.srt.sjson'.format(subs_id)\r\n else:\r\n return u'{0}_subs_{1}.srt.sjson'.format(lang, subs_id)",
"def transcript_path_for_id(transcript_id):\n if transcript_id is int:\n transcript_id = \"%05d\" % transcript_id\n prefix_folder = str(transcript_id)[:3]\n return join(config.FISHER_ROOT, 'data', 'trans', prefix_folder, 'fe_03_%s.txt' % transcript_id)",
"def get_language(language_id):\n\n api = (api_name, 'language')\n args_params = (str(language_id), )\n \n response = make_request(*args_params, api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n if status_code >= 300:\n\n click.echo(\"response error message: %s \" % msg)\n raise click.Abort()\n \n\n logger.debug(\"response from spanglish get_language: {}\".format(response))\n logger.debug(\"response msg from spanglish get_language: {}\".format(msg))\n\n click.echo(\"response message: %s \" % msg)",
"def scriptpath(self, code):\n return '' if code == 'en' else ('/' + code)",
"def get_id_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n id_path = os.path.join(root, \"client\\\\files\\\\id.txt\")\n\n return id_path",
"def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n path = \"\"\n components = self.GetLabelComponents(label)\n if not components:\n return path\n \n for c in components[:-1]:\n path = os.path.join(path, c + self.suite_extension)\n path = os.path.join(path, components[-1])\n return path",
"def _get_pubchem_template_path(self, het_id):\n path = os.path.join(self.pubchem_templates, f\"{het_id}.sdf\")\n\n return path if os.path.isfile(path) else \"\"",
"def abspath(self, fileid):\n # Find the directory, relative from the corpus root.\n parent = os.path.relpath(\n os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root\n )\n\n # Compute the name parts to reconstruct\n basename = os.path.basename(fileid)\n name, ext = os.path.splitext(basename)\n\n # Create the pickle file extension\n basename = name + '.pickle'\n\n # Return the path to the file relative to the target.\n return os.path.normpath(os.path.join(self.target, parent, basename))",
"def abspath(self, fileid):\n # Find the directory, relative from the corpus root.\n parent = os.path.relpath(\n os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root\n )\n\n # Compute the name parts to reconstruct\n basename = os.path.basename(fileid)\n name, ext = os.path.splitext(basename)\n\n # Create the pickle file extension\n basename = name + '.pickle'\n\n # Return the path to the file relative to the target.\n return os.path.normpath(os.path.join(self.target, parent, basename))",
"def get_strings_filepath(language_code):\n config = read_config()\n #Get the root path for the outputs\n output_directory = config.get(OUTPUT_DIRECTORIES_SECTION,STRINGS_OUTPUT_SECTION)\n #Get the path for the filename for the language_code\n string_filename = config.get(XML_STRINGS_SECTION,language_code)\n return join(output_directory, string_filename)",
"def get_target_providers_package_folder(provider_package_id: str) -> str:\n return os.path.join(get_target_providers_folder(), *provider_package_id.split(\".\"))",
"def get_string(lang_code: str, string_id: str) -> str:\n lang = get_language(lang_code)\n try:\n return all_strings[lang][string_id]\n except KeyError:\n # TODO: log this error\n return \"ERR_NO_STRING\"",
"def get_local_path(self, example_id):\n\n return self._cache.get_local_path(example_id)"
]
| [
"0.73198885",
"0.6105887",
"0.5961551",
"0.58504504",
"0.58083636",
"0.5787844",
"0.5782545",
"0.5768041",
"0.55205953",
"0.5518579",
"0.5518284",
"0.54572797",
"0.54148406",
"0.540771",
"0.539632",
"0.539049",
"0.5386805",
"0.5383221",
"0.5375136",
"0.5335914",
"0.53032947",
"0.5299673",
"0.5282383",
"0.52738965",
"0.5261483",
"0.5261483",
"0.5217172",
"0.5144369",
"0.5139455",
"0.5129622"
]
| 0.79494125 | 0 |
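A minimal sketch of how the langpack lookup in the entry above could be exercised; the `LangpackResolver` class name, the sample `res_path`, and the inlined `force_absolute` stand-in are illustrative assumptions, not part of the dataset record.

import os

def force_absolute(base, path):
    # Stand-in for the helper used above: keep absolute paths, join relative ones onto the base.
    return path if os.path.isabs(path) else os.path.join(base, path)

class LangpackResolver:
    def __init__(self, res_path, langpacks):
        self.res_path = res_path
        self.langpacks = langpacks  # e.g. {'default': 'lang/strings.xml_enu', 'enu': ...}

    def get_langpack_path(self, lid='default'):
        path = self.langpacks[lid]
        return force_absolute(self.res_path, path)

resolver = LangpackResolver('/opt/app/res', {'default': 'lang/strings.xml_enu'})
print(resolver.get_langpack_path())  # /opt/app/res/lang/strings.xml_enu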
Removes the .xml from a resource or spec id. | def remove_xml(rid):
if '.xml' in rid[-4:]:
return rid[:-4]
else:
return rid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)",
"def removeID(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlRemoveID(doc__o, self._o)\n return ret",
"def removeResource(self, *args):\n return _libsbml.XMLAttributes_removeResource(self, *args)",
"def deleteconvert(self):\n filename = os.path.join(self.docx_path, self.name.docx)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.html_path, self.name.html)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.docbook_path, self.name.xml)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.markdown_path, self.name.md)\n if os.path.isfile(filename):\n os.remove(filename)",
"def remove_from_xml(self, xml_tree):\n xml_tree.getroot().find(\"part-list\").remove(self.xml_score_part)\n xml_tree.getroot().remove(self.xml_part)",
"def delete(self, name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()",
"def remove(path):",
"def removeID(self, attr):\n if attr is None: attr__o = None\n else: attr__o = attr._o\n ret = libxml2mod.xmlRemoveID(self._o, attr__o)\n return ret",
"def remove(self, spec_or_id=None):\n if isinstance(spec_or_id, ObjectId) or \\\n isinstance(spec_or_id, basestring):\n return self.database.connection.request.delete_document(\n self.database.name, self.name, spec_or_id)\n if not spec_or_id:\n spec_or_id = {}\n return self.database.connection.request.delete_replace_documents(\n self.database.name, self.name, spec_or_id, [])",
"def delete_BLAST_xml(blast_xml_file):\n xml_txt = str(blast_xml_file)[:-4] + \"_details.txt\"\n\n # delete the original files\n try:\n os.remove(blast_xml_file)\n except:\n sys.stdout.write(\"{} could not be deleted\".format(blast_xml_file))\n try:\n os.remove(xml_txt)\n except:\n sys.stdout.write(\"{} could not be deleted\".format(xml_txt))",
"def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)",
"def remove(self, _id):\n self.collection.remove({\"_id\": ObjectId(_id)})\n\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"assets\",\n \"action_selection\",\n str(_id)))\n if os.path.exists(file_path):\n shutil.rmtree(file_path)\n\n return",
"def remove_file(self, path):\n pass",
"def rem_file(self, key):\n del self.fileList[key]\n\n path = os.path.join(self.file_path, '%s.xoj' % key)\n try:\n os.remove( path )\n except:\n print \"Unable to remove\", path\n self.save()",
"def clean_prep_xml(des_xml, rels_rIds, pxml_subtags):\n root, tree = gen_tree(des_xml)\n nmsps = root.nsmap['r']\n rId = f\"{{{nmsps}}}id\"\n for k,v in pxml_subtags.items():\n subtag1 = tree.find(k)\n for i in subtag1:\n if i.attrib.get(rId):\n if i.attrib.get(rId) not in rels_rIds:\n subtag1.remove(i)\n\n tree.write(des_xml, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return",
"def remove(cls, doc_id, specs=None):\n ret = cls._make_call('remove', cls._make_specs(doc_id, specs))\n if ret:\n return ret['n']",
"def untag_resource(Resource=None, TagKeys=None):\n pass",
"def remove(self):\n self.remove_file()",
"def delete(self, xact, path):\n self._log.debug(\"Deleting NSR xact:%s, path:%s\", xact, path)\n self.regh.delete_element(path)\n self._log.debug(\"Deleted NSR xact:%s, path:%s\", xact, path)",
"def remove(name):",
"def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)",
"def remove_rental(self, id):\n super(RentalHistoryText, self).remove_rental(id)\n self.save_file()",
"def rm(self, name: str) -> None:\n path = self.get_path(name)\n if os.path.exists(path):\n os.remove(path)",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def rmcontents(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['rmcontents', source])\n self.m.path.mock_remove_paths(str(source)+self.m.path.sep)",
"def delete_file(self, name):\n del self.files[name]",
"def remove_file(path: str) -> None:\n\tremove(path)",
"def pre_namespace_delete(self, resource_id):\n pass",
"def remove_from_xml_tree(the_config, tag):\n for elt in the_config.iterdescendants():\n if tag in elt.tag:\n elt.getparent().remove(elt)",
"def remove_descriptor(self, uuid):"
]
| [
"0.6571621",
"0.59867907",
"0.59495574",
"0.5810718",
"0.58104086",
"0.57775265",
"0.5769766",
"0.5720916",
"0.56980383",
"0.5681628",
"0.5668962",
"0.56642413",
"0.5655876",
"0.5647954",
"0.56197345",
"0.5602799",
"0.550016",
"0.5495898",
"0.5472836",
"0.5450075",
"0.5386508",
"0.53854614",
"0.53737295",
"0.5368029",
"0.53678375",
"0.5348428",
"0.53446275",
"0.5336599",
"0.5326481",
"0.53244096"
]
| 0.6564904 | 1 |
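As a quick illustration of the `remove_xml` helper in the entry above (a sketch only; the sample ids are made up):

def remove_xml(rid):
    # Strip a trailing '.xml' extension from a resource/spec id, if present.
    if '.xml' in rid[-4:]:
        return rid[:-4]
    else:
        return rid

print(remove_xml('customlangpack.xml'))  # customlangpack
print(remove_xml('strings'))             # strings (unchanged)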
Ensures that the base path is not appended to an absolute path. | def force_absolute(base, path):
if os.path.isabs(path) and os.path.exists(path):
return path
else:
return path_format(base + path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)",
"def relative_base(base):\n return as_base(base).lstrip('/')",
"def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url",
"def check_absolute_path(path):\n current_dir = os.getcwd()\n if os.path.isabs(path) is False:\n if str(path).startswith(\"./\"):\n return current_dir + path[1:]\n else:\n return current_dir + \"/\" + path\n else:\n return path",
"def normalize_base_dir(base_dir: Optional[str]) -> str:\n if base_dir is None:\n base_dir = os.path.abspath(\"\")\n elif not is_absolute_path(base_dir):\n base_dir = os.path.abspath(base_dir)\n while base_dir != '/' and base_dir.endswith('/'):\n base_dir = base_dir[:-1]\n return base_dir",
"def as_base(path):\n path = path if path.startswith('/') else '/' + path\n return path if path.endswith('/') else path + '/'",
"def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError",
"def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)",
"def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)",
"def check_path(p, cwd):\n if not path.isabs(p):\n p = path.normpath(path.join(cwd,p))\n return p",
"def validate_safe_path(value):\n base = \"/input/\"\n\n try:\n new_path = safe_join(base, value)\n except SuspiciousFileOperation:\n raise ValidationError(\"Relative paths are not allowed.\")\n\n valid_path = new_path[len(base) :]\n\n if value != valid_path:\n raise ValidationError(f\"Invalid file path, should be {valid_path}.\")",
"def qualify(path):\n if not absoluteRegexp.search(path):\n path = os.path.join(cwd, path)\n return path",
"def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path",
"def clean_and_check_root_relative_path(root, relative_path):\n # type: (str, str) -> str\n if not os.path.isabs(root):\n raise ValueError(\"Root parameter %r should an absolute path\" % root)\n\n if not root.endswith(os.sep):\n root = root + os.sep\n\n joined_path = os.path.join(root, relative_path)\n resolved_path = os.path.realpath(joined_path)\n\n if not resolved_path.startswith(root):\n raise ValueError(\"Final path %r is outside of %r\" % (resolved_path, root))\n\n return resolved_path",
"def set_basedir(self, host, path):",
"def relpath(targpath: str, basepath: str='') -> str:\n pass",
"def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)",
"def test_local_path(self, nexus_base):\n assert isinstance(nexus_base.local_path, str)",
"def prepare(self):\n if not self._base:\n self.error = \"path= must be specified\"\n return False\n if self._volume:\n if \"://\" not in self._volume:\n self.error = \"mount= can only be an URL\"\n return False\n if self._base.startswith(\"/\"):\n self._base = os.path.join(self._volume, self._base[1:])\n # do the prefix check anyway, for sanity\n if not is_parent_of(self._volume, self._base):\n self.error = \"mount= must be a prefix of path=\"\n return False\n return True",
"def test_src_path(self, nexus_base):\n assert isinstance(nexus_base.src_path, str)\n assert len(nexus_base.src_path) != 0",
"def get_base_path(self) -> str:\n raise NotImplementedError()",
"def _ensure_path_absolute(maybe_relpath, cfg_path):\n if not isinstance(maybe_relpath, str):\n raise TypeError(\n \"Attempting to ensure non-text value is absolute path: {} ({})\".\n format(maybe_relpath, type(maybe_relpath)))\n if os.path.isabs(maybe_relpath) or is_url(maybe_relpath):\n _LOGGER.debug(\"Already absolute\")\n return maybe_relpath\n # Maybe we have env vars that make the path absolute?\n expanded = os.path.expanduser(os.path.expandvars(maybe_relpath))\n if os.path.isabs(expanded):\n _LOGGER.debug(\"Expanded: {}\".format(expanded))\n return expanded\n # Set path to an absolute path, relative to project config.\n config_dirpath = os.path.dirname(cfg_path)\n _LOGGER.debug(\"config_dirpath: {}\".format(config_dirpath))\n abs_path = os.path.join(config_dirpath, maybe_relpath)\n _LOGGER.debug(\"Expanded and/or made absolute: {}\".format(abs_path))\n return abs_path",
"def normalize_upstream(path):\n if not path:\n return path\n if ':' not in path:\n return os.path.abspath(path)\n return path",
"def basepath():\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..'\n )\n )",
"def set_base_path(self, base_path):\n self._base_path = base_path",
"def base_path(self):\n return self.setup.base_path",
"def norm_safe(path):\n if not os.path.isabs(path):\n raise Exception(path, 'Not absolute path: %r' % path)\n\n return os.path.normpath(path)",
"def test_base_dir(self):\n self.assertEqual(self.settings.BASE_DIR, TestPredefines.BASE_DIR)",
"def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write"
]
| [
"0.6916786",
"0.67421585",
"0.66749847",
"0.64415956",
"0.64331144",
"0.6391375",
"0.634446",
"0.63375163",
"0.63358223",
"0.63287544",
"0.63265294",
"0.6256979",
"0.6243405",
"0.6241263",
"0.62288374",
"0.61725926",
"0.6163012",
"0.61393946",
"0.61246914",
"0.610019",
"0.60855454",
"0.60642713",
"0.5992856",
"0.5987188",
"0.59635514",
"0.5954164",
"0.5934303",
"0.59282106",
"0.5897085",
"0.58663243"
]
| 0.8096843 | 0 |
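A small runnable sketch of the path-forcing logic in the entry above; `path_format` is not shown in that entry, so a trivial stand-in is assumed here.

import os

def path_format(p):
    # Illustrative stand-in for the project's path_format helper: normalise separators.
    return p.replace('\\', '/')

def force_absolute(base, path):
    # Leave existing absolute paths untouched, otherwise prefix them with the base.
    if os.path.isabs(path) and os.path.exists(path):
        return path
    else:
        return path_format(base + path)

print(force_absolute('/opt/app/res/', 'lang/strings.xml'))  # /opt/app/res/lang/strings.xml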
Append a child SHETNode object to this parent. | def append_child(self, child):
self._children.append(child) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)",
"def add_child(self, child):\r\n self.children.append(child)",
"def add_child(self, child):\n assert isinstance(child, (Node, str))\n self.children.append(child)\n return child",
"def add_child(self, child):\r\n self.children.append(child)",
"def add_child(self, child):\n self.children.append(child)",
"def add_child(self, child):\n self.children.append(child)",
"def add_child(self, child):\n self.childs.append(child)",
"def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)",
"def _add_child(self, node):\n self.children.update({\n node.name: node\n })\n node.path = self._sep.join([self.path, node.name])\n node.parent = self",
"def new_child(self, parent, *args, **kwargs):\n child = self.new_element(*args, **kwargs)\n parent.append(child)\n return child",
"def add_child(self, node):\n self.children.append(node)",
"def add_child(self, node):\n self.children.append(node)",
"def add_child(self, node):\n self.children.append(node)",
"def addChild( self, child ):\n\n self.childs.append( child )",
"def add_child(self, node):\n if isinstance(node, _Node):\n self.__children.append(node)\n else:\n raise ValueError(\"Please provide a valid node to append\")",
"def link_child(self, parent, child):\n nodelist = self.get_nodes()\n parent_index = nodelist.index(parent)\n child_index = nodelist.index(child)\n\n self.__nodes[parent_index].append(child_index)",
"def add_child(self, state):\n child = RRT.Node(state=state, parent=self)\n self.children.append(child)\n return child",
"def add_child(self, node):\n if self is node:\n parent_id = \"\"\n _nodeid=\"N_\"+str(0)\n else:\n if not issubclass(node.__class__, Node):\n raise TypeError(\"{}.add_child: arg «node»=«{}», type {} not valid.\".format(self.__class__.__name__, node, type(node)))\n self.childs.append(node)\n node.parent = self\n parent_id = self.TV.selection()[0]\n _nodeid=\"N_\"+str(self.node_count)\n # parent = self.rootnode.get_node_by_id(parent_id)\n # if parent is None:\n # return None\n\n # print(\"self.TV.insert node._nodeid\", node._nodeid)\n # print(\"self.TV.insert node.data\", node.data)\n \n self.TV.insert(parent_id, 'end', _nodeid, text=node.name)\n\n # parent_id = self.TreeView.selection()[0]\n # node_name = askstring(\"New Child\", prompt=\"Enter the node name\", initialvalue=\"\")\n # if not node_name:\n # node_name = \"no-name-node\"\n # # self.TV.insert(item, 'end', 'LC_'+str(self.TVleafref), \n # # text='Load case '+str(self.TVleafref))\n # #self.node_count += 1\n \n # self.TreeView.insert(parent_id, 'end', self._nodeid, text=self.name)\n\n return node",
"def add_child(self, element, parent):\n parent_node = self._validate(parent)\n child_node = self._Node(element,parent_node)\n parent_node._children.append(child_node)\n self._size += 1",
"def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)",
"def append_child(self, child):\n \n # Check a type of 'child' parametr\n if not isinstance(self, SitemapTreeElement):\n raise TypeError('SiteMapTreeElement type expected')\n self._children.append(child)",
"def add_child(self, cd, wt: float):\n self.child.append([cd, wt])",
"def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')",
"def add_child(self, node):\n if node not in self.children: #If the node isn't already a child of Node,\n self.children.append(node) #Add it to the end of the list of children",
"def addChild(self, node):\n if IElement.providedBy(node):\n node.parent = self\n self.children.append(node)\n return node",
"def append_child(self, child):\n # Child UID must not be the same as parent's UID\n if self.uid == child.uid:\n raise RuntimeError(\"Cannot add child widget '{}' because it has same UID as its parent\".format(child.uid))\n\n # Each widget is responsible to control only its direct children, not all descendants\n if self.has_child(child.uid):\n raise RuntimeError(\"Widget '{}' already contains descendant '{}'\".format(self.uid, child.uid))\n\n child.parent = self\n\n if not child.weight:\n self._last_children_weight += 100\n child.weight = self._last_children_weight\n elif child.weight > self._last_children_weight:\n self._last_children_weight = ceil(child.weight / 100) * 100\n\n # Obviously, child must be placed in the same form's area as its parent\n child.form_area = self.form_area\n\n self._children.append(child)\n self._children_uids.append(child.uid)\n self._children.sort(key=lambda x: x.weight)\n\n return child",
"def appendChild(self, child):\n self.points += child.points.copy()\n self.children.append(child)",
"def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)",
"def append_node(self, p_node):\n p_node.parent = self\n self.children.append(p_node)",
"def add_child(self, child, label):\n self.children[label] = child\n child.parents.append(self)"
]
| [
"0.75263894",
"0.71979386",
"0.71660924",
"0.70930797",
"0.7083536",
"0.7083536",
"0.70750386",
"0.70191234",
"0.70174617",
"0.69852936",
"0.6984658",
"0.6984658",
"0.6984658",
"0.6974553",
"0.6964133",
"0.691352",
"0.6870681",
"0.68534565",
"0.683959",
"0.6832848",
"0.68288153",
"0.6758147",
"0.6717152",
"0.6677253",
"0.66744995",
"0.66683364",
"0.6647732",
"0.66269094",
"0.65720445",
"0.65677047"
]
| 0.743963 | 1 |
Return the child object for this row. | def child(self, row):
return self._children[row] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fm_get_child(self, idx):\n return self._relation_lst[self.CHILD][idx]",
"def parent_row(self):\n dict_cur.execute('SELECT * FROM \"{}\" WHERE {} = {}'.format(\n self.parent_table(), self.id_col_of_parent(), self.parent_id\n ))\n return dict_cur.fetchone()",
"def child(self, index):\n self.__initChild()\n return self.__child[index]",
"def child(self):\n if self.is_free():\n raise RegistryStructureDoesNotExist(\"HBINCell is free at 0x%x\" % (self.offset()))\n\n id_ = self.data_id()\n\n if id_ == b\"vk\":\n return VKRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"nk\":\n return NKRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"lf\":\n return LFRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"lh\":\n return LHRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"li\":\n return LIRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"ri\":\n return RIRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"sk\":\n return SKRecord(self._buf, self.data_offset(), self)\n elif id_ == b\"db\":\n return DBRecord(self._buf, self.data_offset(), self)\n else:\n return DataRecord(self._buf, self.data_offset(), self)",
"def db_row(self):\n return self._model_cls.query.get(self._pk)",
"def getChild(self):\n return self.features[0]",
"def getChild(self, label = None, *args, **kwargs):\n\n\t\tif (self.child_class is None):\n\t\t\traise NotImplementedError()\n\n\t\tif (label is None):\n\t\t\tif (self.current is not None):\n\t\t\t\treturn self.current\n\t\t\tlabel = self.getUnique(self.child_uniqueName)\n\t\t\tselect = True\n\t\telse:\n\t\t\tselect = False\n\n\t\tchild = self._get(label, returnForNone = None)\n\t\tif (child is None):\n\t\t\tchild = self.new(*args, label = label, **kwargs)\n\t\tif (select):\n\t\t\tself.select(child)\n\t\treturn child",
"def _get_child(self, name) -> H5ObjectLike:\n pass",
"def child(self, p_int, int_column=0): # real signature unknown; restored from __doc__\r\n return QStandardItem",
"def child_at(self, index, child_class=None):\n if not child_class:\n child_class = self.CHILD_CLASS\n\n return self.children(child_class)[index]",
"def getChildId(self):\n if self.cursor:\n return self.cursor.childId\n return None",
"def row(self):\n return self[\"row\"]",
"def row(self):\n\t\treturn self.__row",
"def get_child(self, name):\n return name, self._children[name]",
"def row(self, row_id):\r\n return Row(self, row_id)",
"def child(self, index):\n raise AttributeError, \"Cannot retrieve children from leaf nodes! Attempted on leaf:\\n\\n%s\" % self.prettyPrint()",
"def child(self, p_int, p_int_1): # real signature unknown; restored from __doc__\n return QModelIndex",
"def getChild(self, *args):\n return _libsbml.XMLNode_getChild(self, *args)",
"def attribute(self):\n return getattr(self.parent_model, self.name)",
"def takeChild(self, p_int, int_column=0): # real signature unknown; restored from __doc__\r\n return QStandardItem",
"def getChild(self, name):\n \n for child in self._children:\n if child.getName() == name:\n return child",
"def cast(self):\n # If the method has already been called\n if self._subobject is not None:\n return self._subobject\n for field in self.subclass_fields:\n try:\n self._subobject = getattr(self, field)\n # If an exception is not thrown, we've found the subclass\n break\n except self.DoesNotExist:\n pass\n # If no child is found, it returns None.\n return self._subobject",
"def get_child(self, child_index):\n try:\n return self.children[child_indexndex] #Return the child at the provided index\n except: #If the index is invalid,\n return None #Returns None",
"def get_child(self, n):\n child, _ = self.recursive_get_child(n)\n return child",
"def get_child(self, val):\n if val in self._children:\n return self._children[val]",
"def row(self):\n\t\tif self._parent != None:\n\t\t\treturn self._parent._children.index(self)\n\t\telse:\n\t\t\treturn 0",
"def getValue(self):\n return self._row[self.name]",
"def get_rightchild(self):\n return self._rightchild",
"def getDependOnItem(self):\n currentRow = self.getCurrentRow()\n if currentRow == 0:\n return None\n return self.jobRow.child(currentRow - 1, 0)",
"def parent(self):\n result = self.get_parent(\n identifier=DEFAULT_PARENT_IDENTIFIER,\n relationship=CommCareCaseIndexSQL.CHILD\n )\n return result[0] if result else None"
]
| [
"0.69378644",
"0.6922745",
"0.67441875",
"0.67046505",
"0.64928705",
"0.6490793",
"0.64874583",
"0.636516",
"0.6325162",
"0.63212925",
"0.6312723",
"0.62407464",
"0.6223152",
"0.6199766",
"0.61490715",
"0.6089433",
"0.6058238",
"0.6028954",
"0.6019511",
"0.60055214",
"0.6004265",
"0.5970033",
"0.5969498",
"0.5968792",
"0.5963361",
"0.59550625",
"0.5929296",
"0.5884917",
"0.5877103",
"0.58570117"
]
| 0.7951686 | 0 |
Return the number of children for this object. | def child_count(self):
return len(self._children) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def children_count(self):\n return len(self._children_ids)",
"def get_num_children(self):\n return len(self.children)",
"def num_children(self):\r\n return len(self.children)",
"def n_children(self):\n ch = self.children\n return 0 if not ch else len(ch) + sum([c.n_children for c in ch])",
"def num_children(self):\n # TODO: Determine how many children this node has\n count = 0\n for child in self.children:\n # if child not none\n if child:\n count += 1\n return count",
"def getNumChildren(self):\n return _libsbml.XMLNode_getNumChildren(self)",
"def getNumChildren(self):\n return _libsbml.ASTBasePlugin_getNumChildren(self)",
"def childCount(self):\n if self.__child is not None:\n return len(self.__child)\n return self._expectedChildCount()",
"def getNumChildren(self):\n return _libsbml.ASTNode_getNumChildren(self)",
"def _count_children(self, item):\n return len(self.tree.get_children(item))",
"def num_children_array(self):\n return self._num_children_array",
"def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt",
"def get_child_element_count(self):\n return len(self._child_elements)",
"def cardinality(self):\n return len(self._children)",
"def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)",
"def num children(self, p):\n raise NotImplementedError( must be implemented by subclass )",
"def size(self):\n if len(self.children) == 0:\n return 1\n else:\n return 1 + sum([x.size() for x in self.children])",
"def num_children(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def num_children(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter",
"def num_childrens(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def GetChildrenCount(self, recursively=True):\r\n\r\n count = len(self._children)\r\n \r\n if not recursively:\r\n return count\r\n\r\n total = count\r\n\r\n for n in xrange(count):\r\n total += self._children[n].GetChildrenCount()\r\n \r\n return total",
"def num_children(self, p):\n raise NotImplemented(\"must be implemented by subclass\")",
"def __len__(self):\n return 1 + sum([len(child) for child in self.children])",
"def get_exact_children_count(self):\n count = [0, 1]\n for child in self._children:\n if child.is_document():\n count[0] += 1\n continue\n child_count = child.get_exact_children_count()\n count = np.add(count, child_count)\n \n return count",
"def num_children(self):\n log = logging.getLogger(__name__)\n if self.synthetic_type == self.SYNTHETIC_CHILDREN:\n return len(self.synthetic_children)\n elif self.synthetic_type == self.SYNTHETIC_PROXY_NAME:\n value = getattr(self, self.synthetic_proxy_name)\n \"\"\":type: lldb.SBValue\"\"\"\n if value is not None:\n value = get_synthetic_value_copy(value)\n count = value.GetNumChildren()\n \"\"\":type: int\"\"\"\n return count\n log.error(\"num_children: Cannot get proxy value: {} for type {}.\".format(self.synthetic_proxy_name, self.type_name))\n return 0\n elif self.synthetic_type == self.SYNTHETIC_PROXY_VALUE:\n if self.synthetic_proxy_value is not None:\n value = get_synthetic_value_copy(self.synthetic_proxy_value)\n count = value.GetNumChildren()\n \"\"\":type: int\"\"\"\n return count\n log.error(\"num_children: No proxy value for type {}.\".format(self.type_name))\n # Returns child number for current object.\n return self.value_obj.GetNumChildren()\n\n log.error(\"num_children: Unknown synthetic type: {} for type {}.\".format(self.synthetic_type, self.type_name))\n return 0",
"def num_children(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def num_children(self, u):\n return self._ll_tree.get_num_children(u)",
"def arity(self):\n return len(self.__children)",
"def num_children(self, p):\n node = self._validate(p)\n return len(node._children)"
]
| [
"0.90056854",
"0.89945096",
"0.8814528",
"0.84932816",
"0.8328836",
"0.820382",
"0.81602144",
"0.8130981",
"0.8113122",
"0.8032147",
"0.8006279",
"0.7992068",
"0.7951158",
"0.79241395",
"0.7880368",
"0.7846781",
"0.7826892",
"0.7825752",
"0.7825752",
"0.7813822",
"0.78123784",
"0.78010553",
"0.77445906",
"0.77432114",
"0.77430576",
"0.76953137",
"0.7677778",
"0.7656716",
"0.76380783",
"0.76364493"
]
| 0.90249795 | 0 |
Return the row number of this object in its parent's child object list. | def row(self):
if self._parent != None:
return self._parent._children.index(self)
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def position(self):\n # (this to be able to let the model know my 'row')\n if self.parent and self in self.parent.children:\n return self.parent.children.index(self)\n return 0",
"def get_row_count(self):\n\t\treturn self.iter_n_children(None)",
"def get_parent_index(self):\n return (self.index - 1) // 2",
"def rowCount(self, parent):\n if parent.column() > 0:\n return 0\n parent_item = self.root_item if not parent.isValid() else parent.internalPointer()\n return len(parent_item.children)",
"def parent_count(self):\n return len(self._parent_ids)",
"def num_parents(self):\n return self._num_parents",
"def rowCount(self, parent_midx):\n return self._sel.give_conds_nb() + 1",
"def rowCount(self, parent):\n parent_item = parent.internalPointer()\n if parent.isValid():\n parent_item = parent.internalPointer() # noqa\n return 0\n else:\n return len(self._nodes)",
"def parent(self, index):\n return index / 2",
"def get_left_child_index(self, parent):\n return 2*parent+1",
"def parent_id(self) -> int:\n return self._parent_id",
"def parent_row(self):\n dict_cur.execute('SELECT * FROM \"{}\" WHERE {} = {}'.format(\n self.parent_table(), self.id_col_of_parent(), self.parent_id\n ))\n return dict_cur.fetchone()",
"def rowCount(self, index=QModelIndex()):\n\t\tif index.isValid():\n\t\t\tparent = index.row()\n\t\telse:\n\t\t\tparent = None\n\t\t\n\t\treturn len(filter(lambda x: x[2] == parent, self.fields))",
"def get_parent_index(self, child):\n return (child-1)//2",
"def get_row(self):\n return self._row_number",
"def rowCount(self, parent:typing.Optional[QtCore.QModelIndex]=QtCore.QModelIndex()) -> int:",
"def rowCount(self, parent = QModelIndex()):\n return self.numRows",
"def rowCount(self, parent): # pylint: disable=unused-argument\n return self._config['row_count']",
"def parent(self, pos):\n if pos == 0: \n return None\n return int(math.ceil(pos / self.dary) - 1)",
"def rowCount(self, parent):\r\n return len(self.arraydata)",
"def rowCount(self, parent_midx):\n return self._sel.give_conds_nb()",
"def get_parent(self, index):\n return (index - 1) // (2)",
"def get_left_child_index(self):\n return (2 * self.index) + 1",
"def get_right_child_index(self, parent):\n return 2*parent+2",
"def row_counter(self) -> int:\n return self.writer.row_counter",
"def _get_parent_index(self, child_index: int) -> int:\n index = (child_index - 1) // 2\n\n return index if index >= 0 else 0",
"def parent(self, index):\n if index == 0:\n print(\"index 0 has no parent\")\n return None\n return (index - 1) // 2",
"def _parent(self, index):\r\n # Declaring the \"root\" its own parent, otherwise usual math\r\n return (index - 1) // 2 if index else 0",
"def parent_block_count(self):\n return self.block_count",
"def get_parent_index(i):\n # Indexing for i_parent == i // 2 is NOT ZERO-INDEXED\n pos = i + 1\n parent_pos = pos // 2\n parent_index = parent_pos - 1\n return parent_index"
]
| [
"0.7749123",
"0.7150035",
"0.7133484",
"0.7002957",
"0.6893562",
"0.68775827",
"0.68358344",
"0.68228674",
"0.6657135",
"0.65858424",
"0.6512793",
"0.65048885",
"0.6504525",
"0.6482687",
"0.64728826",
"0.6462298",
"0.6381234",
"0.6368826",
"0.6327531",
"0.63188356",
"0.63071525",
"0.6296316",
"0.6295622",
"0.62945014",
"0.6290395",
"0.6278822",
"0.6267229",
"0.62625843",
"0.62276745",
"0.6225382"
]
| 0.8057412 | 0 |
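The four node-method entries above (`append_child`, `child`, `child_count`, `row`) all fit one simple tree-item class; the sketch below stitches them together under an assumed `SHETNode` name purely for illustration.

class SHETNode:
    def __init__(self, parent=None):
        self._parent = parent
        self._children = []

    def append_child(self, child):
        self._children.append(child)

    def child(self, row):
        return self._children[row]

    def child_count(self):
        return len(self._children)

    def row(self):
        if self._parent is not None:
            return self._parent._children.index(self)
        else:
            return 0

root = SHETNode()
leaf = SHETNode(parent=root)
root.append_child(leaf)
print(root.child_count(), leaf.row())  # 1 0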
Returns a count of games for each team within a range of dates. The range of dates is inclusive. The result is a dictionary, where the key is a team ID and the value is the number of games played within the date range. | def games_count(self, start_date, end_date): # noqa
if start_date > end_date:
raise RuntimeError("End date must be beyond start")
cur_date = start_date
tot_gc = defaultdict(int)
while cur_date <= end_date:
teams_playing = self._teams_playing_one_day(cur_date)
for team in teams_playing:
tot_gc[team] += 1
cur_date = cur_date + datetime.timedelta(days=1)
return tot_gc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_stats(games: List[BaseGame], date_min = None, date_max = None):\n if not games: games = self.games\n\n under2_5 = len(list(filter(lambda g: g.is_total_under(), games)))\n under3_5 = len(list(filter(lambda g: g.is_total_under(3.5), games)))\n under1_5 = len(list(filter(lambda g: g.is_total_under(1.5), games)))\n\n home_score = sum([g.FTHG for g in games])\n away_score = sum([g.FTAG for g in games])\n\n home_wins = sum(1 for _ in filter(lambda g: g.is_home_win(), games))\n away_wins = sum(1 for _ in filter(lambda g: g.is_away_win(), games))\n draws = sum(1 for _ in filter(lambda g: g.is_draw(), games))\n\n return {\n 'under2.5': float(under2_5) / len(games),\n 'under3.5': float(under3_5) / len(games),\n 'under1.5': float(under1_5) / len(games),\n 'avgScoredHome': float(home_score) / len(games),\n 'avgScoredAway': float(away_score) / len(games),\n \"home_wins\": float(home_wins) / len(games),\n \"away_wins\": float(away_wins) / len(games),\n \"draws\": float(draws) / len(games),\n }",
"def counts_per_teams(cls, date_scope):\n return cls._counts_per(\"player_team\", date_scope)",
"def collect_after_game_dicts(league, start_date, end_date):\n after_game_no_dicts = collections.defaultdict(dict)\n\n def add_team_stats(team, after_game_no, stat):\n stat_dict = after_game_no_dicts[after_game_no]\n stat_dict[team] = stat\n\n for team in league.teams:\n matches = get_matches(league, start_date, end_date, team_involved=team)\n for x in range(1, len(matches) + 1):\n stats = TeamStats(team, matches[:x])\n add_team_stats(team, x, stats)\n\n for x, dictionary in after_game_no_dicts.items():\n if len(dictionary) != len(league.teams):\n del after_game_no_dicts[x]\n return after_game_no_dicts",
"def get_number_games(season: str, team_id: int, game_type_code: str = \"R\") -> dict:\n args = arguments.get_arguments()\n\n endpoint = f\"/schedule?teamId={team_id}&season={season}&gameType={game_type_code}\"\n response = api.nhl_api(endpoint)\n\n if response:\n schedule = response.json()\n games_total = schedule[\"totalItems\"]\n return games_total\n\n # If no valid response, just return default number of games (82)\n return 82",
"def make_project_count_dict(df):\n year_lists = []\n null_date_info_projects = 0\n for i in range(len(df)):\n start_date = df[\"Start Date:\"].iloc[i]\n end_date = df[\"End Date:\"].iloc[i]\n if (start_date == -1) or (end_date == -1):\n null_date_info_projects += 1\n continue\n year_lists.append(list(range(start_date.year, end_date.year + 1))) # +1 because the project is active that year. It needs to show on graph\n print(year_lists)\n year_count_dict = OrderedDict.fromkeys(range(2000, datetime.now().year + 5), 0)\n print(year_count_dict)\n for i in year_lists:\n for j in i:\n year_count_dict[j] += 1\n return year_count_dict, null_date_info_projects",
"def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = 
scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)",
"def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats",
"def makeProjectedWins(fantasyTeams, weekStart, weekEnd):\n\tprojectedWinsDict = {}\n\tfor team in fantasyTeams:\n\t\tprojectedWins = 0\n\t\tfor i in range(weekStart, weekEnd+1):\n\t\t\ttry:\n\t\t\t\tif fantasyTeams[team].projectedPoints[i] > fantasyTeams[team].projectedPointsAgainst[i]:\n\t\t\t\t\tprojectedWins += 1\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\t\tprojectedWinsDict.update({team:projectedWins})\n\treturn projectedWinsDict",
"def counts_per_players(cls, date_scope):\n return cls._counts_per(\"player_name\", date_scope)",
"def _get_teams(unstacked, min_games=0):\n count_col = 'game_id'\n home_gb = unstacked.groupby('hteam_id').count()[[count_col]]\n away_gb = unstacked.groupby('ateam_id').count()[[count_col]]\n merged = home_gb.merge(away_gb, how='outer', left_index=True, right_index=True)\n merged[count_col] = merged[count_col + '_x'] + merged[count_col + '_y']\n merged.index.name = 'index'\n teams = merged.reset_index().rename(columns={'index': 'team_id'})\n teams = teams.sort('team_id')\n teams['i_team'] = np.arange(teams.shape[0])\n team_index = {}\n for team, idx in teams[['team_id', 'i_team']].values:\n team_index[int(team)] = int(idx)\n return teams[['team_id', 'i_team']], team_index",
"def get_teams() -> Dict[int, Team]:\r\n # Get teams\r\n soup = TCS_Scraper.scrape_teams()\r\n\r\n # Get each region table\r\n regions = soup.find_all(\"table\", {\"class\" : \"table table-hover table-bordered\"})\r\n\r\n regions_teams = []\r\n\r\n for region in regions:\r\n rows = region.find_all(\"tr\")[1:]\r\n region_list = []\r\n\r\n # find the url and team name for each team in this region\r\n for row in rows:\r\n tag = row.find(\"a\")\r\n name = tag.text.strip()\r\n url = tag.get(\"href\")\r\n region_list.append([name, url])\r\n\r\n # append this region's list of names and url\r\n regions_teams.append(region_list)\r\n\r\n NAME = 0\r\n URL = 1\r\n teams = []\r\n\r\n # Using this list, create Team objects\r\n REGION_NAMES = [\"west\", \"south\", \"north\", \"east\"]\r\n for x in range(len(REGION_NAMES)):\r\n for team in regions_teams[x]:\r\n teams.append(\r\n Team(\r\n team[URL],\r\n REGION_NAMES[x],\r\n team[NAME]\r\n )\r\n )\r\n\r\n team_dict = {}\r\n for team in teams:\r\n team_dict[team.id] = team\r\n\r\n return team_dict",
"def extract_games(self) -> Dict[int, Dict[str, Any]]:\n optadocument = self._get_doc()\n attr = assertget(optadocument, '@attributes')\n matchdata = assertget(optadocument, 'MatchData')\n matches = {}\n for match in matchdata:\n matchattr = assertget(match, '@attributes')\n matchinfo = assertget(match, 'MatchInfo')\n matchinfoattr = assertget(matchinfo, '@attributes')\n game_id = int(assertget(matchattr, 'uID')[1:])\n matches[game_id] = dict(\n # Fields required by the base schema\n game_id=game_id,\n competition_id=int(assertget(attr, 'competition_id')),\n season_id=int(assertget(attr, 'season_id')),\n game_day=int(assertget(matchinfoattr, 'MatchDay')),\n game_date=datetime.strptime(assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'),\n # home_team_id=see below,\n # away_team_id=see below,\n # Optional fields\n # home_score=see below,\n # away_score=see below,\n # duration=?\n # referee=?\n # venue=?,\n # attendance=?\n # home_manager=?\n # away_manager=?\n )\n teamdata = assertget(match, 'TeamData')\n for team in teamdata:\n teamattr = assertget(team, '@attributes')\n side = assertget(teamattr, 'Side')\n teamid = assertget(teamattr, 'TeamRef')\n score = assertget(teamattr, 'Score')\n if side == 'Home':\n matches[game_id]['home_team_id'] = int(teamid[1:])\n matches[game_id]['home_score'] = int(score)\n else:\n matches[game_id]['away_team_id'] = int(teamid[1:])\n matches[game_id]['away_score'] = int(score)\n return matches",
"def event_counts_to_people(self, from_date, events):\n\n jql_script = \"function main() { var event_selectors_array = []; _.each(params.events, function(e) {\" \\\n \"event_selectors_array.push({'event': e});}); return join(Events({from_date: params.from_date,\" \\\n \"to_date: params.to_date, event_selectors: event_selectors_array}), People(), {type: 'inner'})\" \\\n \".groupByUser(['event.name'], mixpanel.reducer.count()).map(function(row) {v = {}; v[row.key[1]]\" \\\n \" = row.value; return {$distinct_id: row.key[0],value: v};});}\"\n\n to_date = datetime.datetime.today().strftime('%Y-%m-%d')\n\n if isinstance(from_date, datetime.date):\n from_date = from_date.strftime('%Y-%m-%d')\n\n params = {'from_date': from_date, 'to_date': to_date, 'events': events}\n return self.jql_operation(jql_script, '$set', jql_params=params, backup=False)",
"def getMatchesInDateRange(self, startDate=None, endDate=None):\n matches = MatchList(matchIds=[])\n if startDate is None:\n startDate = datetime.min\n if endDate is None:\n endDate = datetime.max\n for id in self._matches.keys():\n match = self._matches[id]\n if match.date > startDate and match.date < endDate:\n matches.addMatch(id, match)\n return matches",
"def get_stats(cls, contract_month_start_day=1):\n # I could do this in SQL with date_trunc, but eventually this'll need\n # to be contract-month, so like the 7th-7th or something, which AFAIK\n # can't be done in SQL (and certainly not in Django). So just do this\n # by hand. There are only a few hundred reports/month right now, so this\n # should be OK.\n stats = {}\n\n reports = cls.objects.filter(days_until_triage__isnull=False)\n for report in reports:\n first_day, last_day = dates.contract_month(report.created_at, contract_month_start_day)\n if first_day not in stats:\n stats[first_day] = {\n 'count': 0,\n 'triaged_accurately': 0,\n 'false_negatives': 0,\n 'triaged_within_one_day': 0,\n 'last_day': last_day,\n\n }\n\n stats[first_day]['count'] += 1\n stats[first_day]['triaged_accurately'] += report.is_accurate\n stats[first_day]['false_negatives'] += report.is_false_negative\n if report.days_until_triage <= 1:\n stats[first_day]['triaged_within_one_day'] += 1\n\n stats[\"totals\"] = {\n key: sum(month_stats[key] for month_stats in stats.values()) if stats else 0\n for key in ('count', 'triaged_accurately', 'false_negatives', 'triaged_within_one_day')\n }\n\n return stats",
"def git_stats_date_range(\n project: Project,\n formatter: Formatter,\n date_start: datetime.datetime,\n date_end: datetime.datetime,\n) -> GitStats:\n name_start = f\"{formatter.name}@{{{date_start:%F %T}}}\"\n name_end = f\"{formatter.name}@{{{date_end:%F %T}}}\"\n return _git_stats(project, name_start, name_end)",
"def count_hours(ranges, range_start=None, range_stop=None):\n buckets = [0.0] * (24 * 7)\n one_hour = timedelta(0, 60 * 60)\n\n first = None\n last = None\n\n for start, stop in ranges:\n if ((range_start is not None and start < range_start) or\n (range_stop is not None and stop > range_stop)):\n continue\n\n if first is None:\n first = start\n last = stop\n\n open_ref = datetime(start.year, start.month, start.day, start.hour)\n open_ref += one_hour\n open_ref = min(open_ref, stop)\n open_frac = (open_ref - start) / one_hour\n buckets[24 * start.weekday() + start.hour] += open_frac\n\n if start.date() != stop.date() or start.hour != stop.hour:\n stop_ref = datetime(stop.year, stop.month, stop.day, stop.hour)\n stop_frac = (stop - stop_ref) / one_hour\n buckets[24 * stop.weekday() + stop.hour] += stop_frac\n\n start_hour = 24 * open_ref.weekday() + open_ref.hour\n stop_hour = 24 * stop.weekday() + stop.hour\n if stop_hour < start_hour - 1:\n stop_hour += 24 * 7\n\n for hour in range(start_hour, stop_hour):\n buckets[hour % (24 * 7)] += 1\n\n return buckets, first, last",
"def hits(self) -> Mapping[str, int]:\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved hit counts while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._hit_count.copy()",
"def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]",
"def grouped_games(self):\n # Game times are stored as UTC, need to be offset back to EST or the\n # night games overflow into the next day.\n offset = datetime.timedelta(hours=-5)\n date_grouper = lambda g: (g.start + offset).date()\n time_grouper = lambda g: g.start.time()\n for date, games1 in groupby(self.games.fetch(100), date_grouper):\n group = [(time, list(games2)) for time, games2\n in groupby(games1, time_grouper)]\n yield date, group",
"def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores",
"def get_penalty_counts(game):\n game_type = get_game_type_from_season_type(game)\n\n pen_counts = dict()\n pen_counts['home'] = defaultdict(int)\n pen_counts['road'] = defaultdict(int)\n\n game_events_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_events',\n str(game['season']), str(game_type), \"%d.json\" % game['game_id'])\n events_data = json.loads(open(game_events_src_path).read())\n\n for period in events_data:\n for event in events_data[period]:\n if event['type'] == 'penalty':\n duration = int(event['data']['duration'] / 60)\n if event['data']['team'] == 'home':\n pen_counts['home'][duration] += 1\n else:\n pen_counts['road'][duration] += 1\n\n return pen_counts",
"def get_team_scores(self, team, include_home=True, include_away=True):\n if include_away:\n away_games = list(filter(lambda g: team == g.AwayTeam, self.games))\n else:\n away_games = []\n\n if include_home:\n home_games = list(filter(lambda g: team == g.HomeTeam, self.games))\n else:\n home_games = []\n\n scored_h = [g.FTHG for g in home_games]\n scored_a = [g.FTAG for g in away_games]\n\n conceded_h = [g.FTAG for g in home_games]\n conceded_a = [g.FTHG for g in away_games]\n\n try:\n mean_gd = mean(scored_h + scored_a) - mean(conceded_h + conceded_a)\n home_gd = mean(scored_h) - mean(conceded_h)\n home_adv = home_gd - mean_gd\n except Exception:\n home_adv = 0\n\n return {\n \"scored_xg\": scored_h + scored_a,\n \"conceded_xg\": conceded_h + conceded_a,\n \"home_adv\": home_adv,\n \"expected_points\": self.get_table(metric='points')[team] /\n len(home_games + away_games)\n }",
"def compute_holidays(start, end):\n # The list of holidays and their given dates every year\n holiday_dates = {\n \"new year holiday:\": (1, 1),\n \"labor day holiday:\": (5, 1),\n \"all saints day holiday:\": (11, 1),\n \"christmas holiday:\": (12, 25)\n }\n\n # Initialize the count of occurrences per holiday\n holiday_counts = {holiday: 0 for holiday in holiday_dates.keys()}\n # For loop to go through each holiday\n for holiday in holiday_dates.keys():\n # Sets the year for when counting the occurrences start\n count_start = start.year\n # If the holiday occurs before the start date, we disregard it\n if (start - datetime.date(start.year, *holiday_dates[holiday])).days > 0:\n count_start += 1\n # Sets the year for when counting the occurrences end\n count_end = end.year\n # If the holiday occurs after the end date, we disregard it\n if (datetime.date(end.year, *holiday_dates[holiday]) - end).days > 0:\n count_end -= 1\n # For loop to go through each year in the counting range\n for year in range(count_start, count_end + 1):\n # If the holiday falls on a weekday, we increment the occurrence count\n if datetime.date(year, *holiday_dates[holiday]).weekday() < 5:\n holiday_counts[holiday] += 1\n\n # The total number of holidays is the sum of the counts of each holiday\n holiday_counts[\"total holidays:\"] = sum(holiday_counts.values())\n\n # Returns the dictionary with complete counts\n return holiday_counts",
"def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }",
"def get_counts_by_site_per_day(rows, start_date, end_date):\n counts_by_site_per_day = {}\n\n for row_columns in rows:\n # Each item in \"rows\" represents a participant -- particularly,\n # the set of dates when that participant completed a specified event\n # in the enrollment lifecycle\n hpo = row_columns[0]\n\n # Gather all the dates of each lifecycle phase that needs to be\n # passed in order to become a full participant\n dates = row_columns[2:]\n \n dates = dates[:2] + dates[4:]\n \n dates = [date if date != 'NULL' else '' for date in dates]\n\n # Get the latest -- the most recent -- of those dates\n most_recent_date = sorted(dates)[-1]\n\n # Increment by 1 the number of full participants enrolled\n # in this HPO (i.e. high-level recruitment origin, \"site\")\n # on this date\n if hpo in counts_by_site_per_day:\n counted_days = counts_by_site_per_day[hpo]\n if most_recent_date in counted_days:\n counts_by_site_per_day[hpo][most_recent_date] += 1\n else:\n counts_by_site_per_day[hpo][most_recent_date] = 1\n else:\n counts_by_site_per_day[hpo] = {most_recent_date: 1}\n\n counts_by_site_per_day = AouSummary.truncate_counts(counts_by_site_per_day, start_date, end_date)\n return counts_by_site_per_day",
"def calculate_matches(teams: Dict[int, Team]) -> Dict[int, Match]:\r\n match_urls = TCS_Scraper.scrape_matches(end_round=CURRENT_ROUND)\r\n matches = {}\r\n for match in match_urls:\r\n print(\"Scraping\", match)\r\n team_1id, results, team_2id \\\r\n = TCS_Scraper.scrape_match(match, teams)\r\n # If nothing happened on this match page, skip it\r\n if not results:\r\n continue\r\n team_1 = teams[team_1id]\r\n team_2 = teams[team_2id]\r\n\r\n team_1elos = [team_1.elo]\r\n team_2elos = [team_2.elo]\r\n for result in results:\r\n # Calculate new elo for each team\r\n e1p, e2p = Team.calculate_elo(team_1.elo, team_2.elo, result[0])\r\n\r\n # Print elo changes for each team\r\n print(team_1.name, str(e1p - team_1.elo))\r\n print(team_2.name, str(e2p - team_2.elo))\r\n\r\n # Store the elo changes\r\n team_1elos.append(e1p)\r\n team_2elos.append(e2p)\r\n\r\n # Set new elo values\r\n team_1.elo = e1p\r\n team_2.elo = e2p\r\n\r\n # Create a new match object and append it to the list of matches\r\n new_match = Match(\r\n match,\r\n team_1id,\r\n team_2id,\r\n team_1elos,\r\n team_2elos,\r\n results\r\n )\r\n matches[new_match.id] = new_match\r\n\r\n # Add match id to each team object\r\n team_1.matches.append(new_match.id)\r\n team_2.matches.append(new_match.id)\r\n\r\n return matches",
"def play_n_game(n_games, n_toss):\n results_list = []\n for _ in range(n_games):\n results_list.append(play_one_game(n_toss))\n dict_proba = {}\n for j in range (n_toss + 1):\n if results_list.count(j) != 0:\n dict_proba[j] = results_list.count(j)/n_games\n else:\n continue\n return dict_proba",
"def get_afltables_stats(\n self,\n start_date: Optional[str] = \"1965-01-01\",\n end_date: Optional[str] = \"2016-12-31\",\n ) -> pd.DataFrame:\n\n return self.__data(\n f'get_afltables_stats(start_date = \"{start_date}\", '\n f'end_date = \"{end_date}\")'\n ).assign(playing_for=self.__translate_team_column(\"playing_for\"))",
"def grade_over_time(g_date):\n year_df=g_date.groupby('GRADE DATE')['GRADE']\n gradeOverTime={}\n for name, group in year_df:\n gradeOverTime[name]=collections.Counter(group)\n\n gradeOverTime=pd.DataFrame(gradeOverTime)\n gradeOverTime=pd.DataFrame(gradeOverTime.values.T,columns=gradeOverTime.index,index=[2010,2011,2012,2013,2014])\n \n\n return gradeOverTime"
]
| [
"0.66655016",
"0.6575532",
"0.6239661",
"0.60753345",
"0.5819676",
"0.56853807",
"0.56271034",
"0.5605924",
"0.552719",
"0.5430894",
"0.5406802",
"0.5393699",
"0.53550273",
"0.5331798",
"0.52389354",
"0.5208741",
"0.5198225",
"0.51977944",
"0.5194764",
"0.51899004",
"0.51551",
"0.513937",
"0.5127652",
"0.51092947",
"0.5107862",
"0.5081814",
"0.50802946",
"0.5055818",
"0.50544935",
"0.503077"
]
| 0.77764916 | 0 |
Returns the full list of all players in the NHL. Each player is returned with their teamID and playerID. | def players(self):
if self.players_cache is None:
team_df = self.teams()
self.players_cache = self.ea.players_endpoint(
team_df["id"].tolist())
columns = ["teamId", "playerId", "name", "position"]
all_players = []
for team in self.players_cache["teams"]:
team_id = team["id"]
for plyr in team["roster"]["roster"]:
player_id = plyr["person"]["id"]
player_name = plyr["person"]["fullName"]
position = plyr["position"]["abbreviation"]
all_players.append({columns[0]: team_id,
columns[1]: player_id,
columns[2]: player_name,
columns[3]: position})
return pd.DataFrame(data=all_players, columns=columns) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_players(team_id: int) -> list[Player]:\n\n players = Player.query.filter_by(team_id=team_id).order_by(Player.position.asc()).all()\n\n return players",
"async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output",
"def players(self):\n return Player.objects.filter(team=self)",
"def get_all_players(self):\n\n self._logger.debug(\"Getting player list\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT first_name, last_name, nickname, time FROM player \\\n ORDER BY time DESC\")\n players = cursor.fetchall()\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return players",
"def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players",
"def get_roster_players(self, team, season=None):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's roster\n doc = self.get_html_document(team, 'roster', season)\n\n # retrieving player page urls, and players' first and last names\n # from roster page\n urls = doc.xpath(\"//td[@class='name-col']/a[@href]/@href\")\n first_names = doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-col__item \" +\n \"name-col__firstName']/text()\")\n # using filter to get rid of empty strings after stripping string\n # elements\n # using replace to get rid of asterisk indicating players on injury\n # reserve\n last_names = filter(\n None, [\n x.replace(\"*\", \"\").strip() if x else None for x in doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-\" +\n \"col__item name-col__lastName']/text()\")])\n # retrieving players' positions\n positions = [x[:1] for x in doc.xpath(\n \"//td[@class='position-col fixed-width-font']/text()\")]\n\n for (\n first_name, last_name, url, position\n ) in zip(\n first_names, last_names, urls, positions\n ):\n # retrieving nhl id from player page url\n plr_id = int(url.split(\"-\")[-1])\n\n # trying to find player in database\n plr = Player.find_by_id(plr_id)\n # creating player if not already in database\n if plr is None:\n plr = self.create_player(\n plr_id, last_name, first_name, position)\n logging.info(\"+ %s created\" % plr)\n\n players.append(plr)\n\n return players",
"def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]",
"def get_players(self, all=False):\n if all:\n return self.all_players\n else:\n return self.players",
"def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)",
"def getPlayers(self):\n\t\tself.server.playerMutex.lock()\n\t\tplayers = [ (player[0], player[1][3]) for player in self.server.players.items() ]\n\t\tself.server.playerMutex.unlock()\n\t\treturn players",
"def get_players(n_players):\n\n if n_players < 2 or 8 < n_players:\n raise ValueError('A game must have between 2 to 8 players. You input {} players.'.format(n_players))\n\n return {classes.Player(p) for p in range(n_players)}",
"async def _get_team_players(self, server_id: str, team_id: str):\n params = {}\n url = self.api_url + 'teams/{}/players'.format(team_id)\n\n return await self._make_request(url, params, server_id)",
"def get_player_list(tournament):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste indice Joueurs']\n player_list = []\n for player_id in id_list:\n # getting the players\n player = players_table.get(doc_id=player_id)\n player_list.append(player)\n return player_list",
"def getPlayers(self):\n players = []\n for pgp in self.sandboxplayergroupplayer_set.filter(quit=False):\n players.append(pgp.player)\n return players",
"def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)",
"def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players",
"def get_players(self):\r\n return self.players.values()",
"def look_for_players(self):\n log.debug(\"Start looking for players [\" + self.team_link + \"]\")\n\n players_found = {}\n table = self.soup.find('table', {\"class\": \"table table-striped table-hover no-footer\"})\n for tr in table.find_all(\"tr\"):\n a = tr.find(\"a\")\n if a:\n # tag a container of the name player found\n player_name = str(a.string).strip()\n link = self.host_url + a[\"href\"]\n players_found[link] = player_name\n\n return players_found",
"def players(self):\n return self._get_by_class(Player)",
"def players(self):\n return self._get(\"players\")",
"def find_players_for_team(self, team, src='roster', season=None):\n # creating class wide variable to hold current team\n if type(team) is str:\n team = Team.find(team)\n\n print(\"+ Searching %s players for %s\" % (src, team))\n\n if src == 'roster':\n players = self.get_roster_players_via_api(team, season)\n elif src == 'system':\n players = self.get_system_players(team)\n elif src == 'contract':\n players = self.get_contracted_players(team)\n\n return players",
"def create_player_list(self, current_game):\n players = [Player(c['summonerId'], c['championId'], c['teamId']) for c in current_game['participants']]\n return players",
"def get_roster_players_with_data(self, team):\n # TODO: find usage for this function\n # getting html document with team's roster\n doc = self.get_html_document(team, 'roster')\n\n # retrieving player page urls, and player first and last names\n # from roster page\n urls = doc.xpath(\"//td[@class='name-col']/a[@href]/@href\")\n first_names = doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-col__item \" +\n \"name-col__firstName']/text()\")\n # using filter to get rid of empty strings after stripping string\n # elements\n # using replace to get rid of asterisk indicating players on injury\n # reserve\n last_names = filter(\n None, [\n x.replace(\"*\", \"\").strip() if x else None for x in doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-\" +\n \"col__item name-col__lastName']/text()\")])\n\n # retrieving further player data from roster page\n # player jersey numbers\n numbers = doc.xpath(\n \"//td[@class='number-col fixed-width-font']/text()\")\n # player positions\n positions = [x[:1] for x in doc.xpath(\n \"//td[@class='position-col fixed-width-font']/text()\")]\n # shooting hands, unfortunately goaltender's glove hands aren't\n # listed any longer\n hands = doc.xpath(\"//td[@class='shoots-col fixed-width-font']/text()\")\n # player heights (in ft. + in.)\n heights = doc.xpath(\n \"//td[@class='height-col fixed-width-font']/span[2]/text()\")\n # player weights (in lbs.)\n weights = [int(x) if x.isdigit() else 0 for x in doc.xpath(\n \"//td[@class='weight-col fixed-width-font']/text()\")]\n # player dates of birth\n dobs = doc.xpath(\"//td[@class='birthdate-col']/span[2]/text()\")\n hometowns = doc.xpath(\"//td[@class='hometown-col']/text()\")\n\n players = list()\n\n for (\n first_name, last_name, url, _, position, _, _, _, _, _\n ) in zip(\n first_names, last_names, urls, numbers, positions,\n hands, weights, heights, dobs, hometowns\n ):\n # retrieving nhl id from player page url\n plr_id = int(url.split(\"-\")[-1])\n\n # trying to find player in database\n plr = Player.find_by_id(plr_id)\n # creating player if not already in database\n if plr is None:\n plr = self.create_player(\n plr_id, last_name, first_name, position)\n print(\"%s created...\" % plr)\n\n players.append(plr)\n\n return players",
"def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players",
"def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)",
"def players_list(self):\n self.db = TinyDB('Models/db.json')\n self.query = Query()\n player_table = self.db.table('player_table')\n return player_table",
"def get_player_list(num_players):\n \n colors = BoardPlayer.POSSIBLE_COLORS\n\n player_list = []\n for i in range(num_players):\n player_list.append(Player(colors[i], DEPTH))\n\n return player_list",
"def players(game_id):\n # get data\n data = mlbgame.data.get_players(game_id)\n # parse data\n parsed = etree.parse(data)\n root = parsed.getroot()\n\n output = {}\n output['game_id'] = game_id\n\n # get player/coach data\n for team in root.findall('team'):\n type = team.attrib['type'] + \"_team\"\n # the type is either home_team or away_team\n output[type] = {}\n output[type]['players'] = []\n output[type]['coaches'] = []\n\n for p in team.findall('player'):\n player = {}\n for key in p.keys():\n player[key] = p.get(key)\n output[type]['players'].append(player)\n\n for c in team.findall('coach'):\n coach = {}\n for key in c.keys():\n coach[key] = c.get(key)\n output[type]['coaches'].append(coach)\n\n # get umpire data\n output['umpires'] = []\n for u in root.find('umpires').findall('umpire'):\n umpire = {}\n for key in u.keys():\n umpire[key] = u.get(key)\n output['umpires'].append(umpire)\n\n return output",
"def as_players(self):\n self._assert_no_aggregate()\n\n self._sort_tables = [types.Player]\n ids = self._ids('player', self._sorter)\n results = []\n q = 'SELECT %s FROM player %s %s'\n with Tx(self._db) as cur:\n q = q % (\n types.select_columns(types.Player),\n _prefix_and(_sql_pkey_in(cur, ['player_id'], ids['player'])),\n self._sorter.sql(tabtype=types.Player),\n )\n cur.execute(q)\n\n for row in cur.fetchall():\n results.append(types.Player.from_row(self._db, row))\n return results",
"def get_active_players(self, season):\n try:\n cursor = self.conn.cursor()\n command = '''\n SELECT Player\n FROM InLeague\n WHERE League IN (SELECT L_ID\n FROM League\n WHERE Season = ?)\n '''\n cursor.execute(command, (season,))\n players = []\n for p in cursor.fetchall():\n players.append(p[0])\n return players\n except BaseException as e:\n self.log.log_error('Fehler beim laden der aktiven Spieler', e)\n raise e"
]
| [
"0.73543996",
"0.7350862",
"0.7326979",
"0.7267568",
"0.7178544",
"0.71210647",
"0.7101303",
"0.7095917",
"0.70638573",
"0.70361084",
"0.70209825",
"0.695598",
"0.69269913",
"0.68729657",
"0.6858013",
"0.68501425",
"0.6834751",
"0.6827907",
"0.68150014",
"0.68046874",
"0.6801214",
"0.6800866",
"0.67503136",
"0.6741069",
"0.6723147",
"0.66950685",
"0.6684945",
"0.6648976",
"0.66467583",
"0.6640762"
]
| 0.77101475 | 0 |
Returns a dict of all exporters available in this module. | def exporters():
return dict(_exporters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_exported_services(self):\n with self.__export_lock:\n return [reg.get_export_reference() for reg in self.__exported_regs]",
"def modules(self):\n return self._modules.keys()",
"def export(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"channels\": [channel for channel in self.channels],\n \"packages\": self.packages.export(),\n \"logs\": [log for log in self.logs],\n \"actions\": [action for action in self.actions],\n \"debug\": [debug for debug in self.debug],\n }",
"def get_export_providers() -> List[Union[Type[ProviderApi], Type[ExportProviderInterface]]]:\n return [p for p in ProviderFactory.get_providers() if issubclass(p, ExportProviderInterface)]",
"def get_available_plugin_loaders():\n mgr = stevedore.EnabledExtensionManager(namespace=PLUGIN_NAMESPACE,\n check_func=_auth_plugin_available,\n invoke_on_load=True,\n propagate_map_exceptions=True)\n\n return dict(mgr.map(lambda ext: (ext.entry_point.name, ext.obj)))",
"def exports(self):\n return ExportsCollection(client=self)",
"def get_all_reporters():\r\n for ep in iter_entry_points('attest.reporters'):\r\n yield ep.name",
"def exports(self):\n\n try:\n data_dir = self.export_dir()\n except ValueError, why:\n raise StopIteration(why)\n\n expdir = obj.Object('_IMAGE_EXPORT_DIRECTORY',\n offset = self.DllBase + data_dir.VirtualAddress,\n vm = self.obj_native_vm,\n parent = self)\n\n if expdir.valid(self._nt_header()):\n # Ordinal, Function RVA, and Name Object \n for o, f, n in expdir._exported_functions():\n yield o, f, n",
"def get_registered_providers():\n return _instance.providers_cls.keys()",
"def get_modules(self):\n return self._modules.values()",
"def get_driver_list():\n return list(object_store.ObjectStorageDriver.registry.keys())",
"def exports(self):\r\n return resources.Exports(self)",
"def modules(self):\n return self._modules",
"def encoders(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncoders)['modules']",
"def modules(self):\r\n if not self._modules:\r\n self._modules = DajaxiceModule()\r\n for name, function in self._registry.items():\r\n self._modules.add(name, function)\r\n return self._modules",
"def modules_enabled(self, c):\n\n modules = []\n for name, module in self.modules.iteritems():\n modules.append( (name, module.__class__.__name__) )\n\n return modules",
"def get_available_plugins() -> Dict[str, BasePlugin]:\n if not INITIALIZED:\n _load_and_register_plugins()\n\n return REGISTERED_PLUGINS",
"def plugin_list(self):\r\n return get_module_list()",
"def modules_registered(self) -> list[Module]:\n return [cmds[0].module for cmds in self._registry[\"by_module\"].values()]",
"def get_plugins(self) -> dict:\n return Config.get_plugins()",
"def get_plugins(self):\n plugins = {}\n possibleplugins = os.listdir(self._plugin_folder)\n for i in possibleplugins:\n location = os.path.join(self._plugin_folder, i)\n if not os.path.isdir(location) or not self._main_module + \".py\" in os.listdir(location):\n continue\n info = imp.find_module(self._main_module, [location])\n plugins[i] = ({\"name\": i, \"info\": info})\n return plugins",
"def plugins(self):\n\n return list(self._renderers.keys())",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def all_registered_modules():\n yield from iterchain(modules.values() for modules in Registry.monomers.values())",
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()"
]
| [
"0.67320096",
"0.671589",
"0.6693358",
"0.65067416",
"0.65005225",
"0.6418204",
"0.6391994",
"0.61761796",
"0.61683196",
"0.6134161",
"0.6020214",
"0.60188884",
"0.60087794",
"0.6007217",
"0.59965616",
"0.5981217",
"0.5973341",
"0.5953239",
"0.59515285",
"0.59218055",
"0.59131587",
"0.59113014",
"0.5908215",
"0.5908215",
"0.5908215",
"0.5908215",
"0.5908215",
"0.5908215",
"0.5889475",
"0.58802474"
]
| 0.8974469 | 0 |
Returns a dict of all expression writers available in this module. | def ewriters():
return dict(_ewriters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exporters():\n return dict(_exporters)",
"def loggers() -> Dict[str, logging.Logger]:\n return dict(logging.root.manager.loggerDict) # type: ignore",
"def _getWriters(reactor):\n if IReactorFDSet.providedBy(reactor):\n return reactor.getWriters()\n elif \"IOCP\" in reactor.__class__.__name__:\n return reactor.handles\n else:\n # Cannot tell what is going on.\n raise Exception(\"Cannot find writers on {!r}\".format(reactor))",
"def extensionregistry():\n registry = ExtensionRegistry()\n yield registry",
"def registered_compilers(self) -> Dict[str, CompilerAPI]:\n\n cache_key = self.config_manager.PROJECT_FOLDER\n if cache_key in self._registered_compilers_cache:\n return self._registered_compilers_cache[cache_key]\n\n registered_compilers = {}\n\n for plugin_name, (extensions, compiler_class) in self.plugin_manager.register_compiler:\n # TODO: Investigate side effects of loading compiler plugins.\n # See if this needs to be refactored.\n self.config_manager.get_config(plugin_name=plugin_name)\n\n compiler = compiler_class()\n\n for extension in extensions:\n if extension not in registered_compilers:\n registered_compilers[extension] = compiler\n\n self._registered_compilers_cache[cache_key] = registered_compilers\n return registered_compilers",
"def register_reports(self):\n from ckanext.qa import reports\n return [reports.openness_report_info]",
"def get_extenders():\n\n # pylint: disable=protected-access\n\n if not hasattr(get_extenders, '_CACHE'):\n get_extenders._CACHE = {}\n for entry in pkg_resources.iter_entry_points('tidypy.extenders'):\n try:\n get_extenders._CACHE[entry.name] = entry.load()\n except ImportError as exc: # pragma: no cover\n output_error(\n 'Could not load extender \"%s\" defined by \"%s\": %s' % (\n entry,\n entry.dist,\n exc,\n ),\n )\n return get_extenders._CACHE",
"def modules(self):\r\n if not self._modules:\r\n self._modules = DajaxiceModule()\r\n for name, function in self._registry.items():\r\n self._modules.add(name, function)\r\n return self._modules",
"def notifiers(self):\n return self.registry.keys()",
"def watchers(self) -> dict:\n return self.data.get(\"watchers\", {})",
"def registry(self):\n return self.__registry",
"def get_saveables(self):\n\n saveables = dict()\n saveables['encoder'] = self.encoder\n saveables['decoder'] = self.decoder\n saveables['optim'] = self.optim\n return saveables",
"def dir(self):\n\n info = {}\n\n for m in [models, coupling, integrators, noise, monitors, connectivity, equations, surfaces, patterns]:\n minfo = {}\n for k in dir(m):\n v = getattr(m, k)\n if isinstance(v, type):\n minfo[k] = k + '\\n\\n' + getattr(v, '__doc__', k)\n info[m.__name__.split('.')[-1]] = minfo\n\n return json.dumps(info)",
"def _items():\n for writer in writers:\n name = pm.get_manifest(writer.command).display_name\n title = (\n f\"{name} {writer.display_name}\"\n if writer.display_name\n else name\n )\n yield title, writer.filename_extensions",
"def get_available_extensions() -> DefaultDict[str, Type]:\n all_extensions:DefaultDict[str, Type] = defaultdict(lambda:False)\n for current_class in Content.__subclasses__():\n for extension in current_class.extensions:\n all_extensions[extension] = current_class\n return all_extensions",
"def get_registered_handlers(self):\n return list(self._registry.values())",
"def getEncoders ():\n return _registeredEncoders",
"def modules(self):\n return self._modules.keys()",
"def all_registered_modules():\n yield from iterchain(modules.values() for modules in Registry.monomers.values())",
"def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results",
"def export(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"channels\": [channel for channel in self.channels],\n \"packages\": self.packages.export(),\n \"logs\": [log for log in self.logs],\n \"actions\": [action for action in self.actions],\n \"debug\": [debug for debug in self.debug],\n }",
"def registry(self):\n return self._registry",
"def get_driver_list():\n return list(object_store.ObjectStorageDriver.registry.keys())",
"def d_parsers(self):\n\n return self._d_parsers",
"def get_reader_funcs():\n return READERS",
"def get_registries(self):\n raise NotImplementedError(\"get_registries method is not implemented.\")",
"def encoders(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncoders)['modules']",
"def get_all():\n return {\n _method : getattr(_ROOTObjectFunctions, _method)\n for _method in dir(_ROOTObjectFunctions)\n if not _method.startswith('_') and callable(getattr(_ROOTObjectFunctions, _method))\n }",
"def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]",
"def available_output_formats() -> Dict:\n output_formats = {}\n for v in drivers:\n driver_ = v.load()\n if hasattr(driver_, \"METADATA\") and (driver_.METADATA[\"mode\"] in [\"w\", \"rw\"]):\n output_formats[driver_.METADATA[\"driver_name\"]] = driver_.METADATA\n return output_formats"
]
| [
"0.60313106",
"0.5901297",
"0.5880711",
"0.58622074",
"0.58196956",
"0.5742172",
"0.56615275",
"0.5657879",
"0.5596137",
"0.55732113",
"0.5545482",
"0.5508399",
"0.5503588",
"0.5474215",
"0.5455626",
"0.54393846",
"0.54258674",
"0.5408531",
"0.53520226",
"0.53115875",
"0.53091824",
"0.5307555",
"0.53055346",
"0.5291163",
"0.5280894",
"0.5280241",
"0.5270919",
"0.52618724",
"0.5203774",
"0.52027416"
]
| 0.5967789 | 1 |
generates all prime numbers smaller than _n_ using the Sieve of Eratosthenes | def primes(n):
sieve = [True]*n
for p in range(2, n):
if sieve[p]:
yield p
for i in range(p*p, n, p):
sieve[i] = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i",
"def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)",
"def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]",
"def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list",
"def sieve_for_primes_to(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]",
"def sieve(n: int) -> Generator[int, None, None]:\n primes, p = [i for i in range(2, n + 1)], 2\n while p**2 < n:\n for i in primes:\n if i % p == 0 and i != p:\n primes.remove(i)\n p += 1\n yield from primes",
"def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes",
"def generate_prime_less_than_n(n):\n\tif n <= 1:\n\t\treturn []\n\tlist_of_primes = [2]\n\tfor i in range(3, n, 2):\n\t\tis_prime = True\n\t\tfor j in list_of_primes:\n\t\t\tif i%j == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\tif is_prime:\n\t\t\tlist_of_primes.append(i)\n\treturn list_of_primes",
"def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]",
"def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]",
"def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]",
"def get_primes(n):\n\n return list(primes_sieve(n))",
"def seive_of_eratosthenes(n):\n sieve = [ True for i in range(n+1) ]\n def markOff(pv):\n for i in range(pv+pv, n+1, pv):\n sieve[i] = False\n markOff(2)\n for i in range(3, n+1):\n if sieve[i]:\n markOff(i)\n return [ i for i in range(2, n+1) if sieve[i] ]",
"def prime_sieve(n):\n li = [True] * n\n li[0] = li[1] = False\n\n for (i, isprime) in enumerate(li):\n if isprime:\n yield i\n for j in range(i*i, n, i):\n li[j] = False\n return(li)",
"def sieve(n):\n\tif n < 2:\n\t\treturn []\n\telse:\n\t\tis_prime = [True] * n\n\t\tis_prime[0] = is_prime[1] = False\n\t\tfor i in range(2, n):\n\t\t\tif is_prime[i]:\n\t\t\t\tyield i\n\t\t\t\tfor num in range(i*i, n, i):\n\t\t\t\t\tis_prime[num] = False",
"def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]",
"def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result",
"def eratosthenes2(n):\n multiples = set()\n for i in range(2, n+1):\n if i not in multiples:\n yield i\n multiples.update(range(i*i, n+1, i))",
"def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n",
"def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]",
"def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res",
"def sieve(n):\n #All even numbers except 2 are not primes\n primes = [False, False, True] + [True, False] * (n / 2)\n\n #Start with 3\n p = 3\n\n while p*p <= n:\n if primes[p]:\n #p is prime, cross off all multiples of p, starting at the square \n #of p since all smaller multiples have already been crossed off\n d = p*p\n while d <= n:\n primes[d] = False\n d += p\n p += 2\n\n #Build a list of the primes we've found\n return [i for i in range(n) if primes[i]]",
"def primeSieve(n):\n\tsieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n\tfor i in xrange(1,int(n**0.5)/3+1):\n\t\tif sieve[i]:\n\t\t\tk=3*i+1|1\n\t\t\tsieve[ k*k/3 ::2*k] = False\n\t\t\tsieve[k*(k-2*(i&1)+4)/3::2*k] = False\n\treturn numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]",
"def sieve(n):\n s = [True] * (n + 1)\n for i in range(2, isqrt(n) + 1):\n if s[i]:\n for j in range(i + i, n + 1, i):\n s[j] = False\n return [i for i in range(2, n + 1) if s[i]]"
]
| [
"0.84180295",
"0.8417532",
"0.82622313",
"0.82567656",
"0.82486343",
"0.82486343",
"0.8190478",
"0.818247",
"0.8172363",
"0.81599665",
"0.81357145",
"0.8119539",
"0.8106137",
"0.8075972",
"0.8052299",
"0.8046388",
"0.80405337",
"0.8021724",
"0.8011649",
"0.8010728",
"0.8006345",
"0.79873395",
"0.7960497",
"0.7956843",
"0.79566467",
"0.7949767",
"0.79469484",
"0.7874018",
"0.7866841",
"0.78404003"
]
| 0.8424956 | 0 |
Check if the given sequence is a Fibonacci sequence. Fibonacci sequence is assumed to have length > 2. | def check_fibonacci(data: Sequence[int]) -> bool:
if len(data) < 3:
return False
if data[0] != 0 or data[1] != 1:
return False
for n in range(2, len(data)):
if data[n] != data[n - 1] + data[n - 2]:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_fibonacci(data: Sequence[int]) -> bool:\n def fib_gen(n: int) -> Iterable:\n a, b = 0, 1\n while a <= n:\n yield a\n a, b = b, a + b\n\n last_el = data[-1]\n perfect_fib = list(el for el in fib_gen(last_el))\n\n if len(data) < 3:\n return False\n if data[::-1] != perfect_fib[:-len(data) - 1:-1]:\n return False\n return True",
"def is_fibonacci_number(x):\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()",
"def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n raise ValueError(\"Not enough data entered\")\n while len(data) >= 3:\n a, b, c = data[0], data[1], data[2]\n if not _check_window(a, b, c):\n return False\n del data[0]\n return True",
"def isfib(number):\n\n num1 = 1\n num2 = 1\n while True:\n if num2 < number:\n tempnum = num2\n num2 += num1\n num1 = tempnum\n elif num2 == number:\n return True\n else:\n return False",
"def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False",
"def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False",
"def fibonacciSeries(userinput):\n try:\n isinstance(int(userinput), int)\n userinput = int(userinput)\n except ValueError as e:\n print(e)\n else:\n if isPerfectSquare(\n (5 *\n userinput *\n userinput) -\n 4)or isPerfectSquare(\n (5 *\n userinput *\n userinput) +\n 4):\n return True\n else:\n return False",
"def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"",
"def find_fib(n):\n # fibo = 2.078087 * math.log(n) + 1.672276\n return 0 # fibo",
"def test_fib_digits(n, result):\n from even_digit_primes import f\n assert f(n) == result",
"def fibonacci():\n return sum_series(a=0, b=1)",
"def fibo(n):\n first = 0\n second = 1\n for i in range (1,n+1):\n if (i<=1): \n #begins sequence (terms 0 and 1 do not have two prior terms)\n newVal = i\n else:\n #continues sequence by adding the previous two numbers in the\n #sequence, and updating the variables\n newVal = first + second\n first = second\n second = newVal\n print(i,newVal)",
"def fibonacci(n: int) -> int:\n m = 1 << (n.bit_length() - 1)\n Fn = 0\n Fnm1 = 1\n while m:\n Fn2 = Fn * Fn\n Fn = 2 * Fnm1 * Fn + Fn2\n Fnm1 = Fnm1 * Fnm1 + Fn2\n if n & m:\n Fnm1, Fn = Fn, Fnm1 + Fn\n m >>= 1\n return Fn",
"def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)",
"def fibonacci(n):",
"def test_negative_check_fibonacci(arg, expected):\n assert major_and_minor_elem(arg) == expected",
"def fibi(n):\n if n == 0: return 0\n if n == 1: return 1\n f_n2, f_n1 = 1, 1\n for i in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1",
"def test_fibonacci(n, result):\n from series import fibonacci\n assert fibonacci(n) == result",
"def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b",
"def fibonacci(input) :\n\n\n\n\n\n# if input == 0:\n\n# return 0\n\n\n\n# elif input == 1:\n\n# return 1\n\n# else :\n\n# return fibonacci(input-1) + fibonacci(input-2)\n\n\n\n\n return sum_series(input)",
"def fibo(n):\r\n if n==1:\r\n return 0\r\n elif n==2:\r\n return 1\r\n else:\r\n return fibo(n-1)+fibo(n-2)",
"def nthFibonacci(n):\n\n # Run some basic error checking\n try:\n n = int(n)\n except: # if this fails not a number inputed\n sys.stderr.write('Incorrect data input\\n')\n return None\n if n < 0:\n sys.stderr.write('Only positive integers allowed\\n')\n return None\n \n # since the error checking slows down the recursion we run it as a seperate function\n [Fnm,Fn] = fastrecursivefibonacci(n)\n return Fnm",
"def fibonacci(num):\n counter = 0\n\n # Start fibonacci\n sequence = [0, 1]\n while len(sequence) < num:\n n1 = sequence[counter]\n n2 = sequence[counter + 1]\n sequence.append(n1+n2)\n\n counter += 1\n\n return sequence",
"def fibonacci(n):\r\n\r\n if n in past_fib:\r\n return past_fib[n]\r\n \r\n if n == 0 or n == 1:\r\n past_fib[n] = 1\r\n return 1\r\n\r\n total = fibonacci(n-1) + fibonacci(n-2)\r\n past_fib[n] = total\r\n return total",
"def get_fib(position):\n\n # Base Case: Positions greater thatn 0 or 1, since Fibonacci for 0 is 0 and\n # 1 is 1.\n if position == 0 or position == 1:\n return position\n\n return get_fib(position - 1) + get_fib(position - 2)",
"def fib(number):\n if number < 0:\n raise ValueError\n if number in (0, 1):\n return number\n return fib(number - 1) + fib(number - 2)",
"def fibonacci_sequence(max):\n term = fibonacci_term(0)\n f = []\n i = 1\n while term < max:\n f.append(term)\n term = fibonacci_term(i)\n i += 1\n return f",
"def fibonacci(n):\n\tfib_seq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tfib_seq.append(0)\n\t\tif i == 1:\n\t\t\tfib_seq.append(1)\n\t\tif i > 1:\n\t\t\tnth_term = fib_seq[-1] + fib_seq[-2]\n\t\t\tfib_seq.append(nth_term)\n\t\n\tprint(fib_seq)\n\tprint(fib_seq[n])\n\treturn(fib_seq[n])",
"def main():\n for term in FibonacciSeries():\n if log10(term.value) + 1 >= MAX_DIGITS:\n print((term.index))\n break",
"def fib_in_range():\n limit1, limit2, nums_in_range = create_range()\n fib1, fib2 = 0, 1\n for elem in range(limit1, limit2):\n fib1, fib2 = fib2, fib1 + fib2\n if fib1 in nums_in_range:\n print(fib1)"
]
| [
"0.7771214",
"0.7764462",
"0.7430969",
"0.7402631",
"0.70261985",
"0.68094856",
"0.67896724",
"0.65808904",
"0.6365835",
"0.63628143",
"0.632067",
"0.62956697",
"0.6284243",
"0.62702245",
"0.625835",
"0.6245525",
"0.61474824",
"0.6098934",
"0.6074185",
"0.60714906",
"0.6023177",
"0.6013212",
"0.59610486",
"0.5957459",
"0.595585",
"0.5949846",
"0.5949733",
"0.59365785",
"0.5926052",
"0.5918293"
]
| 0.81119895 | 0 |
return the first n bits of the fractional part of float f | def frac_bin(f, n=32):
f -= math.floor(f) # get only the fractional part
f *= 2**n # shift left
f = int(f) # truncate the rest of the fractional content
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mask_i_float(i, n):\n range_n = arange(cast(array(n), int32))\n i_float = cast(array(i), int32)\n mask_i = equal(range_n, i_float)\n mask_i_float = cast(mask_i, float32)\n return mask_i_float",
"def fpart(x):\n return x - np.floor(x)",
"def fpart(x):\n return x - math.floor(x)",
"def quantize_float(f, q):\n return int(round(f / q) * q)",
"def get_frac_bits(self):\n return self.frac_bits",
"def float_to_fp(x, precision='single'):\n\n # Zero\n if x == 0:\n return 0\n\n # Inf\n if math.isinf(x):\n s = '0' if x > 0 else '1'\n return int(s + '1' * _Exponent_bits[precision] + '0' * _Fraction_bits[precision], 2)\n\n # NaN\n if math.isnan(x):\n return int('0' + '1' * _Exponent_bits[precision] + '1' * _Fraction_bits[precision], 2)\n\n if not float_in_range(x, precision):\n raise ValueError(\"Value out of range for precision\")\n\n # Get exponent and upper fraction\n l = abs(int(x)) # TODO check abs()\n f_upper = bin(l)[3:] # remove 0b1 (includes leading 1 implied in fp)\n e = bin(len(f_upper) + _Bias[precision])[2:2 + _Exponent_bits[precision]]\n\n # Get lower fraction\n r = abs(x) - l # TODO check abs()\n fraction_bits = len(f_upper)\n f_lower = ''\n while r != 0.0 and fraction_bits <= _Fraction_bits[precision]:\n r *= 2\n fraction_bits += 1\n f_lower = f_lower + str(int(r))\n r -= int(r)\n\n # Get sign and join\n sign = '1' if x < 0 else '0'\n res = zfill_right(sign + e + f_upper + f_lower, _Bitwidth[precision])\n return int(res, 2)",
"def truncate(f, n):\n if isinstance(f, float):\n s = '{}'.format(f)\n else:\n s = f\n if 'e' in s or 'E' in s:\n return '{0:.{1}f}'.format(f, n)\n i, p, d = s.partition('.')\n return '.'.join([i, (d+'0'*n)[:n]])",
"def float_trunc(v, zerobits):\n # A float is represented in binary like this:\n # seeeeeeeefffffffffffffffffffffff\n mask = -(1L << zerobits)\n v = struct.unpack('I', struct.pack('f', v))[0]\n v &= mask\n return struct.unpack('f', struct.pack('I', v))[0]",
"def f2p (f):\n #return 2*math.log(f, 11000) - 1\n #return f/11000 - 1\n return f/5500 - 1",
"def ifloor(f: SupportsFloat) -> int:\n\t\t# noinspection PyTypeChecker\n\t\treturn int(np.floor(f))",
"def f2i(f):\n return struct.unpack('i', struct.pack('f', f))[0]",
"def bitfield(i,N): \n bits = bitstr(i,N)\n bits = [int(digit)*2-1 for digit in bits]\n return np.array(bits).astype(float)",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def float_to_binary(x, n=64):\n return _fix_sign(int_to_binary(float_to_int(x, n), n))",
"def fmt_float(f, p=0):\n\n string = str(\n decimal.Decimal(f).quantize(decimal.Decimal('0.' + ('0' * p) if p > 0 else '0'), decimal.ROUND_HALF_UP)\n )\n\n m = FLOAT_TRIM_RE.match(string)\n if m:\n string = m.group('keep')\n if m.group('keep2'):\n string += m.group('keep2')\n return string",
"def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def poly_nth(f, n):\n if n < 0 or n > len(f)-1:\n raise IndexError\n else:\n return f[zzx_degree(f)-n]",
"def floor(n: float) -> int:\n return int(n // 1)",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")",
"def fixed_point(x, int_bits, frac_bits):\n if int_bits == None and frac_bits == None: return x\n _frac = x - np.floor(x) #if x >= 0 else x - np.ceil(x)\n _int = np.floor(x) #if x>= 0 else np.ceil(x)\n _frac = np.round(_frac*2**frac_bits)/2**frac_bits\n _x = _int + _frac\n # if _x > 2**(int_bits-1)-1: _x = 2**(int_bits-1)-1\n # elif _x < -2**(int_bits-1): _x = -2**(int_bits-1)\n if _x > 2**(int_bits)-2**(-frac_bits): _x = 2**(int_bits)-2**(-frac_bits)\n elif _x < -2**(int_bits): _x = -2**(int_bits)\n return _x",
"def rfpart(x):\n return 1 - Util.fpart(x)",
"def ifrft(f, a):\n return frft(f, -a)",
"def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500",
"def ftom(f):\n return 69 + 12 * log(f / 440.0, 2)",
"def get_fract(self, var, max_var):\n\t\treturn (var % max_var) / float(max_var)",
"def fpr(self):\n return float(self.fp) / (self.fp + self.tn) if self.tn != 0 else 1",
"def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out",
"def fraccoord(N, p, Qpx):\n #Qpx = 1\n H=N/2\n #print 'H:', H\n x=(1+p)*H\n flx=np.floor(x + 0.5 /Qpx)\n fracx=np.around(((x-flx)*Qpx))\n return (flx).astype(int), fracx.astype(int)",
"def fp_to_float(fp, precision='single'):\n\n if precision not in ('half', 'single', 'double', 'quad'):\n raise ValueError(\"Precision must be one of 'half', 'single', 'double', or 'quad\")\n if not isinstance(fp, int):\n raise TypeError(\"fp must be an integer\")\n\n fp = bin(fp)[2:].zfill(_Bitwidth[precision])\n s = fp[0]\n e = fp[1:1 + _Exponent_bits[precision]]\n f = fp[1 + _Exponent_bits[precision]:]\n\n if e == '0' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return 0.0\n else:\n raise ValueError(\"Subnormal number not supported\")\n elif e == '1' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return math.inf if s == '0' else -math.inf\n else:\n # Or float('nan') (Using math.nan permits object comparision, i.e. x is math.nan)\n return math.nan\n\n ev = 2 ** (int(e, 2) - _Bias[precision])\n fv = 1 + (int(f, 2) / 2 ** _Fraction_bits[precision])\n v = ev * fv\n return v if s == '0' else -v"
]
| [
"0.6895186",
"0.6649497",
"0.66047823",
"0.627372",
"0.6265717",
"0.61318606",
"0.6037289",
"0.6027234",
"0.60202575",
"0.60193044",
"0.59969354",
"0.5991846",
"0.59733486",
"0.59640974",
"0.5958672",
"0.59521466",
"0.5927512",
"0.5888159",
"0.58834416",
"0.58711225",
"0.57886606",
"0.57751435",
"0.57672524",
"0.5764282",
"0.57635856",
"0.57511586",
"0.5741545",
"0.57398385",
"0.5739382",
"0.5736708"
]
| 0.7696176 | 0 |
Preprocesses the given bitext by removing empty lines, sentence pairs in incorrect languages, sentences above the specified length threshold, and sentence pairs exceeding the specified length ratio. | def preprocess_bitext(src_path, tgt_path, src_lang, tgt_lang, max_len, max_len_ratio):
# Generate output paths
src_out_path = '.'.join(src_path.split('.')[:-1]) + '.clean.{:s}'.format(src_lang)
tgt_out_path = '.'.join(tgt_path.split('.')[:-1]) + '.clean.{:s}'.format(tgt_lang)
# Open aligned corpora
tgt_text = open(tgt_path, 'rb')
print('Cleaning corpora ...')
lines_kept = 0
with open(src_path, 'rb') as src_text:
with open(src_out_path, 'wb') as src_in:
with open(tgt_out_path, 'wb') as tgt_in:
for line_id, orig_src_line in enumerate(src_text):
orig_tgt_line = tgt_text.readline()
try:
str_src_line = orig_src_line.decode('utf-8')
str_tgt_line = orig_tgt_line.decode('utf-8')
except UnicodeDecodeError:
continue
# Remove punctuation
src_line = re.sub(r' +', ' ', str_src_line.strip().translate(pct_stripper))
tgt_line = re.sub(r' +', ' ', str_tgt_line.strip().translate(pct_stripper))
# NOTE: Lines which only contain whitespaces are not removed! This should be fixed.
# Keep if not empty
if len(src_line) > 0 and len(tgt_line) > 0:
# Keep if correct languages
if langid.classify(src_line)[0] == src_lang and langid.classify(tgt_line)[0] == tgt_lang:
# Tokenize
src_len = len(src_line.split(' '))
tgt_len = len(tgt_line.split(' '))
# Keep if below length threshold
if src_len <= max_len and tgt_len <= max_len:
# Keep if below length ratio
if max(src_len, tgt_len) / min(src_len, tgt_len) <= max_len_ratio:
src_in.write(orig_src_line)
tgt_in.write(orig_tgt_line)
lines_kept += 1
# Report occasionally
if line_id > 0 and line_id % 100000 == 0:
print('Processed {:d} sentence pairs | Kept {:d}'.format(line_id, lines_kept))
# Close open file objects
tgt_text.close()
print('-' * 20)
print('Done') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text",
"def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences",
"def preprocess_raw_text(source_file, output_file, min_length=5):\n STOP_WORDS = set(stopwords.words('english'))\n TOKENIZER = RegexpTokenizer(r'[a-zA-Z]{2,}')\n WORDS = set(nltk.corpus.words.words())\n\n total_sent = 0\n sent_count = 0\n write_file = open(output_file, 'a+')\n\n with open(source_file, 'r') as read_file:\n for line in read_file:\n total_sent += 1\n line = line.lower()\n text = TOKENIZER.tokenize(line)\n filtered_sentence = [\n w for w in text if w not in STOP_WORDS and w in WORDS\n ]\n if len(filtered_sentence) > min_length:\n write_file.write(\" \".join(filtered_sentence) + '\\n')\n sent_count += 1\n\n if total_sent % 1000000 == 0:\n print(str(total_sent // 1000000),\n ' million sentences processed.')\n\n read_file.close()\n write_file.close()\n\n print('total sentence before processed: %i' % (total_sent))\n print('total sentence after processed: %i' % (sent_count))",
"def check_sentences(text, threshold=80, print_only=False):\n non_white_text = re.sub(masks, \"\", re.sub(emojis, \"\", re.sub(punctuation, \"\", re.sub(\"\\s\", \"\", text))))\n num_chars = len(non_white_text)\n num_non_swiss_chars = 0\n for char in non_white_text:\n if char not in swiss_chars:\n num_non_swiss_chars += 1\n ratio = num_non_swiss_chars / num_chars * 100 if num_chars != 0 else 0\n if ratio > threshold:\n return \"POSSIBLE NON_SWISS GERMAN TEXT:\" + text if print_only else False\n else:\n return text",
"def preProcess(text):\n\ttext = text.lower() # lower case the text\n\t# Q4 replace the word with expanded contractions\n\tfor k,v in general_contraction.items():\n\t\tif k in text.split():\n\t\t\ttext = text.replace(k,v)\n\t# Q4 remove speacial char including all puncuattions and replace it with a space\n\ttext = re.sub('[^A-Za-z0-9]+',' ',text) \n\t# tokenise\n\ttokens = text.split()\n\t# stop word removal\n\ttokens = [w for w in tokens if w not in stopwords ]\n\t# Q4 Stemming\n\ttokens = [str(porter.stem(w)) for w in tokens]\n\t# if word is non-english return its english form # too much time-complexity\n\t# tokens = [porter.stem(w) if porter.stem(w) in set(words.words()) else w for w in tokens ]\n\t# for words having digits such as 12gb, 1st, etc expanding the token list\n\tfor k in tokens:\n\t\tif len(k) >2 and re.match(r'[0-9]+',k):\t\t\t\n\t\t\tif len(k) >2 and not k.isdigit():\n\t\t\t\tl = re.split(r'(\\d+)',k)\n\t\t\t\tl = [w for w in l if w is not '' ]\n\t\t\t\tif l and len(l) <= 3:\n\t\t\t\t\tfor i in l:\n\t\t\t\t\t\tif i in digit_contractions.keys():\n\t\t\t\t\t\t\tl = list(map(lambda b: b.replace(i,digit_contractions[i]), l))\n\t\t\t\t\ttokens.remove(k)\n\t\t\t\t\ttokens = tokens+l\n\t\t\t\telse:\n\t\t\t\t\ttokens.remove(k)\n\tfor k,v in digit_contractions.items():\n\t\tif k in tokens:\n\t\t\tif tokens[tokens.index(k)-1].isdigit():\t\n\t\t\t\ttokens = list(map(lambda b: b.replace(k,v), tokens))\n\t# remove tokens of size less than 2\n\ttokens = [t for t in tokens if len(t) > 2]\n\treturn tokens",
"def preprocess(text, freq=5):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n words_raw = text.strip().lower().split()\n word_counts = Counter(words_raw)\n words = [w for w in words_raw if word_counts[w] > freq]\n vocab = set(words)\n vocab2index = {w: idx for idx, w in enumerate(vocab)}\n index2vocab = {idx: w for idx, w in enumerate(vocab)}\n words_int = [vocab2index[w] for w in words]\n return words_int, vocab2index, index2vocab",
"def extract_clean_words(para):\n paras = para.split()\n noise = [\"http\",\"apnews\",\"news\",\"link\",\"subscribe\",\"dc\",\"d\",\"c\",\"s\"]\n for x in noise:\n if x in para.split():\n paras.remove(x)\n \n\n if (len(paras) <= 300) & (len(paras) >= 200):\n words_hundred = \"\"\n for i in paras:\n words_hundred = words_hundred + \" \" + i\n\n return words_hundred.strip()\n elif len(paras) > 300:\n words_hundred = \"\"\n for i in range(300):\n words_hundred = words_hundred + \" \" + paras[i]\n return words_hundred.strip()\n else:\n pass\n # print(\"*********************************************\")\n # print(\"length of the news is \" +str(len(paras)))\n # paras = \" \".join(i for i in paras) \n # print(paras)\n # print(\"=======================LOST==================\")",
"def reformat(par, length, height):\n\n buf = []\n lines = len(par)\n idx = 0\n while idx < lines and (height is None or idx < height):\n buf.extend(par[idx].split())\n words = []\n if idx > 0:\n words.append('')\n curlen = 0\n while buf:\n word = buf[0]\n curlen += _word_length(word)\n if height is not None and idx == height - 1:\n if curlen + 3 > length:\n break\n else:\n if curlen > length:\n break\n buf.pop(0)\n words.append(word)\n if word == '.':\n break\n elif word.endswith('.'):\n words.append('')\n if height is not None and idx == height - 1 and words[-1]:\n words.append('...')\n par[idx] = ' '.join(words).rstrip()\n idx += 1\n\n while buf and (height is None or idx < height):\n words = []\n curlen = 0\n while curlen < length and buf:\n word = buf[0]\n curlen += _word_length(word)\n if height is not None and idx == height - 1:\n if curlen + 3 > length:\n break\n else:\n if curlen > length:\n break\n buf.pop(0)\n words.append(word)\n if word == '.':\n words.append('\\n')\n break\n elif word.endswith('.'):\n words.append('')\n if height is not None and idx == height - 1 and words[-1]:\n words.append('...')\n par.append(' '.join(words).rstrip())\n idx += 1\n\n if height is not None:\n del par[height:]",
"def gen_adjusted_text(self, UNK_threshold):\n temp_text = self.text[:]\n\n word_count = self.get_ngram_freq(1)\n for word_tuple, word_count in word_count.items():\n if word_count == 1:\n temp_text = re.sub(r\"\\b{0}\\b\".format(word_tuple[0]), \" \" + TOKENS[\"UNK\"] + \" \", temp_text)\n\n return temp_text",
"def clean_text(self, text, length=False, chars_to_remove=None, truncate=False):\n new_text = text\n # remove given chars\n for char in chars_to_remove:\n new_text = new_text.replace(char, '')\n\n # replace double lines with single lines\n new_text = new_text.replace('\\n\\n', '\\n')\n\n # shorten response to last full sentence (ending with period)\n idx_last_period = new_text.rfind(\".\")\n if idx_last_period != -1: # period found\n new_text = new_text[:idx_last_period+1]\n\n # shorten sentence if possible\n if length < len(new_text):\n new_text = new_text[:length - 1]\n print('truncated response length to: ', len(new_text))\n\n return new_text",
"def preprocessing_a1(self, text):\n # clean description\n cleaned_text = self.text_cleaning(text)\n # preprocess description\n preprocessed_text = self.text_preprocessing_a1(cleaned_text)\n\n return preprocessed_text",
"def process_text(text):\n no_split_dict = {'u . s': 'u.s', 'u . n': 'u.n', 'u . k': 'u.k', 'l . a': 'l.a', 'j . k': 'j.k', 'a . m': 'a.m',\n 'p . m': 'p.m', 'd . j': 'd.j', 'd . a': 'd.a'}\n\n text = re.sub(\".*--\", \"\", text, count=1) # Removing cnn from start of text\n if text.startswith('(CNN)'): # Remove cnn from articles that starts with only cnn\n text = re.sub('\\(CNN\\)', '', text, count=1)\n text = re.sub(r'(?<=[^?!.0-9])(?=[.,!?])', ' ', text) # 4\n text = re.sub(r'(?![0-9])(?<=[.,])(?=[^\\s])', r' ', text) # 4\n text = text.lower() # 2\n text = re.sub('[^A-Za-z0-9 .!?,øæå]+', '', text) # 3\n text = re.sub(r'((?<=[a-z])(?=[.]))|((?=[a-z])(?<=[.]))(?=[^\\s])', r' ', text) # space a-z.a-z\n text = re.sub(r'((?=[0-9])(?<=[a-z]))|((?=[a-z])(?<=[0-9]))(?=[^\\s])', r' ', text) # space 0-9a-z\n for key in no_split_dict:\n text = text.replace(key, no_split_dict[key]) # Fixing word splits\n text = re.sub('[0-9]', '#', text) # 8\n text = \" \".join(text.split()) # 5, 6, 7 - i think\n return text",
"def cleaning(full_text):\n try:\n if open(RESULT_PATH):\n os.remove(RESULT_PATH)\n \n else:\n print(\"No output.mp3\")\n except Exception as e:\n print(str(e))\n\n text = full_text\n\n book = ''.join(text)\n\n\n book = book.replace('.', '.<eos>')\n book = book.replace('?', '?<eos>')\n book = book.replace('!', '!<eos>')\n\n sentences = book.split('<eos>')\n\n return sentences",
"def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]",
"def process_paragraph( paragraph ):\n\t# Lists of bounding boxes, text, and probabilities\n\tline_box_list = []\n\tline_text_list = []\n\tline_prob_list = []\n\n\t# Line under processing\n\tcurrent_line_text = []\n\tcurrent_line_prob = []\n\t# Bounding box temporary variables\n\tx1 = 100000\n\ty1 = 100000\n\tx2 = 0\n\ty2 = 0\n\n\tfor word in paragraph.words:\n\t\tfor symbol in word.symbols:\n\t\t\t# x1, y1 (Left upper corner)\n\t\t\tif symbol.bounding_box.vertices[0].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[0].x\n\t\t\tif symbol.bounding_box.vertices[0].y < y1:\n\t\t\t\ty1 = symbol.bounding_box.vertices[0].y\n\t\t\tif symbol.bounding_box.vertices[1].y < y1: \n\t\t\t\ty1 = symbol.bounding_box.vertices[1].y\n\t\t\tif symbol.bounding_box.vertices[3].x < x1:\n\t\t\t\tx1 = symbol.bounding_box.vertices[3].x\n\t\t\t# x2, y2 (right lower corner)\n\t\t\tif symbol.bounding_box.vertices[2].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[2].x\n\t\t\tif symbol.bounding_box.vertices[2].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[2].y\n\t\t\tif symbol.bounding_box.vertices[1].x > x2:\n\t\t\t\tx2 = symbol.bounding_box.vertices[1].x\n\t\t\tif symbol.bounding_box.vertices[3].y > y2:\n\t\t\t\ty2 = symbol.bounding_box.vertices[3].y\n\n\t\t\tcurrent_line_text.append( symbol.text )\n\t\t\tcurrent_line_prob.append( symbol.confidence )\n\t\t\t# Check for blank spaces\n\t\t\tif symbol.property.detected_break.type in [ breaks.SPACE, breaks.SURE_SPACE ]:\n\t\t\t\tcurrent_line_text.append( ' ' )\n\t\t\t\tcurrent_line_prob.append( 0.95 )\n\t\t\t# Check for new lines\n\t\t\tif symbol.property.detected_break.type in [ breaks.EOL_SURE_SPACE, breaks.HYPHEN, breaks.LINE_BREAK ]:\n\t\t\t\tline_box_list.append( [x1, y1, x2, y2] )\n\t\t\t\tline_text_list.append( current_line_text )\n\t\t\t\tline_prob_list.append( current_line_prob )\n\t\t\t\t# Line under processing\n\t\t\t\tcurrent_line_text = []\n\t\t\t\tcurrent_line_prob = []\n\t\t\t\t# Bounding box temporary variables\n\t\t\t\tx1 = 100000\n\t\t\t\ty1 = 100000\n\t\t\t\tx2 = 0\n\t\t\t\ty2 = 0\n\n\treturn( line_box_list, line_text_list, line_prob_list )",
"def cleanup(text):\n with open(text, 'r') as uncleaned_text:\n no_chapters = re.sub('[A-Z]{3,}', ' ', uncleaned_text.read())\n remove_periods = re.sub('(\\s\\.){4,}', '', no_chapters)\n new_text = re.sub('\\*', '', remove_periods)\n return new_text",
"def preprocess(sent):\n return sent",
"def text_preprocessing_pdf(self,p):\n #remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p",
"def process_text(text):\n text = text.strip()\n textList = text.split('\\n')\n newText = ''\n addNewline = True\n for line in textList:\n # Remove duplicate white space\n temp = ' '.join(line.split())\n # Trim any beginning non-alphabet letters\n temp = trim(temp)\n # Remove overly short lines, but keep ends of sentences\n # Add a newline if gap detected\n if len(temp) < 40 and not '.' in temp:\n if addNewline:\n newText += '\\n'\n addNewline = False\n continue\n # Add line to growing string\n newText += temp + ' '\n addNewline = True\n return newText",
"def preprocess(self, text):\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text",
"def split_filter_captions(data, max_token_length, tokens_type, verbose=True):\r\n captions_kept = 0\r\n img_kept = 0\r\n img_removed = 0\r\n captions_removed = 0\r\n pbar = mmcv.ProgressBar(len(data))\r\n for i, img in enumerate(data):\r\n regions_per_image = 0\r\n for region in img['relationships']:\r\n # create tokens array\r\n if tokens_type == 'words':\r\n tokens = words_preprocess(region['phrase'])\r\n elif tokens_type == 'chars':\r\n tokens = list(region['label'])\r\n else:\r\n assert False, 'tokens_type must be \"words\" or \"chars\"'\r\n\r\n # filter by length\r\n if max_token_length > 0 and len(tokens) <= max_token_length:\r\n region['tokens'] = tokens\r\n # pdb.set_trace()\r\n region['parts'] = [1] * len(words_preprocess(region['subject']['name'])) + [2] * len(\r\n words_preprocess(region['predicate'])) + [3] * len(words_preprocess(region['object']['name']))\r\n\r\n captions_kept += 1\r\n regions_per_image = regions_per_image + 1\r\n else:\r\n region['tokens'] = None\r\n captions_removed += 1\r\n if regions_per_image == 0:\r\n img_removed += 1\r\n else:\r\n img_kept += 1\r\n pbar.update()\r\n\r\n print('\\n ###### WANRING: kept %d, removed %d' % (img_kept, img_removed))\r\n\r\n if verbose:\r\n print('Keeping %d captions' % captions_kept)\r\n print('Skipped %d captions for being too long' % captions_removed)",
"def remove_footnotes(self, xml):\n text_nodes = xml.find_all('text')\n\n # Find the most commonly occurring height of text nodes. We'll assume this is\n # the height of the law text itself.\n heights = {}\n for node in text_nodes:\n if not node.has_attr(\"height\"):\n continue\n height = int(node[\"height\"])\n heights[height] = ((heights[height] + 1) if heights.has_key(height) else 1)\n most_common_height = max(heights, key = heights.get)\n\n # Remove all text nodes whose height is lower than most_common_height.\n for node in text_nodes:\n if node.has_attr(\"height\") and (int(node[\"height\"]) < most_common_height):\n node.extract()",
"def init_fragments():\n for word in fileinput.input(dictionary_file):\n word = word.strip()\n if len(word) < target_length:\n fragments.add(word)",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text",
"def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))",
"def _clean(sentence, subword_option):\n sentence = sentence.strip()\n if subword_option is not None and '@' in subword_option:\n subword_option_0 = subword_option.split('@')[0]\n subword_option_1 = subword_option.split('@')[1]\n else:\n subword_option_0 = None\n subword_option_1 = None\n # BPE\n if subword_option_0 == \"bpe\":\n sentence = re.sub(\"@@ \", \"\", sentence)\n\n # SPM\n elif subword_option_0 == \"spm\":\n sentence = u\"\".join(sentence.split()).replace(u\"\\u2581\", u\" \").lstrip()\n\n # speical for chinese\n if subword_option_1 == 'bpe':\n sentence = re.sub(\"@@ \", \"\", sentence)\n if subword_option_1 == 'space':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"<SPACE>\",\" \")\n if subword_option_1 == 'char':\n sentence = sentence.replace(\"<SPACE>\", \"\")\n sentence = sentence.replace(\"@@\", \"\")\n sentence = sentence.replace(\" \",\"\")\n sentence = \" \".join(sentence)\n elif subword_option_1 == 'char2char':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \"\")\n sentence = \" \".join(sentence)\n elif subword_option_1 == 'char2word':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \" \")\n # sentence = \" \".join(sentence)\n elif subword_option_1 == 'hybrid':\n sentence = sentence.replace(\" @@ \", \"\")\n sentence = sentence.replace(\"@@ \", \"\")\n sentence = sentence.replace(\" @@\", \"\")\n elif subword_option_1 == 'hybrid2':\n sentence = sentence.replace(\" \", \"\")\n sentence = sentence.replace(\"@@\", \" \")\n return sentence",
"def clean(corpus):\n # Initiate clean_corpus\n clean_corpus = [] \n \n for speech in corpus:\n \n # Removes meaningless intro \n speech = speech[5:] \n\n for i in range(len(speech)):\n # Removes 'meaningless text hear (min:sec)\\n' at the beginning of each paragraph\n speech[i] = speech[i][speech[i].find('\\n') + 1:] \n # Replaces brackets with paranthesis\n speech[i] = speech[i].replace('[', '(') \n speech[i] = speech[i].replace(']', ')')\n # Removes meaningless text in parantheses\n speech[i] = re.sub(r'\\([^)]*\\)', '', speech[i]) \n\n # Join all of the paragraphs into one speech\n speech = ','.join(speech) \n\n clean_corpus.append(speech)\n \n # Combined all of the speeches into one document\n \n if len(clean_corpus) == 1:\n clean_corpus = clean_corpus[0]\n if len(clean_corpus) == 2:\n clean_corpus = clean_corpus[0] + clean_corpus[1]\n if len(clean_corpus) == 3:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2]\n if len(clean_corpus) == 8:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2] + clean_corpus[3] + clean_corpus[4] + \\\n clean_corpus[5] + clean_corpus[6] + clean_corpus[7]\n \n return clean_corpus",
"def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? \", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text",
"def prepare_for_char_n_gram(text):\n # 1. Go to lower case (only good for english)\n # Go to bytes_strings as I had issues removing all \\n in r\"\"\n clean = bytes(text.lower(), encoding=\"utf-8\")\n # 2. Drop \\n and \\t\n clean = clean.replace(b\"\\n\", b\" \")\n clean = clean.replace(b\"\\t\", b\" \")\n clean = clean.replace(b\"\\b\", b\" \")\n clean = clean.replace(b\"\\r\", b\" \")\n\n clean = clean.replace(b\"\\p\", b\" \")\n # 3. Replace english contractions\n for (pattern, repl) in patterns:\n clean = re.sub(pattern, repl, clean)\n # 4. Drop puntuation\n # I could have used regex package with regex.sub(b\"\\p{P}\", \" \")\n exclude = re.compile(b'[%s]' % re.escape(\n bytes(string.punctuation, encoding='utf-8')))\n clean = b\" \".join([exclude.sub(b'', token) for token in clean.split()])\n # 5. Drop numbers - as a scientist I don't think numbers are toxic ;-)\n clean = re.sub(b\"\\d+\", b\" \", clean)\n # 6. Remove extra spaces - At the end of previous operations we multiplied space accurences\n clean = re.sub(b'\\s+', b' ', clean)\n # Remove ending space if any\n clean = re.sub(b'\\s+$', b'', clean)\n # 7. Now replace words by words surrounded by # signs\n # e.g. my name is bond would become #my# #name# #is# #bond#\n # clean = re.sub(b\"([a-z]+)\", b\"#\\g<1>#\", clean)\n #clean = re.sub(b\" \", b\"# #\", clean) # Replace space\n #clean = b\"#\" + clean + b\"#\" # add leading and trailing #\n\n return str(clean, 'utf-8')",
"def tokenize_descriptions_with_threshold(input_file_path, output_file_path):\n if os.path.exists(output_file_path):\n print(\"Tokenized descriptions found. Will not be generated.\")\n return\n\n print(\"Generating tokenized descriptions\")\n f = open(output_file_path, 'a')\n with open(input_file_path, 'r') as file:\n word_count_threshold = 4\n word_counts = {}\n sequences = []\n for line in file:\n if line.strip():\n sequence = line.strip().replace(\" '\",\"'\").split()\n sequence[1:] = clean_tokens(sequence[1:])\n sequence.insert(1, '<START>')\n sequence.append('<END>')\n sequences.append(sequence)\n for w in sequence[1:]:\n word_counts[w] = word_counts.get(w, 0) + 1\n vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]\n for sequence in sequences:\n sequence[1:] = ['<UNK>' if x not in vocab else x for x in sequence[1:]]\n f.write(\",\".join(sequence) + \"\\n\") \n f.close()\n print(\"Finished generating tokenized descriptions\")"
]
| [
"0.5930999",
"0.56018895",
"0.54720354",
"0.54013246",
"0.5385498",
"0.53538865",
"0.5309747",
"0.5302773",
"0.530155",
"0.5279971",
"0.5275908",
"0.52461535",
"0.5241778",
"0.523378",
"0.5210483",
"0.5210375",
"0.52024615",
"0.51972234",
"0.51928794",
"0.51886725",
"0.5184129",
"0.517423",
"0.5150837",
"0.5145812",
"0.5114915",
"0.5110835",
"0.5086657",
"0.50844157",
"0.5081482",
"0.50673413"
]
| 0.6658864 | 0 |
Generate a small set of random templates for testing | def tst_random_set():
final_wave, final_spec, final_z = desi_qso_templates(
outfil='test_random_set.fits', N_perz=100, seed=12345) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multi_template():\n data = []\n data.extend([\"{}_data.json\".format(i) for i in range(50)])\n data.extend([\"{}_log.csv\".format(i) for i in range(50)])\n data.extend([\"filename_{}.py\".format(i) for i in range(50)])\n data.extend([\"stuff_{}.py\".format(i) for i in range(50)])\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)",
"def generate_random_tomogram_set(templates, criteria, number_of_tomograms, dim, seed=None, noise=False):\n if (seed == None):\n seed = random.randint(0, 2 ** 31 - 1)\n print('Using random seed: ', seed)\n random.seed(int(seed))\n np.random.seed(int(seed))\n\n for i in range(number_of_tomograms):\n yield generate_random_tomogram(templates, criteria, dim, noise)",
"def generate():",
"def pick_random(template):\n random.seed()\n result = {}\n for k, v in template.iteritems():\n if isinstance(v, dict):\n result[k] = pick_random(v)\n else:\n result[k] = random.choice(v)\n return result",
"def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l",
"def generate(count):\n return unpack_random_animals(generate_animals_randomly(count))",
"def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)",
"def simulate_fixtures(x_width = 36, y_height = 18, total = 250, x_offset = 0):\n locations = make_locations(x_width, y_height, total, x_offset)\n fixtures = []\n i, j = 0, 0\n count = 0\n for grid_loc in locations:\n strand = int(count >= total/2)\n address = j if strand else i\n pixels = 1\n data = {\"strand\": strand,\n \"address\": address,\n \"pixels\": pixels,\n \"pos1\": map_loc_to_pixel(grid_loc),\n \"pos2\": map_loc_to_pixel(grid_loc),\n \"grid_loc\": grid_loc}\n fixtures.append(Fixture(data))\n if not strand:\n i += 1\n else:\n j += 1\n count += 1\n return fixtures",
"def random_var_triplets(global_vars1, global_vars2, templates):\n m1, m2 = type_var_map(global_vars1), type_var_map(global_vars2)\n mapping = []\n for t in templates:\n if t not in m1:\n continue\n if t not in m2:\n continue\n random_tuples = random_tuple_list(m1[t], m2[t])\n for tup in random_tuples:\n mapping.append((tup[0], tup[1], random.choice(templates[t]), t))\n return mapping",
"def tempmap():\n rand_number = [random.randint(0, 9) for i in range(6)]\n rand_number_str = ''.join(map(str, rand_number))\n mapname = 'temp_' + rand_number_str\n maplist = grass.read_command('g.list', type='vector', mapset='.').split()\n while mapname in maplist:\n rand_number = [random.randint(0, 9) for i in range(6)]\n rand_number_str = ''.join(map(str, rand_number))\n mapname = 'temp_' + rand_number_str\n maplist = grass.read_command('g.list', type='vector', mapset='.').split()\n return mapname",
"def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")",
"def _generate_raw_environments(self, num, seed):",
"def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)",
"def example7(n):\n return mvmt.randomize(tile, n)",
"def generate_standard_tech_tiles(seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_standard_tiles = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n randomized_tiles = list()\n\n for _ in range(9):\n chosen_tile_index = random.randint(0, len(all_standard_tiles) - 1)\n randomized_tiles.append(all_standard_tiles[chosen_tile_index])\n all_standard_tiles.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def get_random_template(self, curr_template, curr_meter, pref_pos=None, exclude=None):\n #gets all templates which end in curr_template and curr_meter\n poss_templates = [item for item in self.templates.keys() if item[-len(curr_template):] == curr_template and self.templates[item].split('_')[-len(curr_meter.split('_')):] == curr_meter.split('_')]\n if exclude: poss_templates = [x for x in poss_templates if x.split()[0] not in exclude] #if exclude is given, remove those ones\n if len(poss_templates) == 0: return False\n if pref_pos:\n n = len(poss_templates)\n template_scores = np.zeros(n)\n for i in range(n): #iterates through all valid templates\n score = 0\n for pos in poss_templates[i].split(): #iterates through each POS in the template\n if pos in pref_pos: score += pref_pos[pos] #adds the weight of that POS\n template_scores[i] = score\n\n #to normalize make all values positive\n template_scores += abs(min(template_scores)) + 1\n #then ensure sums to 1 ie is a distribution\n template_scores /= sum(template_scores)\n return np.random.choice(poss_templates, p=template_scores) #Very nifty function which chooses from a list with a custom distribution\n return random.choice(poss_templates)",
"def mock_tweet():\n count = random.randint(70, 140)\n return ''.join([random.choice(string.letters) for i in xrange(count)])",
"def generate_T(number_obeservations):\n T = np.random.randint(0, 2, (number_obeservations,1))\n return T",
"def make_random_passphrase():\n import random\n prng = random.SystemRandom()\n templates = ['aababbab', 'aabbabab', 'aabbabba', 'abaabbab', 'abababab',\n 'abababba', 'ababbaab', 'ababbaba', 'abbaabab', 'abbaabba',\n 'abbabaab', 'abbababa', 'abbabbaa', 'baababab', 'baababba',\n 'baabbaab', 'baabbaba', 'babaabab', 'babaabba', 'bababaab',\n 'babababa', 'bababbaa', 'babbaaba', 'babbabaa']\n alphabet = {'a':\"aeiou\", 'b':list(\"bcdfghjklmnprsvwxyz\") + [\"ch\",\"ph\",\"st\"]}\n for n in (1,2,3):\n template = prng.choice(templates)\n password = \"\".join([prng.choice(alphabet[c]) for c in template])\n print password.capitalize() + prng.choice(\"0123456789\"),\n return 0",
"def generate_test_strings(nr_strings, alphabet, length):\n for _ in range(0, nr_strings):\n yield rng_string(alphabet, randint(0, length))",
"def testrandom(self):\n for i in range(100):\n AmuletAbility()",
"def generate_animals_randomly(count):\n animals = []\n for _ in range(count):\n # chooses an animal at random from one of the map keys\n animal_choice = np.random.choice(list(mappings.keys()))\n # generates a list of 1 animal and takes the first element\n animals.append(generate_animals(animal_choice, 1)[0])\n\n return animals # list of Animal objects",
"def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)",
"def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)",
"def test_random_small_sample(self):\n pop0 = []\n pop1 = [1]\n popmany = range(10)\n self.assertEqual(set(), random_small_sample(pop0, 0.80))\n self.assertEqual(set(pop1), random_small_sample(pop1, 0.80))\n self.assertEqual(set(popmany), random_small_sample(popmany, 1))\n self.assertEqual(set(pop0), random_small_sample(popmany, 0))\n popmany_50 = random_small_sample(popmany, 0.50)\n self.assertLess(len(popmany_50), len(popmany))\n self.assertGreater(len(popmany_50), 0)",
"def generate_advanced_tech_tiles(seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_advanced_tiles = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ,15]\n randomized_tiles = list()\n\n for _ in range(6):\n chosen_tile_index = random.randint(0, len(all_advanced_tiles) - 1)\n randomized_tiles.append(all_advanced_tiles[chosen_tile_index])\n all_advanced_tiles.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)",
"def Generate_Uniform( self, Spacing=None, Variation=0.75 ):\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n tpa = 0.0\n for t in trees:\n years = self.Data.Stand[s].Tree[t].Year.keys()\n tpa += self.Data.Stand[s].Tree[t].Year[years[0]].TPA\n #print tpa\n if( Spacing==None ):\n #tpa = self.Data.Stand[s].Year[15].TPA\n rows = math.floor( math.sqrt( 43560 ) / math.sqrt( 43560 / math.ceil( tpa ) ) )\n spacing = 208.71 / rows\n else:\n spacing = Spacing\n print( tpa, spacing )\n GRID = {}\n x = 5\n y = 5\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n if( x > 208.71 ):\n x = 5\n y += spacing\n if( y > 208.71 ):\n x = 5\n y = 5\n GRID[t] = (x,y)\n x += spacing\n for t in trees:\n g = int(random.uniform( 1, tpa))\n var = random.uniform( 0, Variation)\n ang = random.uniform( 0, 360 )\n (ox,oy) = self.Compute_Offset( ang, var)\n #print ox, oy\n (x,y) = GRID[g]\n self.Data.Stand[s].Tree[t].X = x+ox\n self.Data.Stand[s].Tree[t].Y = y+oy",
"def test_get_device_templates(self):\n pass",
"def test_generate_all_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )"
]
| [
"0.72083974",
"0.63701993",
"0.6368686",
"0.6321199",
"0.62644243",
"0.6257337",
"0.6201381",
"0.617634",
"0.6142078",
"0.6015824",
"0.6010889",
"0.59950966",
"0.5977103",
"0.597154",
"0.5879443",
"0.5858846",
"0.5847928",
"0.583918",
"0.5821612",
"0.58189946",
"0.58071667",
"0.58049893",
"0.5789044",
"0.5773003",
"0.5772692",
"0.57617307",
"0.5758779",
"0.5746818",
"0.57466006",
"0.57372934"
]
| 0.6923423 | 1 |
Prints a message that the restaurant is open. | def open_restaurant(self):
print(f"The restaurant is open.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_restaurant(self):\n msg = f\"{self.name} is open. Come on in!\"\n print(f\"\\n{msg}\")",
"def open_restaurant(self):\n\t\topen = f\"{self.restaurant_name} is now open.\"\n\t\tprint(f\"\\n{open}\")",
"def open_restaurant(self):\r\n\t\tprint(self.restaurant_name.title() + \" is open\")",
"def open_restaurant(self):\n\t\tprint(f\"The {self.restaurant_name} is open now.\")",
"def open_restaurant(self):\r\n print(\"The restaurant is open now \")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(self.name.title() + \" is now open!\")",
"def open_restaurant(self):\n print(self.name.title() + \" is now open!\")",
"def open_restaurant(self):\n print(self.name.title() + \" is now open!\")",
"def open_restaurant(self):\n print(self.name.title() + \" is now open!\")",
"def open_restaurant(self):\n\t\tprint(\"The restaurant is now open!\")",
"def open_restaurant(self):\n print(f\"\\nThe resturant {self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(f'The Restaurant {self.restaurant_name} is opened...')",
"def open_restaurant(self):\n msg = self.name + \" is open. Come on in!\"\n print(\"\\n\" + msg)",
"def open_restaurant(self):\n print(\"We're Open!\")",
"def open_restaurant(self):\n return \"Restaurante esta aberto\"",
"def open_restaurant(self):\n print(\"O Restaurante esta aberto\")",
"def open_restaurant(self):\n\t\tprint(\"restaurant is open\")",
"def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}.\")",
"def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}-style food.\")",
"def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")",
"def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")",
"def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())",
"def describe_restaurant(self):\n print(\"The Restaurant is called {} and offers {} cuisine.\".format(self.restaurant_name, self.cuisine_type))\n print(\"It has served {} clients.\".format(self.number_served))",
"def describe_restaurant(self):\n\t\tprint(f\"{self.restaurant_name.title()} serves {self.cuisine_type}.\")",
"def describe_restaurant(self):\n\t\tdetails = f\"{self.restaurant_name} is a {self.cuisine_type} restaurant.\"\n\t\tprint(f\"\\n{details}\")",
"def describeRestaurant(self):\n print (f\"{self.name} has the best {self.cuisineType}\")",
"def check_open():\n print(\"***** Check if Business is Open/Closed *****\")\n while True:\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n if business_object['is_open'] == 1:\n print(\"This business is open!\")\n else:\n print(\"This business is closed!\")\n\n print()\n\n print_business(business_object)"
]
| [
"0.84129494",
"0.83964026",
"0.83871007",
"0.8371406",
"0.8318077",
"0.8231593",
"0.8231593",
"0.8231593",
"0.8207684",
"0.8207684",
"0.8207684",
"0.8207684",
"0.81987953",
"0.81586903",
"0.8079485",
"0.80655825",
"0.7935586",
"0.7746546",
"0.7586863",
"0.7568593",
"0.6807336",
"0.6760241",
"0.66971165",
"0.6481739",
"0.64014405",
"0.63801587",
"0.62512994",
"0.61640805",
"0.6125222",
"0.6122222"
]
| 0.8771324 | 0 |
Remove duplicates in a 2D list. | def remove_dupl_2d(arr):
arr_len = len(arr)
idx = 0
unique = set()
while idx < arr_len:
if tuple(arr[idx]) in unique:
del arr[idx]
arr_len -= 1
continue
unique.add(tuple(arr[idx]))
idx += 1
return arr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unique_elements_from_2D_list(list_2d):\n return list(set(flatten_2D_list(list_2d)))",
"def remove_duplicates(list):\n x = 0\n while x < len(list):\n y = x + 1\n while y < len(list):\n if list[x] == list[y]:\n del list[y]\n else:\n y += 1\n x += 1\n return list",
"def remove_duplicates(lst):\n (els, inds) = np.unique(lst, return_index=True)\n out = np.zeros(lst.shape, dtype=lst.dtype)\n out[inds] = els\n return out",
"def deduplicate_list(lst):\n return list(set(lst))",
"def remove_duplicates(mylist):\n return list(set(mylist))",
"def removeDuplicates(list):\n\treturn set((item for item in list))",
"def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' %(removed,plural_or_not(removed)))\r\n return ret",
"def remove_consequetive_duplicates(your_list):\n out = [v for i, v in enumerate(your_list) if i == 0 or v != your_list[i-1]]\n if type(your_list) == np.ndarray:\n return np.array(out)\n return out",
"def removeDuplicates(list):\n\treturn Set((item for item in list))",
"def remove_duplicates(lst):\n\tnew_lst = list()\n\tfor item in lst:\n\t\tif item not in new_lst:\n\t\t\tnew_lst.append(item)\n\treturn new_lst",
"def remove_duplicates(somelist):\n return set(somelist)",
"def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli",
"def unique_list(input_list):\n output_list = []\n if len(input_list) > 0:\n dim = _sp.shape(input_list)[1]\n for i in input_list:\n match = False\n for j in output_list:\n if dim == 3:\n if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:\n match = True\n elif dim == 2:\n if i[0] == j[0] and i[1] == j[1]:\n match = True\n elif dim == 1:\n if i[0] == j[0]:\n match = True\n if match is False:\n output_list.append(i)\n return output_list",
"def dedup_list(l):\n return list(set(l))",
"def duplicates(ls: list):\n\n seen = set([])\n dups = set([])\n\n for x in ls:\n if x in seen:\n dups.add(x)\n else:\n seen.add(x)\n\n return dups",
"def dedup(lst):\n new_lst = []\n seen = set()\n for elem in lst:\n if elem not in seen:\n new_lst.append(elem)\n seen.add(elem)\n\n return new_lst",
"def list_remove_duplicates(l):\n seen = set()\n seen_add = seen.add\n return [x for x in l if not (x in seen or seen_add(x))]",
"def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]",
"def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList",
"def dedup_list(my_list):\r\n new_list = []\r\n for elem in my_list:\r\n if elem not in new_list:\r\n new_list.append(elem)\r\n return new_list",
"def remove_duplicates(list1):\n #iterative, not recursive\n if len(list1) == 0:\n return list1\n new_list = []\n new_list.append(list1[0])\n for item in list1[1:]:\n if item != new_list[-1]:\n new_list.append(item)\n return new_list",
"def remove_duplicates(lst):\n lst.sort()\n lst_without_duplicates = [x for (x, _) in groupby(lst)]\n num_removed = len(lst) - len(lst_without_duplicates)\n print(\"Removed %d duplicates!\" % num_removed)\n return lst_without_duplicates",
"def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out",
"def remove_duplicates_array(self, array: List) -> List:\n seen = set()\n seen_add = seen.add\n return [x for x in array if not (x in seen or seen_add(x))]",
"def dedupe_list(input):\n return list(set(input))",
"def duplicated(list):\n u, c = np.unique(list, return_counts=True)\n dup = u[c > 1]\n return dup",
"def remove_duplicates(my_list):\n result = []\n for item in my_list:\n if item not in result:\n result.append(item)\n return result",
"def remove_duplicates(a):\n b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))\n dedup = np.unique(b).view(a.dtype).reshape(-1, a.shape[1])\n return dedup",
"def remove_duplicates(list1):\r\n if len(list1) == 1 or len(list1) == 0:\r\n return [item for item in list1]\r\n else:\r\n if list1[-1] == list1[-2]:\r\n return remove_duplicates(list1[:-1])\r\n else:\r\n new_list = remove_duplicates(list1[:-1])\r\n new_list.append(list1[-1])\r\n return new_list",
"def remove_duplicate_in_list(liste, column_name):\n return list(set([row[column_name] for row in liste]))"
]
| [
"0.77904236",
"0.7317339",
"0.7272788",
"0.70233446",
"0.6959906",
"0.6949109",
"0.69180614",
"0.6898379",
"0.6891898",
"0.6872626",
"0.686393",
"0.68368906",
"0.6829106",
"0.6820149",
"0.6818071",
"0.6759585",
"0.6757643",
"0.66838396",
"0.66789544",
"0.66774946",
"0.6656065",
"0.66402674",
"0.6624403",
"0.6600495",
"0.65770495",
"0.6558952",
"0.6549767",
"0.6533307",
"0.65306765",
"0.6502627"
]
| 0.75136393 | 1 |
Initialise verboseprint(): if verbose, bind it to vfunc (print by default); otherwise bind it to nvfunc, or to a no-op when nvfunc is not given. | def init_verbose_print(verbose=True, vfunc=print, nvfunc=None):
global verboseprint
if verbose:
verboseprint = vfunc
else:
if not nvfunc:
verboseprint = lambda *a, **k: None
else:
verboseprint = nvfunc
return verboseprint | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verbose_print(verbose, print_function=None):\n\n if verbose:\n return print_function or print\n else:\n def vprint(*args, **kwars):\n pass\n return vprint",
"def set_verboseprint(func=misc.init_verbose_print(verbose=True, vfunc=print, nvfunc=misc.log)):\n global verboseprint\n verboseprint = func\n ml.verboseprint = verboseprint\n transform.verboseprint = verboseprint",
"def vprint(string):\n global verbose\n if verbose:\n print(string)",
"def _verbose(self,text):\n if self.verbose:\n print(text)",
"def verbose_print(msg: str = '') -> None:\n assert isinstance(msg, str)\n if __verbose:\n print(msg)",
"def v_print(msg):\n if (VERBOSE == 1):\n print(msg)",
"def printv(self, *arg):\n if self.verbose:\n print(*arg)",
"def vprint(msg):\n if defaults.verbose:\n print(msg)",
"def verbosePrint(string, nonl=False):\n if not verbose:\n return\n if nonl:\n print(string, end=' ')\n else:\n print(string)",
"def printv(self, string, **kwargs):\n if self.verbose:\n print(string, **kwargs)",
"def vprint (*args, take_action=False, **kwargs):\n\n take_action = take_action and not opts.take_action\n\n if opts.verbose or take_action:\n print (*args, **kwargs)\n\n return take_action",
"def _vprint(self, string):\n if self.verbose:\n print(string)",
"def init_vprinting(**kwargs):\n kwargs['str_printer'] = vsstrrepr\n kwargs['pretty_printer'] = vpprint\n kwargs['latex_printer'] = vlatex\n init_printing(**kwargs)",
"def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text",
"def verbose_print(text,verbose_level):\n if Args.verbose >= verbose_level:\n print '\\t' * (verbose_level-1) + text",
"def printmsg(msg, verbose):\n if verbose:\n print(msg)\n\n return None",
"def print_verbose(args, msg):\n if args.verbose:\n print(msg)",
"def print_verbose(message:str):\n if params['verbose']:\n print(message)\n return",
"def test():\n v_print(1, \"-vvv Verbose 1 - INFO\")\n v_print(2, \"-vv Verbose 2 - WARN\")\n v_print(3, \"-v Verbose 3 - ERROR\")",
"def print_if_verbose(self, log):\n\n if self.verbose:\n print(log)\n return log",
"def pr(string, verbose):\n if(verbose):\n print(string)",
"def verbose():\n GLOBAL['VERBOSE'] = True",
"def _default_vprint_worker(*args, **kwargs):\r\n print(*args, **kwargs)",
"def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)",
"def print_optional(string, print_in_log):\n # type: (str, bool) -> None\n if print_in_log:\n print(string)",
"def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)",
"def __init__(self, enable_verbose=True):\n self.enable_verbose = enable_verbose\n if self.enable_verbose:\n self.show = self._print_screen\n else:\n self.show = self._not_print_screen",
"def verbose():\n return _verbose",
"def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)",
"def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line"
]
| [
"0.7538737",
"0.7518328",
"0.6643926",
"0.66073173",
"0.6594862",
"0.65729207",
"0.65286297",
"0.63692355",
"0.6299959",
"0.629843",
"0.6282386",
"0.6246638",
"0.612347",
"0.6121666",
"0.6113824",
"0.6102391",
"0.6084847",
"0.598136",
"0.59381145",
"0.5932641",
"0.5875921",
"0.5873893",
"0.5804555",
"0.5748319",
"0.57230777",
"0.5712258",
"0.5635065",
"0.5597259",
"0.5588121",
"0.5580226"
]
| 0.8172151 | 0 |
Returns a list of all abundant numbers less than an upper limit | def getAbundantNumbers(upperLimit):
result = []
for i in range(1, upperLimit):
if d(i) > i:
result.append(i)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res",
"def non_abundant_sums():\n # the sum of divisors of every number\n divisor_sum = [0] * LIMIT\n for i in range(1, LIMIT):\n for j in range(i * 2, LIMIT, i):\n divisor_sum[j] += i\n # abundant numbers\n abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]\n\n expressible = [False] * LIMIT\n for i in abundant_nums:\n for j in abundant_nums:\n if i + j < LIMIT:\n expressible[i + j] = True\n else:\n break\n ans = sum(i for (i, x) in enumerate(expressible) if not x)\n return str(ans)",
"def make_random_ints_no_dups(num, lower_bound, upper_bound):\n result = []\n rng = random.Random()\n for i in range(num):\n while True:\n candidate = rng.randrange(lower_bound, upper_bound)\n if candidate not in result:\n break\n result.append(candidate)\n return result",
"def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers",
"def num_array(lower_limit = 0, upper_limit = 5, increment = 1):\n numbers = []\n while lower_limit < upper_limit:\n numbers.append(lower_limit)\n lower_limit += increment\n return numbers",
"def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans",
"def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)",
"def sum_amicable(limit):\n\n def find_amicable_pair(n):\n check_n= 0\n potential_half = 0\n for i in range(1,n):\n if n % i == 0:\n potential_half += i\n for i in range(1, potential_half):\n if potential_half % i == 0:\n check_n += i\n if check_n == n and n != potential_half: # exclude self amicable\n result.append(n)\n result.append(potential_half)\n\n result = []\n for num in range(1, limit):\n if num not in result:\n find_amicable_pair(num)\n return sum(result)",
"def filterCnts(cnts, threshold = 5):\n\tc = []\n\tfor item in cnts:\n\t\tif threshold < len(item):\n\t\t\tc.append(item)\n\treturn c",
"def sat(n: int, nums=[15, 27, 102], upper_bound=5000):\n return all(n % i == 0 for i in nums) and n <= upper_bound",
"def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit",
"def Z_most_abundant(self) -> list[Integral]:\n if np.any(np.isnan(self.ionic_fractions)):\n raise ParticleError(\n f\"Cannot find most abundant ion of {self.base_particle} \"\n f\"because the ionic fractions have not been defined.\"\n )\n\n return np.flatnonzero(\n self.ionic_fractions == self.ionic_fractions.max()\n ).tolist()",
"def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):\n idxs = np.argsort(-confs)\n selected = []\n for idx in idxs:\n if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):\n continue\n selected.append(idx)\n if len(selected) >= top_k:\n break\n return selected",
"def conceptcover(bin_arr, limit=1, uncovered=0.1):\n arr = np.copy(bin_arr)\n arr_sum = np.sum(arr)\n result = []\n while True:\n k = kernel(arr)\n i = intent(bin_arr, k)\n e = extent(bin_arr, i)\n if len(e)*len(i) < limit or (e, i) in result: break\n result.append((e, i))\n arr = removed(arr, e, i)\n if np.sum(arr)/arr_sum < uncovered: break\n return result",
"def sum_amnicable(limit):\n return sum(map(lambda num: num * is_amnicable(num), range(2, limit)))",
"def how_many_5(numbers):\r\n # Modify example to take argument that specifies threshold\r\n return sum( 1 for number in numbers if number > 5 )",
"def primes_below_thresh(thresh):\n primes_lookup = {n: True for n in range(2, thresh)}\n for n in range(2, thresh):\n if primes_lookup[n]:\n for tick_off in range(n+n, thresh, n):\n primes_lookup[tick_off] = False\n\n return sorted((n for n, is_prime in primes_lookup.items() if is_prime))",
"def gen_triangle_numbers(limit):\n n = 1\n tris = []\n while 0.5*n*(n + 1) < limit:\n tris.append(int(0.5*n*(n + 1)))\n n += 1\n return tris",
"def random_int_below(upper_bound):\r\n \r\n try:\r\n upper_bound = int(upper_bound)\r\n except ValueError:\r\n raise TypeError('number should be an integer')\r\n \r\n if upper_bound <= 0:\r\n raise ValueError('number must be greater than zero')\r\n \r\n \r\n # If upper_bound == 1, the math_log call will loop infinitely.\r\n # The only int in [0, 1) is 0 anyway, so return 0 here.\r\n # Resolves bug #927\r\n if upper_bound == 1:\r\n return 0\r\n \r\n k = int(1.00001 + math_log(upper_bound - 1, 2.0)) # 2**k > n-1 > 2**(k-2)\r\n r = random_nbit_int(k)\r\n while r >= upper_bound:\r\n r = random_nbit_int(k)\r\n return r",
"def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]",
"def get_most_abundant(abundances, xxx):\n abundant = []\n\n for a in abundances:\n if a.count('|') > 0:\n abundant.append((float(abundances[a]), a.replace('|', '.')))\n elif a.count('.') > 0:\n abundant.append((float(abundances[a]), a))\n\n abundant.sort(reverse=True)\n return abundant[:xxx]",
"def genRandomIntListWithinRange(size, minLim, maxLim):\n\tvalues = set()\n\tfor i in range(size):\n\t\tval = randint(minLim, maxLim)\n\t\twhile val not in values:\n\t\t\tvalues.add(val)\n\treturn list(values)",
"def omega_primes(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(2, upper + 1):\n if nums[i] != 0: continue\n for j in range(i, upper + 1, i):\n nums[j] += 1\n return nums",
"def _get_genome_amounts_uniform(probability, max_genome_amount):\n\t\tassert isinstance(probability, (int, float))\n\t\tassert 0 <= probability <= 1\n\t\tassert isinstance(max_genome_amount, int)\n\n\t\tfinal_amounts = []\n\t\twhile sum(final_amounts) < max_genome_amount:\n\t\t\tif random.uniform(0, 1) < probability:\n\t\t\t\tfinal_amounts.append(1)\n\t\t\telse:\n\t\t\t\tamount = 1 + random.randint(1, 3)\n\t\t\t\tfinal_amounts.append(amount)\n\n\t\tfinal_amounts[-1] -= sum(final_amounts) - max_genome_amount\n\t\treturn final_amounts",
"def pickingNumbers(a):\n a = sorted(a)[::-1]\n len_a = len(a)\n counts = []\n for ind, ele in enumerate(a):\n count = 0\n for i in range(ind, len_a):\n if ele - a[i] < 2:\n count += 1\n else:\n break\n counts.append(count)\n return max(counts)",
"def get_triangle_numbers(max_score):\n l=[]\n n = 1\n t_n = triangular_number(n)\n while t_n <= max_score:\n l.append(t_n)\n n += 1\n t_n = triangular_number(n)\n return l",
"def where_above(lst, limit):\n return [x for x in lst if x > limit]",
"def loto() -> List[int]:\n numeros = []\n nbre_valeurs = 6\n val_min = 1\n val_max = 49\n\n nbre_elements = 0\n while nbre_elements <= nbre_valeurs:\n numero = random.randint(val_min, val_max)\n if numero not in numeros:\n numeros.append(numero)\n nbre_elements += 1\n\n return numeros",
"def task8_missing_number(num):\n check_list = list(range(1, max(num) + 1))\n result = list(set(check_list) - set(num))\n return result",
"def zero_upper_range(x, upper_threshold):\r\n x = np.asarray(x, dtype=complex)\r\n count = 0\r\n for i in range(0, x.shape[0], 1):\r\n if x[i].real > upper_threshold:\r\n x[i] = complex(0, 0j)\r\n count+=1\r\n print(\"zeroed samples: \", count)\r\n return x;"
]
| [
"0.6772986",
"0.65578127",
"0.6472297",
"0.6274483",
"0.61667085",
"0.6099296",
"0.6032808",
"0.6027965",
"0.5983113",
"0.59658635",
"0.59558725",
"0.5935221",
"0.5906062",
"0.58387524",
"0.5832162",
"0.5813864",
"0.58016706",
"0.579645",
"0.57799524",
"0.57706046",
"0.57606244",
"0.5750397",
"0.57434577",
"0.57306665",
"0.5720922",
"0.5708397",
"0.57014894",
"0.5672474",
"0.56542295",
"0.56414515"
]
| 0.8741688 | 0 |
Returns true iff the number n can be written as the sum of two abundant numbers. | def canBeWritten(n):
for a in abundantNumbersList:
if a >= n: break
if (n - a) in abundantNumbersSet:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_abundant(n):\r\n if sum_proper_divisors(n) > n:\r\n return True\r\n else:\r\n return False",
"def isAbundant(n):\n\treturn sumProperDivisors(n, PRIMES) > n",
"def is_abundant(num: int) -> bool:\n return sum(divisors(num)) - num > num",
"def is_abundant(n: int, print_div: bool = False) -> bool:\n\n divisors = proper_divisors(n)\n if print_div:\n print(f\"Divisors of {n}: {divisors}\")\n\n divisor_sum = sum(divisors) if divisors else 0\n return divisor_sum > n",
"def is_abundant(check_number):\n if number < sum(proper_divisors(check_number)):\n return True\n else:\n return False",
"def is_abundant_number(x):\n return sum(proper_divisors(x)) > x",
"def is_deficient(n):\r\n if sum_proper_divisors(n) < n:\r\n return True\r\n else:\r\n return False",
"def is_abundant_num(num: int, get_divisors_func=simple_get_divisors) -> bool:\n divisors = get_divisors_func(num)\n return sum(divisors) > num",
"def _can_do_sum_of_squares(n, k):\n if k < 1:\n return False\n if n < 0:\n return False\n if n == 0:\n return True\n if k == 1:\n return is_square(n)\n if k == 2:\n if n in (1, 2):\n return True\n if isprime(n):\n if n % 4 == 1:\n return 1 # signal that it was prime\n return False\n else:\n f = factorint(n)\n for p, m in f.items():\n # we can proceed iff no prime factor in the form 4*k + 3\n # has an odd multiplicity\n if (p % 4 == 3) and m % 2:\n return False\n return True\n if k == 3:\n if (n//4**multiplicity(4, n)) % 8 == 7:\n return False\n # every number can be written as a sum of 4 squares; for k > 4 partitions\n # can be 0\n return True",
"def sat(nums: List[int], n=12345):\n return len(nums) <= 4 and sum(i ** 2 for i in nums) == n",
"def sat(nums: List[int]):\n a, b, c, n = nums\n return (a ** n + b ** n == c ** n) and min(a, b, c) > 0 and n > 2",
"def perfectd(n: int) -> bool:\n if sum(divisors(n)) - n == n:\n return True\n else:\n return False",
"def is_perfect(n):\r\n if sum_proper_divisors(n) == n:\r\n return True\r\n else:\r\n return False",
"def sat(n: int):\n return pow(2, n, n) == 3",
"def has_sum(total, n, m):\n if total == 0 or total == m or total == n:\n return True\n elif total < min(m, n):\n return False\n return has_sum(total - n, n, m) or has_sum(total - m, n, m)",
"def sat(n: int, nums=[77410, 23223, 54187], lower_bound=2):\n return all(i % n == 0 for i in nums) and n >= lower_bound",
"def has_single_eligible_pair(n):\n pairs = [(a,b) for a,b in pairs_of_factors(n) if 1<a<100 and 1<b<100]\n num_eligible_pairs = sum(not is_sum_of_primes(a+b) for a,b in pairs)\n return len(pairs) > 1 and num_eligible_pairs==1",
"def is_amnicable(num):\n\n # Because d(m) = d(n) = s(m) + s(n)\n # so d(s(m) - m) = d(n)\n result = divisor(1, num)\n\n # s(n) and n is supposed to be\n # different numbers so not amnicable\n if 2 * num == result:\n return False\n\n result2 = divisor(1, result - num)\n return result == result2",
"def is_amicable(num: int) -> bool:\n friend = sum(divisors(num)) - num\n # Only those in pairs are amicable numbers. If the sum is the number itself, it's a perfect number\n return friend != num and sum(divisors(friend)) - friend == num",
"def McNuggets(n):\n \n '''if n == 0:\n return True\n for i in (6, 9, 20):\n if n >= i and McNuggets(n - i):\n return True\n return False\n '''\n \n for a in range(0,n):\n for b in range(0,n):\n for c in range(0,n):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def is_sum(check_list, number):\n for i, num1 in enumerate(check_list):\n for num2 in check_list[i+1:]:\n if num1 + num2 == number and num1 != num2:\n return True\n return False",
"def sat(n: int):\n i = n ** 17 + 9\n j = (n + 1) ** 17 + 9\n\n while i != 0: # compute gcd using Euclid's algorithm\n (i, j) = (j % i, i)\n\n return n >= 0 and j != 1",
"def check_fermat(a, b, c, n):\n if n > 2 and ((a**n + b**n) == c**n):\n print('Holy smokes, Fermat was wrong!')\n else:\n print('No, that doesn’t work.')",
"def is_composite(n, s, d):\n a = random.randint(2, n)\n if pow(a, d, n) == 1:\n return False\n for r in range(s):\n ind = (2 ** r) * d\n if pow(a, ind, n) == n - 1:\n return False\n return True",
"def check_sum(num_list: list) -> bool:\r\n num_list_len = len(num_list)\r\n\r\n if num_list_len <= 0:\r\n return False\r\n\r\n for i in range(num_list_len):\r\n for j in range(i+1, num_list_len):\r\n if num_list[i] + num_list[j] == 0:\r\n return True\r\n return False",
"def sat(n: int, nums=[15, 27, 102], upper_bound=5000):\n return all(n % i == 0 for i in nums) and n <= upper_bound",
"def _is_safe_size(n):\n n = int(n)\n\n if n == 0:\n return True\n\n # Divide by 3 until you can't, then by 5 until you can't\n for c in (3, 5):\n while n % c == 0:\n n //= c\n\n # Return True if the remainder is a power of 2\n return not n & (n-1)",
"def isPowerOfTwo(self, n: int) -> bool:\n if n <= 0:\n return False\n return bin(n).count('1') == 1",
"def has_sum(total, n, m):\n if total == 0:\n return True\n elif total < 0:\n return False\n\n # keep decrementing.\n # if decrement goes evenly to 0 then we printed the exact total\n # otherwise, if it decremented into a negative, then we couldn't\n # print exactly the total\n return has_sum(total - n, n, m) or has_sum(total - m, m, n)",
"def McNuggets(n):\n a=0\n b=0\n c=0\n result=0\n while result <= n:\n result = 6*a + 9*b + 20*c\n if result > n:\n return False\n elif result == n:\n return True\n else:\n a+=1\n ..."
]
| [
"0.7858181",
"0.76310307",
"0.7264005",
"0.7210944",
"0.68021965",
"0.6637436",
"0.6592846",
"0.6582656",
"0.6508782",
"0.64454526",
"0.63586575",
"0.63261133",
"0.6279456",
"0.623999",
"0.62178534",
"0.616031",
"0.61566705",
"0.61406416",
"0.61299574",
"0.6128495",
"0.6111292",
"0.59589535",
"0.59412515",
"0.592138",
"0.5920171",
"0.58928436",
"0.5879885",
"0.5875397",
"0.5870067",
"0.5855936"
]
| 0.7694291 | 1 |
For making a new request for a ride. | def ride():
from services import taxi_service
customer_id = request.form['customer_id']
res = taxi_service.ride(customer_id)
return json_response(res) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_request(self, **kwargs):\n if kwargs['ride_id'] in app.database['Rides']:\n request_ids = [x for x in app.database['Requests']]\n if request_ids:\n request_id = max(request_ids) + 1\n else:\n request_id = 1\n self.new_request = Request(\n request_id=request_id,\n ride_id=kwargs['ride_id'],\n status='available'\n )\n request = self.new_request.__dict__\n app.database['Requests'][request_id] = request\n message = 'Ride request created successfully'\n attributes = {\n 'location': '/api/v1/rides/' + str(request_id) + '/requests'\n }\n response = Response.success(message=message, attributes=attributes)\n return response, 201\n meta = {'errors': 1,\n 'source': '/' + str(kwargs['ride_id']) + '/requests'}\n message = 'NOT FOUND'\n return Response.failed(meta=meta, message='NOT FOUND',\n info='The ride requested does not exist'), 404",
"def test_make_ride_request_if_signed_in_success(self):\n # signup\n passenger = {\n \"Email\": \"[email protected]\",\n \"Type\": \"passenger\",\n \"Password\": \"pass234\",\n \"Confirm Password\": \"pass234\"\n }\n res = self.client().post('/api/v1/auth/register', data=passenger)\n self.assertEqual(res.status_code, 201)\n # login\n logins = {\"Email\": \"[email protected]\", \"Password\": \"pass234\"}\n res = self.client().post('api/v1/auth/login', data=logins)\n self.assertEqual(res.status_code, 200)\n\n # get authorization token\n token = json.loads(res.data.decode('UTF-8'))\n access_token = token.get('access-token')\n\n # request ride\n res = self.client().post('api/v1/rides/1/requests',\n data=self.request, headers={'Authorization': 'Bearer '+access_token})\n self.assertEqual(res.status_code, 200)",
"def get_ride_request(reqID):\n req = RideRequest.query.get(reqID)\n return req",
"def post(self, current_user):\n data = request.json\n origin = data['origin']\n destination = data['destination']\n date = data['date']\n \n ride = Ride(origin=origin, destination=destination, date=date)\n try:\n all_rides = ride.fetch_all()\n for this_ride in all_rides:\n if this_ride['origin'] == ride.origin and this_ride['destination'] == ride.destination and this_ride['date'] == ride.date and this_ride['driver'] == current_user[2]:\n response = {\n 'message': 'This ride already exists.',\n }\n return make_response(jsonify(response)), 202\n driver = current_user[2]\n ride.insert(driver)\n\n response = {\n 'message': 'You offered a ride successfully.',\n }\n return make_response(jsonify(response)), 201\n\n except Exception as e:\n response = {\n 'message': str(e)\n }\n return make_response(jsonify(response)), 500",
"def join(self, request, *args, **kwargs):\n\n ride = self.get_object()\n\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(\n ride,\n data={\n 'passenger': request.user.pk\n },\n context=self.get_serializer_context(),\n partial=True\n )\n\n if serializer.is_valid(raise_exception=True):\n ride = serializer.save()\n data = RideModelSerializer(ride).data\n\n return Response(data=data, status=HTTP_200_OK)",
"def finish(self, request, *args, **kwargs):\n\n ride = self.get_object()\n\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(\n ride,\n data={\n 'is_active': False,\n 'current_time': timezone.now()\n },\n context=self.get_serializer_context(),\n partial=True\n )\n\n if serializer.is_valid(raise_exception=True):\n ride = serializer.save()\n\n data = RideModelSerializer(ride).data\n\n return Response(data=data, status=HTTP_200_OK)",
"def ride(self, ride):\n\n self._ride = ride",
"def NewRide(self, ride, startTime):\n\n self.currRide = ride\n self.waitTimes.append(int(startTime) - int(self.idleTime))",
"def qualify(self, request, *args, **kwargs):\n\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(\n data=request.data,\n context=self.get_serializer_context(),\n )\n\n if serializer.is_valid(raise_exception=True):\n ride = serializer.save()\n\n data = RideModelSerializer(ride).data\n\n return Response(data=data, status=HTTP_200_OK)",
"def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)",
"def test_get_ride_if_exists_success(self):\n ride = {\n \"Destination\": \"Meru\",\n \"Origin\": \"Kutus\",\n \"Time\": \"9:40\",\n \"Date\": \"25/8/2018\",\n \"Ride Name\": \"Toyota\",\n \"Capacity\": \"7\"\n }\n driver = {\n \"Email\": \"[email protected]\",\n \"Type\": \"passenger\",\n \"Password\": \"pass234\",\n \"Confirm Password\": \"pass234\"\n }\n res = self.client().post('/api/v1/auth/register', data=driver)\n self.assertEqual(res.status_code, 201)\n logins = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass234\"\n }\n response = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(response.status_code, 200)\n token = json.loads(response.data.decode('UTF-8'))\n access_token = token.get('access-token')\n\n result = self.client().post('/api/v1/rides', data=ride,\n headers={'Authorization': 'Bearer '+access_token})\n self.assertEqual(201, result.status_code)\n\n res = self.client().get('/api/v1/rides/1', headers={'Authorization': 'Bearer '+access_token})\n self.assertEqual(res.status_code, 200)",
"def create(self,data):\n circle = self.context['circle']\n ride = Rides.objects.create(**data,offered_in=circle)\n\n #update informacion del Circle\n circle.rides_offered+=1\n circle.save()\n\n # Membership update \n membership=self.context['membership'] \n membership.rides_offered+=1\n membership.save()\n\n #profile\n profile = data['offered_by'].profile #es un objeto de tipo user\n profile.rides_offered+=1\n profile.save()\n\n return ride",
"def new_request(self, **kwargs):\n url = self.config[\"base_url\"]\n\n if kwargs.get(\"user_id\") is not None:\n url = url + kwargs[\"user_id\"]\n\n self.req = request.Request(host=self.config[\"host\"], protocol=constant.HTTP, url=url,\n method=kwargs[\"method\"], time_out=kwargs[\"timeout\"])\n\n return self",
"def _make_request(self, method, path, **kwargs):\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'User-Agent': USER_AGENT,\r\n }\r\n headers.update(kwargs.get('headers', {}))\r\n kwargs['headers'] = headers\r\n kwargs['auth'] = self.auth\r\n\r\n url = '/'.join((self.endpoint, 'v1', self.account_id, path))\r\n resp = requests.request(method, url, **kwargs)\r\n resp.raise_for_status()\r\n return resp",
"def _request(self, method, path, *args, **kwargs):\n path = '/courses/{course_id}/' + path\n path = path.format(**{n: getattr(self, n) for n in ('course_id', 'course_num', 'course_run', 'course_org')})\n logging.debug(path)\n return getattr(self.client, method)(path, *args, **kwargs)",
"def _trip_request(self, triprequest: Trip.Request):\n now = datetime.now()\n\n assert triprequest.walk_speed in ('slow', 'normal', 'fast')\n\n linetypes = triprequest.linetypes\n if linetypes is None:\n linetypes = LineTypes()\n\n departure = triprequest.departure\n arrival = triprequest.arrival\n\n if isinstance(departure, datetime):\n departure = RealtimeTime(departure)\n if isinstance(arrival, datetime):\n arrival = RealtimeTime(arrival)\n\n if departure is not None:\n deparr = 'dep'\n time_ = departure.livetime\n elif arrival is not None:\n deparr = 'arr'\n time_ = arrival.livetime\n else:\n deparr = 'dep'\n time_ = now\n\n max_changes = triprequest.max_changes\n if max_changes is None:\n max_changes = 9\n\n post = {\n 'changeSpeed': triprequest.walk_speed,\n 'command': '',\n 'coordOutputFormat': 'WGS84',\n 'imparedOptionsActive': 1,\n 'includedMeans': 'checkbox',\n 'itOptionsActive': 1,\n 'itdDateDay': time_.day,\n 'itdDateMonth': time_.month,\n 'itdDateYear': time_.year,\n 'itdTimeHour': time_.hour,\n 'itdTimeMinute': time_.minute,\n 'itdTripDateTimeDepArr': deparr,\n 'language': 'de',\n 'locationServerActive': 1,\n 'maxChanges': max_changes,\n 'name_via': '', # .decode('utf-8').encode('iso-8859-1'),\n 'nextDepsPerLeg': 1,\n 'place_via': '', # decode('utf-8').encode('iso-8859-1'),\n 'ptOptionsActive': 1,\n 'requestID': 0,\n 'routeType': 'LEASTTIME', # {'speed':'LEASTTIME', 'waittime':'LEASTINTERCHANGE', 'distance':'LEASTWALKING'}[select_interchange_by],\n 'sessionID': 0,\n 'type_via': 'stop',\n 'useRealtime': 1,\n 'outputFormat': 'XML'\n }\n\n # if use_realtime: post['useRealtime'] = 1\n\n if 'train' in linetypes:\n post['inclMOT_0'] = 'on'\n\n if 'train.longdistance.highspeed' in linetypes:\n post['lineRestriction'] = 400\n elif 'train.longdistance' in linetypes:\n post['lineRestriction'] = 401\n else:\n post['lineRestriction'] = 403\n\n for linetype, number in (('urban', '1'), ('metro', '2'), ('metro', '3'),\n ('tram', '4'), ('bus.city', '5'), ('bus.regional', '6'),\n ('bus.express', '7'), ('suspended', '8'), ('ship', '9'),\n ('dialable', '10'), ('other', '11')):\n if linetype in linetypes:\n post['inclMOT_' + number] = 'on'\n\n if triprequest.wayduration_origin or triprequest.wayduration_destination:\n post['useProxFootSearch'] = 1\n\n waytypes = {'walk': 100, 'bike': 101, 'car': 104, 'taxi': 105}\n post['trITDepMOT'] = waytypes[str(triprequest.waytype_origin)]\n post['trITArrMOT'] = waytypes[str(triprequest.waytype_destination)]\n\n post['trITDepMOTvalue%d' % post['trITDepMOT']] = triprequest.wayduration_origin.total_seconds() // 60\n post['trITArrMOTvalue%d' % post['trITArrMOT']] = triprequest.wayduration_destination.total_seconds() // 60\n\n if triprequest.with_bike:\n post['bikeTakeAlong'] = 1\n\n if triprequest.wheelchair:\n post['wheelchair'] = 1\n\n if triprequest.low_floor_only:\n post['lowPlatformVhcl'] = 1\n\n if not triprequest.allow_solid_stairs:\n post['noSolidStairs'] = 1\n\n if not triprequest.allow_escalators:\n post['noEscalators'] = 1\n\n if not triprequest.allow_elevators:\n post['noElevators'] = 1\n\n post.update(self._convert_location(triprequest.origin, '%s_origin'))\n post.update(self._convert_location(triprequest.destination, '%s_destination'))\n\n xml = self._post('XSLT_TRIP_REQUEST2', post)\n servernow = datetime.strptime(xml.attrib['now'], '%Y-%m-%dT%H:%M:%S')\n\n data = xml.find('./itdTripRequest')\n\n results = Trip.Results(self._parse_routes(data.find('./itdItinerary/itdRouteList')))\n results.origin = 
self._parse_odv(data.find('./itdOdv[@usage=\"origin\"]'))\n results.destination = self._parse_odv(data.find('./itdOdv[@usage=\"destination\"]'))\n\n return results, servernow",
"def newRequest(self):\n return Request( )",
"def get_serializer_context(self):\n\n context = super(RideViewSet, self).get_serializer_context()\n\n context['circle'] = self.circle\n\n if self.action in ['join', 'finish', 'qualify']:\n context['ride'] = self.get_object()\n\n return context",
"def request_action(reqID, action):\n req = get_ride_request(reqID)\n req.status = action.lower().title()\n req.save();",
"def get(self, current_user, r_id):\n if r_id:\n try:\n ride = Ride(id = r_id)\n ride = ride.find_by_id(r_id)\n if ride:\n return jsonify(ride), 200\n return jsonify({'msg': \"Ride not found \"}), 404\n except Exception as e:\n response = {\n 'message': str(e)\n }\n return make_response(jsonify(response)), 500\n\n else:\n try:\n ride = Ride()\n rides = ride.fetch_all()\n if rides == []:\n return jsonify({\"msg\": \" There are no rides rides at the moment\"}), 200\n return jsonify(rides), 200\n except Exception as e:\n response = {\n 'message': str(e)\n }\n return make_response(jsonify(response)), 500",
"def start_ride(vehicle_id):\n if movr.start_ride(vehicle_id):\n flash('Ride started with vehicle {}.'.format(vehicle_id))\n return redirect(url_for('ride', vehicle_id=vehicle_id, _external=True))\n\n flash('Could not start ride on vehicle {}.'.format(vehicle_id))\n flash('Either the vehicle is actively being ridden, or it has been '\n 'deleted from the database.')\n return redirect(url_for('vehicles', _external=True))",
"def start_ride(self, vehicle_id, user_email):\n return run_transaction(\n self.sessionfactory,\n lambda session: start_ride_txn(session, vehicle_id, user_email))",
"def test_edit_ride_if_signed_in_success(self):\n # signup\n passenger = {\n \"Email\": \"[email protected]\",\n \"Type\": \"passenger\",\n \"Password\": \"pass234\",\n \"Confirm Password\": \"pass234\"\n }\n\n response = self.client().post('/api/v1/auth/register',\n data=passenger)\n self.assertEqual(response.status_code, 201)\n\n # login\n logins = {\"Email\": \"[email protected]\", \"Password\": \"pass234\"}\n res = self.client().post('api/v1/auth/login', data=logins)\n self.assertEqual(res.status_code, 200)\n\n # # get authorization token\n token = json.loads(res.data.decode('UTF-8'))\n access_token = token.get('access-token')\n\n # edit the ride\n edit_data = {\"Ride Name\": \"Red\", \"Capacity\": 7}\n res = self.client().put('/api/v1/rides/1', data=edit_data,\n headers={'Authorization': 'Bearer '+access_token})\n self.assertEqual(res.status_code, 201)",
"def ride(vehicle_id):\n form = EndRideForm()\n vehicle_at_start = movr.get_vehicle(vehicle_id)\n if vehicle_at_start is None: # Vehicle not found in database\n flash(\"Vehicle `{}` not found.\".format(vehicle_id))\n return redirect(url_for('vehicles', _external=True))\n elif not vehicle_at_start['in_use']: # Ride hasn't started.\n flash(\"Cannot view the ride for this vehicle. It is not currently in \"\n \"use.\")\n return redirect(url_for('vehicle', vehicle_id=vehicle_id,\n _external=True))\n\n if form.validate_on_submit():\n try:\n if movr.end_ride(vehicle_id, form.longitude.data, form.latitude.data,\n form.battery.data):\n vehicle_at_end = movr.get_vehicle(vehicle_id)\n for message in generate_end_ride_messages(vehicle_at_start,\n vehicle_at_end):\n flash(message)\n return redirect(url_for('vehicle', vehicle_id=vehicle_id,\n _external=True))\n # else: end_ride didn't work\n flash(\"Unable to end ride for vehicle `{id}`.\".format(id=vehicle_id))\n return redirect(url_for('ride', vehicle_id=vehicle_id, _external=True))\n except ValueError as e:\n return render_error_page(e, movr)\n return render_template('ride.html',\n title=('Riding a {}'\n ).format(vehicle_at_start[\"vehicle_type\"]),\n form=form, vehicle=vehicle_at_start, _external=True)",
"def test_build__subsequent_calls_return_new_ride_object(self) -> None:\n ride_one: dict = RecurringRideFactory.build()\n ride_two: dict = RecurringRideFactory.build()\n\n assert ride_one['ride'] != ride_two['ride']",
"def test_build__generate_ride_object(self) -> None:\n ride: dict = RecurringRideFactory.build()\n\n assert ride['ride'] is not None",
"def abort_request_already_accepted(reqID):\n req = get_ride_request(reqID)\n if req.status == \"Accepted\":\n msg=\"Ride Request Cannot be changed: already accpeted\"\n abort(HTTPStatus.FORBIDDEN, message=msg)",
"def test_can_edit_ride_offer(self):\n response = self.app.put('/api/v1/users/rides/1',\n data=json.dumps(self.ride),\n content_type='application/json',\n headers=self.headers)\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.get_data().decode('utf-8'))\n self.assertEqual(response_data['start point'],'Juja')",
"def request(self, *args, **kwargs):\n req = RemindoRequest(self, *args, **kwargs)\n return req.request()",
"def request_action(self, request, data):\n\n response = self.oauth.post(url=f'{self.base_url}/json/{request}', data=data)\n return response.json()"
]
| [
"0.75216687",
"0.65378934",
"0.64955944",
"0.6391881",
"0.6327837",
"0.6260179",
"0.6085085",
"0.6083617",
"0.6060242",
"0.5994067",
"0.5968204",
"0.58465064",
"0.58400995",
"0.5741157",
"0.5677039",
"0.5617212",
"0.55390745",
"0.55201983",
"0.5494111",
"0.5423132",
"0.5416372",
"0.5402005",
"0.5396125",
"0.53782815",
"0.5354097",
"0.5351626",
"0.5345179",
"0.53231335",
"0.530484",
"0.52890515"
]
| 0.6671646 | 1 |
Test the function 'save_screenshot' | def test_save_screenshot():
surface_flow_file = Path(TEST_RESULTS_FILES_PATH, "surface_flow.vtu")
screenshot_file = save_screenshot(surface_flow_file, "Mach")
assert screenshot_file.exists()
if screenshot_file.exists():
screenshot_file.unlink() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __take_screenshot(web_driver: webdriver, test_name: str):\n root_dir = os.path.dirname(os.path.abspath(__file__)).replace(\"tests\", \"reports\")\n file_name = f\"{str(datetime.datetime.now().timestamp())}_{test_name}.jpg\"\n screenshot_file_path = os.path.join(root_dir, file_name)\n web_driver.save_screenshot(screenshot_file_path)",
"def test_screenshots_generated():\n with temporary_dir() as output_dir:\n output_dir = Path(output_dir)\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_screenshots.xml\",\n output_dir / \"output.xml\",\n )\n open(output_dir / \"selenium-screenshot-1.png\", mode=\"w+\")\n open(output_dir / \"selenium-screenshot-2.png\", mode=\"w+\")\n\n flowtask = FlowTaskFactory()\n robot_importer.import_robot_test_results(flowtask, output_dir)\n\n # output.xml asset created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-output\").count()\n # suite setup screenshot assets created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-screenshot\").count()\n # No screenshots created for 'Via API' test\n tr_method = models.TestMethod.objects.get(name=\"Via API\")\n test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 0 == test_api.assets.count()\n\n # One screenshot created for 'Via UI' test\n tr_method = models.TestMethod.objects.get(name=\"Via UI\")\n test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 1 == test_ui.assets.count()",
"def save_screenshot(self, img, file_name: str):\n img.save(str(self.info.screenshots_path / file_name))",
"def save_screenshot(self, screenshot_name):\n self.driver.save_screenshot(screenshot_name)",
"def take_screenshot(browser, test_name):\n screenshot_file_path = \"screenshots/{}.png\".format(test_name)\n browser.save_screenshot(screenshot_file_path)",
"def takeScreenshot(self, driver):\n fileName = str(round(time.time() * 1000)) + \".png\"\n screenshotDirectory = \"/Users/echalo/PycharmProjects/ICP Framwork/screenshots\"\n destinationFile = screenshotDirectory + fileName\n\n try:\n driver.save_screenshot(destinationFile)\n print(\"Screenshot saved to directory --> :: \" + destinationFile)\n except NotADirectoryError:\n print(\"Not a directory issue\")",
"def export_screenshot(self):\n\n if self.vis_type is None or len(self.vis_type) < 1:\n vis_type_suffix = ''\n else:\n vis_type_suffix = self.vis_type\n\n print(\"exporting screenshot for {}\".format(self.current_unit_id))\n ss_out_file = self.screenshot_dir / \"{}_{}_{}.{}\".format(\n self.current_unit_id, vis_type_suffix,\n cfg.screenshot_suffix, cfg.screenshot_format_ext)\n self.fig.savefig(ss_out_file, bbox_inches='tight', dpi=cfg.dpi_export_fig)",
"def takeScreenshot(self, driver):\n\n fileName = str(round(time.time() * 1000)) + \".png\"\n screenshotDirectory = \"C://Users/Mathe/Desktop//Selenium WebDriver with Python 3.x/Advanced/\"\n destinationFile = screenshotDirectory + fileName\n try:\n driver.save_screenshot(destinationFile)\n print(\"Screenshot saved to directory -> \" + destinationFile)\n except NotADirectoryError:\n print(\"Not a directory issue!!\")",
"def save_screenshot(self, screenshot_name, screenshot_folder=False):\r\n\t\tif not screenshot_folder:\r\n\t\t\tscreenshot_folder = os.path.join(PROJECT_ROOT, 'screenshots')\r\n\r\n\t\tscreenshot_location = os.path.join(\r\n\t\t\tscreenshot_folder, '{}.png'.format(screenshot_name))\r\n\r\n\t\tself.driver.save_screenshot(screenshot_location)\r\n\t\t# return screenshot_location\r",
"def __save_screenshot(path=\".\", file_name=\"current_time_stamp\", file_extension=\"png\"):\n\n # create folder for the screenshot to be saved\n try:\n os.makedirs(path, exist_ok=False)\n except FileExistsError as e:\n pass\n except BaseException as e:\n print(f\"unable to create folder {path}\")\n print(e)\n\n # generate filepath for the screenshot to be saved\n try:\n # generate the filename as current timestamp if not input\n if file_name == \"current_time_stamp\":\n _datenow = str(datetime.datetime.now())\n file_name = _datenow.replace(\" \", \"-\").replace(\":\", \"-\").replace(\".\", \"-\")\n\n _file_path = os.path.join(path, file_name + \".\" + file_extension)\n except BaseException as e:\n print(f\"unable to generate the file path\")\n print(e)\n\n # save the screenshot\n try:\n gui.screenshot(_file_path)\n return _file_path\n except BaseException as e:\n print(f\"unable to save the screenshot\")\n print(e)",
"async def capture_and_upload_screenshot(self) -> None:",
"def _save_screenshot_callback(self, _):\n\n self._curr_image_inc += 1\n image = self._screenshot_func()\n print(\"Captured image of shape\", np.shape(image))\n print(\"Current number of images:\", self._curr_image_inc)\n\n image.save(os.path.join(self._image_path, str(self._curr_image_inc) + '.png'))",
"def screenshot(self, path=getcwd(), name=\"screenshot.png\", element=None):\n\t\tp = path +\"/\"+name\n\t\tif element is not None:\n\t\t\ttry:\n\t\t\t\tassert(type(element)) == webdriver.firefox.webelement.FirefoxWebElement\n\t\t\t\telement.save_screenshot(p)\n\t\t\t\treturn 0\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Unable to save screenshot using given element\\n{}\".format(e))\n\t\t\t\treturn -1\n\t\tself.driver.save_screenshot(p)\n\t\treturn 0",
"async def capture_screenshot(self) -> bytes:",
"def save_screenshot(self, file_name, width=3840, height=2160, first=True, last=True):\n if first and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.makeCurrent()\n gr3.export(file_name, width, height)\n if last and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.doneCurrent()",
"def saveWindowState(self):\n print(\"Save button has been pressed!\")\n screenshot = self.widgetHolder.grab()\n self.screenshotNum += 1\n if(self.addressBox.text() != \"\"):\n screenshot.save(os.path.join(self.addressBox.text(), (\"screenshot\" + str(self.screenshotNum) + \".jpg\")))\n else:\n screenshot.save(\"screenshot\" + str(self.screenshotNum) + \".jpg\", \"jpg\")",
"def get_screenshot(self):\n method_name = self._testMethodName\n class_name = type(self).__name__\n time_now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n folder = os.path.dirname(os.getcwd())\n directory = \"\".join([folder, \"/test-results/\", class_name])\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_name = \"%s/%s - %s.png\" % (directory, time_now, method_name)\n\n self.driver.get_screenshot_as_file(file_name)\n print \"[[ATTACHMENT|%s]]\" % file_name\n print \"current url - %s\" % self.driver.current_url",
"def screenshot(filename):\n call([\"screencapture\", \"Screenshot for\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) + filename +\".jpg\"])",
"def takeScreenshot ():\n\n im = ImageGrab.grab()\n return im",
"def screenshot(url, path):\n # open in webpage\n driver = webdriver.PhantomJS()\n driver.set_window_size(1080, 800)\n driver.set_page_load_timeout(30)\n driver.get(url)\n driver.save_screenshot(path)\n driver.quit()",
"def capture_screenshot(self, output_path):\n self._driver.get_screenshot_as_file(str(output_path))",
"def screenshot_on_error(scenario):\r\n if scenario.failed:\r\n try:\r\n output_dir = '{}/log'.format(settings.TEST_ROOT)\r\n image_name = '{}/{}.png'.format(output_dir, scenario.name.replace(' ', '_'))\r\n world.browser.driver.save_screenshot(image_name)\r\n except WebDriverException:\r\n LOGGER.error('Could not capture a screenshot')",
"def capture_screenshot(image_name):\r\n output_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)\r\n image_name = '{}/{}.png'.format(output_dir, image_name.replace(' ', '_'))\r\n try:\r\n world.browser.driver.save_screenshot(image_name)\r\n except WebDriverException:\r\n LOGGER.error(\"Could not capture a screenshot '{}'\".format(image_name))",
"def create_screenshot(driver: webdriver.Chrome, name: str):\n\n driver.save_screenshot(f\".\\\\{name}_{time.strftime('%d-%m_%H-%M-%S')}.png\")",
"def capture_screenshot_for_step(step, when):\r\n if world.auto_capture_screenshots:\r\n scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1\r\n step_num = step.scenario.steps.index(step) + 1\r\n step_func_name = step.defined_at.function.func_name\r\n image_name = \"{prefix:03d}__{num:03d}__{name}__{postfix}\".format(\r\n prefix=scenario_num,\r\n num=step_num,\r\n name=step_func_name,\r\n postfix=when\r\n )\r\n world.capture_screenshot(image_name)",
"def take_screenshot(x, y, num=''):\n # screenshot takes starting x,y coordinates and then for how far the shot should stretch\n pic = pyautogui.screenshot(region=(0, y * 1.3, x * 0.75, y * 0.6))\n pic.save(\"Screenshot\" + str(num) + \".png\")",
"def test_save_png():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.png', 'data': [img]}\n\n assert images.save(parameters)",
"def take_screenshot(self, filepath):\n self.driver.get_screenshot_as_file(filepath)",
"def screenshot(self, name):\n screenshot_name = str(self.screenshot_count) + \"_\" + name + \".png\"\n self.log(\"Taking screenshot: \" + screenshot_name)\n # on Android, switching context to NATIVE_APP for screenshot\n # taking to get screenshots also stored to Testdroid Cloud\n # device run view. After screenshot switching back to\n # WEBVIEW. Works ok for Safari too.\n orig_context = self.driver.current_context\n self.driver.switch_to.context(\"NATIVE_APP\")\n self.driver.save_screenshot(self.screenshot_dir + \"/\" + screenshot_name)\n # only change context if originally context was WEBVIEW\n if orig_context not in self.driver.current_context:\n self.driver.switch_to.context(orig_context)\n self.screenshot_count += 1",
"def screenshot(self):\n self.context.draw.window.screenshot(self.filename)"
]
| [
"0.7212455",
"0.7189058",
"0.70866424",
"0.7077667",
"0.70456177",
"0.6968092",
"0.69598687",
"0.6943727",
"0.6886083",
"0.6874362",
"0.6820328",
"0.6815366",
"0.68121475",
"0.6712085",
"0.6625147",
"0.6597559",
"0.6579359",
"0.6511203",
"0.65101516",
"0.6504467",
"0.65036196",
"0.64741117",
"0.64709353",
"0.64525473",
"0.637446",
"0.63646615",
"0.63624597",
"0.6335064",
"0.6323168",
"0.6276193"
]
| 0.81533283 | 0 |
get the date as a string; only shows month/day. [bias] is the number of days to add to today (can be negative); the default of zero stands for today | def get_date_str(bias=0):
    today = datetime.datetime.today()  # get today's date
    date = (today + datetime.timedelta(days=bias)).strftime("%m/%d")  # format the date
    return ' ' + date[1:] if date[0] == '0' else date  # replace a leading 0 with a space
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"",
"def getDate():\n current_time = datetime.datetime.now()\n day = current_time.day\n month = current_time.month\n year = current_time.year\n date = \"{dd}-{mm}-{yyyy}\".format(dd=day,mm=month,yyyy=year)\n return date",
"def get_date_DM(): \n \n now = date.datetime.now()\n date_DM = str(now.day)+'_'+str(now.month)+'/' \n return date_DM",
"def get_todays_date(self):\r\n \r\n date=str(dt.datetime.today())\r\n raw_date=date.split(\" \")[0]\r\n Day=raw_date.split(\"-\")[-1]\r\n Month=raw_date.split(\"-\")[-2]\r\n Year=raw_date.split(\"-\")[-3]\r\n todays_date=Day+\"-\"+Month+\"-\"+Year\r\n return todays_date",
"def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")",
"def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date",
"def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)",
"def date_string(date):\n day = date.day\n month = date.month\n year = date.year\n formatted_string = str(month) + \"/\"\n formatted_string += str(day) + \"/\"\n formatted_string += str(year)\n return formatted_string",
"def formalDateToday():\n return dt.date.today().strftime(\"%B %d, %Y\")",
"def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")",
"def get_date():\n dt = datetime.now()\n return dt.strftime(\"%Y-%m-%d\")",
"def today():\n today_object = datetime.utcnow()\n today_string = today_object.strftime('%m/%d/%Y')\n return today_string",
"def get_today(self):\n # using now() to get current time\n current_time = datetime.datetime.now()\n day = str(current_time.day)\n month = str(current_time.month)\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n return str(current_time.year) + month + day",
"def get_date(date):\n return date",
"def get_date():\n return str(datetime.now()).split(' ')[0]",
"def actual_date():\n actual_date = datetime.now()\n return str(actual_date.day) + '-' + str(actual_date.month) + '-' + str(actual_date.year)",
"def string_date(mnthDay, year):\n return(mnthDay + '/' + str(year))",
"def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)",
"def get_date(format_of_date):\n current_date = datetime.datetime.today().strftime(format_of_date) # \"%d%m%Y\"\n return current_date",
"def get_date():\n now=datetime.now()\n s=\"%s%s%s\" % (now.year, str(now.month).zfill(2), str(now.day).zfill(2))\n return (now, s)",
"def get_today_date():\n return date.today()",
"def get_date():\n return datetime.now().strftime(\"%c\")",
"def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')",
"def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'",
"def get_date(self):\n return self.date.strftime(\"%a %x\")",
"def shortDate(self, date):\n return u'%s %02i' % (date.pMonth(), date.day())",
"def get_current_date():\n return datetime.datetime.today().strftime(constants.DATE_FORMAT)",
"def format_date(int_date):\n\n if int_date == 0:\n return 'today'\n\n tmp_date = int_date\n day = tmp_date % 100\n tmp_date = tmp_date / 100\n month = tmp_date % 100\n year = tmp_date / 100\n\n month_str = MONTHS[month]\n date_str = '%d-%s-%d' % (year, month_str, day)\n return date_str",
"def date_now():\n return datetime.today().strftime('%c')",
"def utc_today_str():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), \"%Y-%m-%d\")"
]
| [
"0.77219003",
"0.7503358",
"0.7500843",
"0.7455331",
"0.7453163",
"0.7407494",
"0.73675853",
"0.73555744",
"0.7328088",
"0.7196816",
"0.7162487",
"0.70597667",
"0.7008564",
"0.6929033",
"0.6923107",
"0.6921202",
"0.68978816",
"0.6877118",
"0.6834021",
"0.68301964",
"0.68108714",
"0.6809185",
"0.6779825",
"0.6755164",
"0.6737278",
"0.6736767",
"0.67350847",
"0.6730851",
"0.6729056",
"0.66957134"
]
| 0.8040248 | 0 |
Calculates the demand along each edge for a particular routing and flow | def calc_demand(self, routing: np.ndarray, demand: float,
commodity_idx: int) -> np.ndarray:
commodity = self.commodities[commodity_idx]
node_flow = np.zeros(self.num_nodes)
node_flow[commodity[0]] = demand
split_matrix = np.zeros((self.num_nodes, self.num_nodes), dtype=float)
for edge_idx, edge in enumerate(self.edges):
split_matrix[edge[1]][edge[0]] = routing[commodity_idx][edge_idx]
split_matrix[:, commodity[1]] = 0 # no send from the destination node
edge_utilisation = np.zeros((self.num_nodes, self.num_nodes))
num_steps = 0
while True:
change = np.multiply(split_matrix, node_flow)
edge_utilisation += change
node_flow = np.matmul(split_matrix, node_flow)
if np.any(np.isnan(change)):
print("is_nan :'(")
comparison = np.less(np.nan_to_num(change), self.min_delta)
if np.logical_and.reduce(np.logical_and.reduce(comparison)):
break
num_steps += 1
# if we take more than |E| steps we have cycles which is not good.
# Therefore: end here with really bad reward, scaled by number of
# cycles
if num_steps > routing.shape[1]:
remaining_flow = np.greater(np.nan_to_num(change), 0.0)
edge_utilisation += np.multiply(remaining_flow, np.full(
(self.num_nodes, self.num_nodes), demand))
break
return edge_utilisation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def calc_lp(self, demands: Demand, routing: Routing) -> float:\n epsilon = self.epsilon\n\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('flow_utilisation_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the amount of flow along each edge\n # stored as a list flow_variables[ith_flow][jth_edge]\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(len(self.edges)):\n flow_variable_edges.append(\n solver.NumVar(0, solver.infinity(), '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Flow from source constraint (net flow must equal demand)\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create constraint\n constraint_i = solver.Constraint(demands[i] - epsilon,\n demands[i] + epsilon,\n '(source,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]:\n # out flow is positive\n constraint_i.SetCoefficient(flow_variables[i][edge_index], 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]:\n # in flow is negative\n constraint_i.SetCoefficient(flow_variables[i][edge_index], -1)\n conservation_source_constraints.append(constraint_i)\n\n # Flow to sink constraint (in flow must equal demand, out must be zero)\n conservation_sink_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create in flow constraint\n constraint_i_in = solver.Constraint(-demands[i] - epsilon,\n -demands[i] + epsilon,\n '(sink_in,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[1])]:\n # in flow is negative\n constraint_i_in.SetCoefficient(flow_variables[i][edge_index],\n -1)\n conservation_sink_constraints.append(constraint_i_in)\n\n constraint_i_out = solver.Constraint(0, 0,\n '(sink_out,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[1])]:\n # out flow is positive\n constraint_i_out.SetCoefficient(flow_variables[i][edge_index],\n 1)\n conservation_sink_constraints.append(constraint_i_out)\n\n # Flow at transit node constraint (net flow must be zero)\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n if j != commodity[0] and j != commodity[1]:\n # create constraint\n constraint_j = solver.Constraint(-epsilon, +epsilon,\n '(transit,{},{})'.format(i,\n j))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]:\n # out flow is positive\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]:\n # in flow is negative\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Flow splitting at transit constraints (edge flow must be correct split of\n # in flow)\n splitting_ratio_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n # Sink has not such constraint and we handle source differently\n if j != commodity[1] and j != commodity[0]:\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]\n out_edges = 
[self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]\n\n # separate constraint for split of each out_edge taking into\n # account all in_edges\n for out_edge_index in out_edges:\n # create constraint\n constraint_edge = \\\n solver.Constraint(-epsilon, +epsilon,\n '(split,{},{},{})'.format(\n i, j,\n out_edge_index))\n split_ratio = routing[i][out_edge_index]\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by\n # split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n splitting_ratio_constraints.append(constraints_flow_i)\n\n # Flow splitting at source constraints (edge flow must be correct split of\n # in flow + demand)\n source_splitting_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]\n out_edges = [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]\n for out_edge_index in out_edges:\n # create constraint\n split_ratio = routing[i][out_edge_index]\n split_demand = split_ratio * demands[i]\n constraint_edge = \\\n solver.Constraint(split_demand - epsilon,\n split_demand + epsilon,\n '(split,{},{},{})'.format(i, j,\n out_edge_index))\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n source_splitting_constraints.append(constraints_flow_i)\n\n solver.Solve()\n\n result_status = solver.Solve()\n\n utilisation = np.zeros(\n (len(self.commodities), self.graph.number_of_edges()))\n # # extract the actual routing. Useful for debugging, maybe use to bootstrap\n # assignment = np.zeros(\n # (len(self.commodities), self.graph.number_of_edges()))\n\n # if routing is really that bad, just bail and give a sad result\n if result_status == solver.NOT_SOLVED or result_status == solver.INFEASIBLE:\n return 1.0\n\n for i in range(len(self.commodities)):\n for j in range(self.graph.number_of_edges()):\n utilisation[i][j] = flow_variables[i][j].solution_value() / \\\n self.edges[j][2]['weight']\n # assignment[i][j] = flow_variables[i][j].solution_value()\n\n return np.max(np.sum(utilisation, axis=0))",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def energy_cost(edge):\n return edge_weight(edge) * 1.2",
"def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)",
"def opt(self, demands: Demand) -> float:\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('multicommodity_flow_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the splitting ratios for each edge\n # Stored as a list of lists (flow_variables[ith_flow][jth_edge])\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(self.num_edges):\n flow_variable_edges.append(\n solver.NumVar(0, 1, '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Capacity constraint\n capacity_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint between 0 and edge capacity\n constraint_i = solver.Constraint(\n 0, self.graph.get_edge_data(*edge)['weight'],\n '(1,{},{})'.format(*edge))\n for j, commodity in enumerate(self.commodities):\n # Coefficient for jth flow over ith edge is scaled by flow width\n constraint_i.SetCoefficient(flow_variables[j][i],\n # cast because or-tools :'(\n float(demands[j]))\n capacity_constraints.append(constraint_i)\n\n # Conservation on transit nodes\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.num_nodes):\n if j != commodity[0] and j != commodity[1]:\n # Constraint must sum to zero\n constraint_j = solver.Constraint(0, 0,\n '(2,{},{})'.format(i, j))\n for k in list(sorted(self.graph.adj[j].keys())):\n # Ingress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(k, j)]], 1)\n # Egress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(j, k)]], -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Conservation of flow at source node\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(3,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[0]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[0], edge_dest)]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[0])]],\n -1)\n conservation_source_constraints.append(constraint_i)\n\n # Conservation of flow at destination node\n conservation_dest_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(4,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[1]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[1])]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[1], edge_dest)]],\n -1)\n conservation_dest_constraints.append(constraint_i)\n\n ## OBJECTIVES\n # Implementation of the load-balancing example from Wikipedia\n # First we add more constraints so that we are minimising the maximum\n max_utilisation_variable = solver.NumVar(0, solver.Infinity(),\n 'max_link_utilisation')\n min_of_max_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint that '-inf < f_0 + f_1 +... - max < 0'\n # i.e 'f_0 + f_1 + ... 
< max'\n constraint_i = solver.Constraint(-solver.Infinity(), 0,\n '(5,{})'.format(i))\n constraint_i.SetCoefficient(max_utilisation_variable, -1)\n for j, flow_variable in enumerate(flow_variables):\n constraint_i.SetCoefficient(flow_variable[i],\n demands[j] /\n self.graph.get_edge_data(*edge)[\n 'weight'])\n min_of_max_constraints.append(constraint_i)\n\n # Objective now is to minimise the maximum link utilisation\n objective = solver.Objective()\n objective.SetCoefficient(max_utilisation_variable, 1)\n objective.SetMinimization()\n solver.Solve()\n\n return objective.Value()",
"def route_cost(self, route):\n total_weight = 0\n c = 0\n start = route[0]\n for end in route[1:]:\n y = float(self.stars[start][end]['weight']) - c\n t = total_weight + y\n c = (t - total_weight) - y\n\n total_weight = t\n\n start = end\n return total_weight",
"def calc(self, demands: Demand, routing: Routing) -> np.ndarray:\n total_utilisation = np.zeros((self.num_nodes, self.num_nodes),\n dtype=float)\n\n for commodity_idx in range(len(self.commodities)):\n utilisation = self.calc_demand(routing,\n demands[commodity_idx],\n commodity_idx)\n total_utilisation += utilisation\n\n return np.max(np.divide(total_utilisation, self.edge_capacities))",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def calculate_flow(self):\r\n \r\n for i in range(0, self.cells_number-1):\r\n self.flows[i] = min(self.cells[i].demand, self.cells[i+1].supply)",
"def calculate_cost(route, adjacency_matrix):\n route_shifted = np.roll(route,1)\n cost = np.sum(adjacency_matrix[route, route_shifted])\n st_dev = np.std(adjacency_matrix[route, route_shifted])\n return st_dev, cost",
"def calculate_demand(flow, requested_sf, available_sf, service_functions):\n\n if requested_sf in available_sf:\n vnf_need_placement = False\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n return demanded_total_capacity, vnf_need_placement\n else:\n vnf_need_placement = True\n available_sf[requested_sf] = {'load': 0.0}\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n del available_sf[requested_sf]\n return demanded_total_capacity, vnf_need_placement",
"def calc_overall_link_utilisation(self, demands: Demand,\n routing: Routing) -> np.ndarray:\n flows = [(i, j) for i in range(self.num_nodes)\n for j in range(self.num_nodes)\n if i != j]\n\n link_utilisation = np.zeros(self.num_edges)\n\n for i, flow in enumerate(flows):\n flow_link_utilisation = self.calc_per_flow_link_utilisation(flow,\n demands[\n i],\n routing[\n i])\n link_utilisation += flow_link_utilisation\n\n return link_utilisation",
"def get_routecost(self, route):\n return np.sum(self.parent.edges.lengths[route])",
"def calc_optimal_mass_flow(q1, q2, q3, q4, E1, E2, E3, E4, m1, m2, m3, m4, dP1, dP2, dP3, dP4, Area_a):\n\n mass_flow_opt = np.empty(8760)\n dP_opt = np.empty(8760)\n const = Area_a / 3600\n mass_flow_all_kgpers = [m1 * const, m2 * const, m3 * const, m4 * const] # [kg/s]\n dP_all_Pa = [dP1 * Area_a, dP2 * Area_a, dP3 * Area_a, dP4 * Area_a] # [Pa]\n balances = [q1 - E1 * 2, q2 - E2 * 2, q3 - E3 * 2, q4 - E4 * 2] # energy generation function eq.(63)\n for time in range(8760):\n balances_time = [balances[0][time], balances[1][time], balances[2][time], balances[3][time]]\n max_heat_production = np.max(balances_time)\n ix_max_heat_production = np.where(balances_time == max_heat_production)\n mass_flow_opt[time] = mass_flow_all_kgpers[ix_max_heat_production[0][0]]\n dP_opt[time] = dP_all_Pa[ix_max_heat_production[0][0]]\n return mass_flow_opt, dP_opt",
"def route_info(self, route):\r\n total_distance = 0\r\n cost_mult = 0.35\r\n cost = 0\r\n time = 0\r\n if route[0] in self.edges:\r\n for i in range(len(route) - 1):\r\n for edge in self.edges[route[i]]:\r\n if edge.destination == route[i + 1]:\r\n total_distance += edge.distance\r\n cost += cost_mult * edge.distance\r\n time += self.calc_time(edge.distance)\r\n outgoing = len(self.edges[edge.destination])\r\n # if this airport is not the last one since we don't need to calculate layover for last\r\n if i is not len(route) - 2:\r\n time += 2 - ((1 / 6) * (outgoing - 1))\r\n if cost_mult > 0:\r\n cost_mult -= 0.05\r\n break;\r\n else:\r\n if edge == self.edges[route[i]][-1]:\r\n return\r\n return total_distance, round(cost, 2), round(time, 2)",
"def get_routing_solution(self):\n G = self.base_digraph\n s1 = self.sources[0]\n s2 = self.sources[1]\n t1 = self.destinations[0]\n t2 = self.destinations[1]\n\n try:\n m = Model('routing')\n m.setParam('OutputFlag', False)\n\n # variables,\n # We have one variable per edge per session\n # e is the dict of dict for the variables\n e = {}\n r = {}\n for i in [1,2]:\n e[i] = {}\n r[i] = m.addVar()\n for u,v in G.edges():\n e[i][u,v] = m.addVar(lb=0)\n\n m.update()\n\n obj = quicksum(r.values())\n m.setObjective(obj, GRB.MAXIMIZE)\n\n # constraints\n # 1. conservations of flow at all intermediate nodes\n # 2. capacity constraints for each edge\n\n for u,v in G.edges():\n m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n for n in G.nodes():\n if n not in [s1, s2, t1, t2]:\n for i in [1, 2]:\n inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n m.addConstr(inflow == outflow)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n for u, v in G.edges():\n G[u][v]['Routing'] = {}\n G[u][v]['Routing'][1] = e[1][u,v].x\n G[u][v]['Routing'][2] = e[2][u,v].x\n return (m.objVal, r[1].x, r[2].x)\n else:\n # something went wrong...err...\n print \"Something was wrong, no optimal solution obtained\"\n return None, None, None\n\n except GurobiError:\n Print ('Error Report from Gurobi')",
"def objective(graph, flows):\n\n G = graph.copy()\n rules = {}\n flows.sort(key=lambda a: a[1], reverse=True)\n\n for flow,demand in flows:\n src = get_host_from_ip(G, flow.nw_src)\n dst = get_host_from_ip(G, flow.nw_dst)\n\n if not (src and dst):\n continue\n if not (src in G.nodes() and dst in G.nodes()):\n continue\n\n path = widest_path(G, src, dst)\n\n hops = []\n for a,b in pairwise(path):\n hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))\n G.edge[a][b]['capacity'] -= demand\n G.edge[b][a]['capacity'] -= demand\n\n rules[flow] = hops\n\n return rules",
"def get_residual(n, key, edges):\n\n if n['direction'] == 'F':\n e = [e for e in edges if e['first'] == key and e['last'] == n['id']][0]\n r = e['capacity'] - e['flow'] \n \n if n['direction'] == 'B':\n e = [e for e in edges if e['last'] == key and e['first'] == n['id']][0]\n r = e['flow']\n return r",
"def get_interval_from_minflow(self, wide=False):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"start\"],\n # self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"destin\"],\n # self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.source(),\n # x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.sink(),\n # x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(s_prime, v, int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(v, t_prime, int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n 
# add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # s_prime,\n # x,\n # int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # x,\n # t_prime,\n # int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i],\n capacities[i],\n unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n # print('Minimum cost:', min_cost_flow.OptimalCost())\n # print('')\n # print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n # cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n # print('%1s -> %1s %3s / %3s %3s' % (\n # min_cost_flow.Tail(i),\n # min_cost_flow.Head(i),\n # min_cost_flow.Flow(i),\n # min_cost_flow.Capacity(i),\n # cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n # print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n # print(\"Has become ({}, {}) with sup {}\".format(start,\n # destin,\n # sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in\n self.arc_info[arc].keys()):\n # print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n bound_1 = old_flow + sup_flow\n bound_2 = old_flow - sup_flow\n new_lb = max(0, int(min(bound_1, bound_2)))\n new_ub = int(max(bound_1, bound_2))\n if wide:\n if new_lb == new_ub:\n # print(\"We had a zero interval\")\n new_lb = int(new_lb*0.8)\n new_ub = int(new_ub*1.2)\n if new_lb == 0:\n # print(\"We got a zero lower bound\")\n new_ub = 5\n # print(\"But now we're doing {} {}\".\n # format(new_lb, new_ub))\n\n self.arc_info[arc][\"lower_bound\"] = new_lb\n self.arc_info[arc][\"upper_bound\"] = new_ub\n # print(\"Edge ({},{}) bounds are [{},{}]\".format(\n # start,\n # destin,\n # self.arc_info[arc][\"lower_bound\"],\n # self.arc_info[arc][\"upper_bound\"]))\n # print(self.arc_info[arc])\n else:\n print('There was an issue with the min cost flow input.')\n # self.check_conservation_of_flow() # check that solution is valid",
"def compute_path_hopping_flow_allocations(target_graph, K=3):\n flow_allocation_seed_number = 0xCAFE_BABE\n np.random.seed(flow_allocation_seed_number)\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n link_utilization = {(u, v): 0.0 for u, v in target_graph.edges}\n node_capacity = {u: 0.0 for u in target_graph.nodes}\n flows = []\n while True:\n source_node, destination_node = flow_selection_fn(target_graph.nodes, 2, replace=False)\n print(source_node, destination_node)\n\n shortest_paths = sorted(nx.all_simple_paths(target_graph, source_node, destination_node,\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n\n # flow_tx_rate = np.random.uniform() * 10\n flow_tx_rate = 1.0\n # if node_capacity[source_node] + flow_tx_rate > LINK_CAPACITY:\n # break\n node_capacity[source_node] += flow_tx_rate\n capacity_was_exceeded = False \n for path in [nx.utils.pairwise(p_i) for p_i in k_shortest_paths]:\n for u, v in [sorted(h_i) for h_i in path]:\n flow_rate_per_subpath = flow_tx_rate / K\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = k_shortest_paths\n , splitting_ratio = [1.0/K]*K\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost",
"def _cost_route_fine(self):\n return self.fine",
"def cost(self, route: List[int]) -> float:\n raise NotImplementedError",
"def edge_cost(self, edge_data):\n\n if edge_data is None:\n # the target of the edge is a gate in the target basis,\n # so we return a default value of 1.\n return 1\n\n cost_tot = 0\n for instruction in edge_data.rule.circuit:\n key = Key(name=instruction.operation.name, num_qubits=len(instruction.qubits))\n cost_tot += self._opt_cost_map[key]\n\n return cost_tot - self._opt_cost_map[edge_data.source]",
"def flowStress(f_hard,eps,d,q,a):\n\n pass",
"def update_flow(self):\n N = len(self.vertices)\n _vertices = self.vertices+['_source', '_sink']\n s, t = _vertices.index('_source'), _vertices.index('_sink')\n cost, capacity = dok_matrix((N+2, N+2)), dok_matrix((N+2, N+2))\n\n cost[:N, :N] = self.cost\n capacity[:N, :N] = self.upper_bound-self.lower_bound\n # _source to main vertices\n l_in = self.lower_bound.toarray().sum(axis=0)\n us, = l_in.nonzero()\n for u in us:\n capacity[s, u] = l_in[u]\n # main vertices to _sink\n l_out = self.lower_bound.toarray().sum(axis=1)\n us, = l_out.nonzero()\n for u in us:\n capacity[u, t] = l_out[u]\n # sink to source\n infinite_flow = self.upper_bound.toarray().sum()\n capacity[_vertices.index('sink'), _vertices.index('source')] = infinite_flow\n\n # get a feasible flow on original graph by finding the max flow on\n # auxiliary graph\n aux_fg = FlowGraph(_vertices, cost, capacity, True)\n aux_fg.FordFulkerson()\n assert aux_fg.residual[s].toarray().sum()==0, 'feasible flow within bounds not found'\n\n self.residual = aux_fg.residual[:N, :N]\n s, t = self.vertices.index('source'), self.vertices.index('sink')\n self.residual[s, t] = 0\n self.residual[t, s] = 0\n\n self.FordFulkerson()",
"def resetVehicleAdaptedTravelTime(vehicle, edges):\n routeTime = 0\n for edge in edges:\n edgeTime = edgeSpeedGlobal[edge]\n routeTime += edgeTime\n traci.vehicle.setAdaptedTraveltime(vehID=vehicle, edgeID=edge, time=edgeTime)\n\n return edgeTime",
"def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\r\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(task, resource_id, arrival_time=arrival_time)\r\n if task.dummy_task:\r\n return start_time, eft, runtime_on_resource, place_id, 0\r\n else:\r\n cost = self.calculate_share_cost_change(resource_id, start_time, eft, task.graph.name, True)\r\n return start_time, eft, runtime_on_resource, place_id, cost",
"def cost(route):\n cost = 0\n for li in route:\n if cost<li.get_cost():\n cost=li.get_cost()\n return cost"
]
| [
"0.7155596",
"0.6779073",
"0.6652812",
"0.6349164",
"0.63395905",
"0.6288503",
"0.62845767",
"0.6226773",
"0.62261355",
"0.61473256",
"0.61391884",
"0.6069398",
"0.6058541",
"0.6003596",
"0.5980734",
"0.5970249",
"0.59189695",
"0.59046817",
"0.58885455",
"0.58686656",
"0.57540387",
"0.5696498",
"0.56842405",
"0.56639075",
"0.5640717",
"0.56367725",
"0.5627333",
"0.5621166",
"0.5612718",
"0.56083673"
] | 0.7746539 | 0 |
Calculates the link utilisation over a graph for a particular flow and its demand. (NB utilisation in bandwidth, not relative to capacity) | def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],
demand: float,
routing: np.ndarray) -> np.ndarray:
edge_mapping = {edge: i for i, edge in
enumerate(sorted(self.graph.edges))}
link_utilisation = np.zeros(self.num_edges)
node_flow = np.zeros(self.num_nodes) # the flow stored at a node
node_flow[flow[0]] = demand
to_explore = [flow[0]]
while to_explore:
current_node = to_explore.pop(0)
current_flow = node_flow[current_node]
# this is the flow destination node so we absorb all flow
if current_node == flow[1]:
node_flow[current_node] = 0.0
continue
# push the flow at this node over all edges
for edge in self.graph.out_edges(current_node):
edge_index = edge_mapping[edge]
ratio = routing[edge_index]
flow_to_send = ratio * current_flow
# only send flow if greater than epsilon (so no 'infinite' loops)
if flow_to_send > 1.e-8:
node_flow[edge[1]] += ratio * current_flow
# all important step, update our output
link_utilisation[edge_index] += ratio * current_flow
# have updated the dst so add it to the list of things to do
to_explore.append(edge[1])
# we've moved all the flow from this node now, so reset back to zero
node_flow[current_node] = 0.0
return link_utilisation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_overall_link_utilisation(self, demands: Demand,\n routing: Routing) -> np.ndarray:\n flows = [(i, j) for i in range(self.num_nodes)\n for j in range(self.num_nodes)\n if i != j]\n\n link_utilisation = np.zeros(self.num_edges)\n\n for i, flow in enumerate(flows):\n flow_link_utilisation = self.calc_per_flow_link_utilisation(flow,\n demands[\n i],\n routing[\n i])\n link_utilisation += flow_link_utilisation\n\n return link_utilisation",
"def compute_path_metric(self, sw, path, util, time_now, local_contrib):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n # Use the last-learned-via-sync value for a link\n if (not local_contrib) and 'sync_learned' in self.graph[u][v]:\n used1 = self.graph[u][v]['sync_learned'] + util\n used2 = self.graph[u][v]['used'] + util\n # ['used'] is a strict lower bound for ['sync_learned']\n if used1 > used2: \n used = used1\n logging.debug(\"CS [%s] using sync_learned value 1 [%f]\", str(self.name), used1)\n else:\n used = used2\n logging.debug(\"CS [%s] using sync_learned value 2 [%f]\", str(self.name), used2)\n else:\n logging.debug(\"CS [%s] using tracking value\", str(self.name))\n used = self.graph[u][v]['used'] + util\n\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))",
"def compute_path_metric(self, sw, path, util, time_now):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n #DESIGN CHOICE: Should we 1) always include extra-domain state, 2)\n #only include extra-domain state when not stale (timestamp), 3) always exclude\n #extra-domain state when calculating the path metric? Here we do (1)\n used = self.graph[u][v]['used'] + util\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))",
"def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)",
"def compute_link_utilization_over_time(link_byte_counts):\n def find_matching_iface_stats(byte_count, source_id, destination_id):\n matching_stats = [d_i for d_i in byte_count\n if d_i[\"sourceSwitchId\"] == source_id and\n d_i[\"destinationSwitchId\"] == destination_id]\n if len(matching_stats) != 1:\n raise ValueError(\"Unexpected results in find_matching_iface_stats. \\\n Found %d matching iface_stats\" % len(matching_stats))\n return matching_stats[0]\n\n def compute_tx_rate(count_in_bytes):\n return (count_in_bytes * 8) / 10.0**7\n\n # First compute the delta between the iface_stats in time_period t_i and the iface_stats\n # in time period t_{i+1}.\n # tx_rate_t: (source_id x destination_id) -> link_utilization_in_time_period_t forall. t\n tx_rate_t = []\n for t_0, t_1 in zip(link_byte_counts, link_byte_counts[1:]):\n byte_count_delta_t = defaultdict(float)\n for iface_stats in t_0:\n source_id = iface_stats[\"sourceSwitchId\"]\n destination_id = iface_stats[\"destinationSwitchId\"]\n t_0_count = iface_stats[\"bytesSent\"] + iface_stats[\"bytesReceived\"]\n try:\n t_1_stats = find_matching_iface_stats(t_1, source_id, destination_id)\n t_1_count = t_1_stats[\"bytesSent\"] + t_1_stats[\"bytesReceived\"]\n except ValueError:\n t_1_count = t_0_count\n\n count_delta = t_1_count - t_0_count\n link_key = compute_link_key(source_id, \n destination_id)\n byte_count_delta_t[link_key] += count_delta\n\n tx_rate_t.append({the_link_key: compute_tx_rate(byte_count_t) \n for the_link_key, byte_count_t in byte_count_delta_t.items()})\n return tx_rate_t",
"def compute_path_hopping_flow_allocations(target_graph, K=3):\n flow_allocation_seed_number = 0xCAFE_BABE\n np.random.seed(flow_allocation_seed_number)\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n link_utilization = {(u, v): 0.0 for u, v in target_graph.edges}\n node_capacity = {u: 0.0 for u in target_graph.nodes}\n flows = []\n while True:\n source_node, destination_node = flow_selection_fn(target_graph.nodes, 2, replace=False)\n print(source_node, destination_node)\n\n shortest_paths = sorted(nx.all_simple_paths(target_graph, source_node, destination_node,\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n\n # flow_tx_rate = np.random.uniform() * 10\n flow_tx_rate = 1.0\n # if node_capacity[source_node] + flow_tx_rate > LINK_CAPACITY:\n # break\n node_capacity[source_node] += flow_tx_rate\n capacity_was_exceeded = False \n for path in [nx.utils.pairwise(p_i) for p_i in k_shortest_paths]:\n for u, v in [sorted(h_i) for h_i in path]:\n flow_rate_per_subpath = flow_tx_rate / K\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = k_shortest_paths\n , splitting_ratio = [1.0/K]*K\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def calculate_available_link_res (self, sg_hops_to_be_ignored, mode=MODE_ADD):\n # set availbandwidth to the maximal value\n for i, j, k, d in self.network.edges_iter(data=True, keys=True):\n if d.type == 'STATIC':\n setattr(self.network[i][j][k], 'availbandwidth', d.bandwidth)\n # subtract the reserved link and internal (inside Infras) bandwidth\n if mode == self.MODE_ADD:\n for d in self.infras:\n for p in d.ports:\n for fr in p.flowrules:\n if fr.id not in sg_hops_to_be_ignored and fr.bandwidth is not None:\n # Flowrules are cummulatively subtracted from the switching \n # capacity of the node.\n d.availres['bandwidth'] -= fr.bandwidth\n if d.availres['bandwidth'] < 0:\n raise RuntimeError(\"The node bandwidth of %s got below zero \"\n \"during available resource calculation!\" %\n d.id)\n # Get all the mapped paths of all SGHops from the NFFG\n sg_map = NFFGToolBox.get_all_sghop_info(self, return_paths=True)\n for sg_hop_id, data in sg_map.iteritems():\n src, dst, flowclass, bandwidth, delay, path = data\n if bandwidth is not None:\n for link in path:\n link.availbandwidth -= bandwidth\n if link.availbandwidth < 0:\n raise RuntimeError(\n \"The link bandwidth of %s got below zero during\"\n \"available resource calculation!\" % link.id)",
"def calc_lp(self, demands: Demand, routing: Routing) -> float:\n epsilon = self.epsilon\n\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('flow_utilisation_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the amount of flow along each edge\n # stored as a list flow_variables[ith_flow][jth_edge]\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(len(self.edges)):\n flow_variable_edges.append(\n solver.NumVar(0, solver.infinity(), '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Flow from source constraint (net flow must equal demand)\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create constraint\n constraint_i = solver.Constraint(demands[i] - epsilon,\n demands[i] + epsilon,\n '(source,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]:\n # out flow is positive\n constraint_i.SetCoefficient(flow_variables[i][edge_index], 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]:\n # in flow is negative\n constraint_i.SetCoefficient(flow_variables[i][edge_index], -1)\n conservation_source_constraints.append(constraint_i)\n\n # Flow to sink constraint (in flow must equal demand, out must be zero)\n conservation_sink_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create in flow constraint\n constraint_i_in = solver.Constraint(-demands[i] - epsilon,\n -demands[i] + epsilon,\n '(sink_in,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[1])]:\n # in flow is negative\n constraint_i_in.SetCoefficient(flow_variables[i][edge_index],\n -1)\n conservation_sink_constraints.append(constraint_i_in)\n\n constraint_i_out = solver.Constraint(0, 0,\n '(sink_out,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[1])]:\n # out flow is positive\n constraint_i_out.SetCoefficient(flow_variables[i][edge_index],\n 1)\n conservation_sink_constraints.append(constraint_i_out)\n\n # Flow at transit node constraint (net flow must be zero)\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n if j != commodity[0] and j != commodity[1]:\n # create constraint\n constraint_j = solver.Constraint(-epsilon, +epsilon,\n '(transit,{},{})'.format(i,\n j))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]:\n # out flow is positive\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]:\n # in flow is negative\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Flow splitting at transit constraints (edge flow must be correct split of\n # in flow)\n splitting_ratio_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n # Sink has not such constraint and we handle source differently\n if j != commodity[1] and j != commodity[0]:\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]\n out_edges = 
[self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]\n\n # separate constraint for split of each out_edge taking into\n # account all in_edges\n for out_edge_index in out_edges:\n # create constraint\n constraint_edge = \\\n solver.Constraint(-epsilon, +epsilon,\n '(split,{},{},{})'.format(\n i, j,\n out_edge_index))\n split_ratio = routing[i][out_edge_index]\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by\n # split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n splitting_ratio_constraints.append(constraints_flow_i)\n\n # Flow splitting at source constraints (edge flow must be correct split of\n # in flow + demand)\n source_splitting_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]\n out_edges = [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]\n for out_edge_index in out_edges:\n # create constraint\n split_ratio = routing[i][out_edge_index]\n split_demand = split_ratio * demands[i]\n constraint_edge = \\\n solver.Constraint(split_demand - epsilon,\n split_demand + epsilon,\n '(split,{},{},{})'.format(i, j,\n out_edge_index))\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n source_splitting_constraints.append(constraints_flow_i)\n\n solver.Solve()\n\n result_status = solver.Solve()\n\n utilisation = np.zeros(\n (len(self.commodities), self.graph.number_of_edges()))\n # # extract the actual routing. Useful for debugging, maybe use to bootstrap\n # assignment = np.zeros(\n # (len(self.commodities), self.graph.number_of_edges()))\n\n # if routing is really that bad, just bail and give a sad result\n if result_status == solver.NOT_SOLVED or result_status == solver.INFEASIBLE:\n return 1.0\n\n for i in range(len(self.commodities)):\n for j in range(self.graph.number_of_edges()):\n utilisation[i][j] = flow_variables[i][j].solution_value() / \\\n self.edges[j][2]['weight']\n # assignment[i][j] = flow_variables[i][j].solution_value()\n\n return np.max(np.sum(utilisation, axis=0))",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def opt(self, demands: Demand) -> float:\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('multicommodity_flow_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the splitting ratios for each edge\n # Stored as a list of lists (flow_variables[ith_flow][jth_edge])\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(self.num_edges):\n flow_variable_edges.append(\n solver.NumVar(0, 1, '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Capacity constraint\n capacity_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint between 0 and edge capacity\n constraint_i = solver.Constraint(\n 0, self.graph.get_edge_data(*edge)['weight'],\n '(1,{},{})'.format(*edge))\n for j, commodity in enumerate(self.commodities):\n # Coefficient for jth flow over ith edge is scaled by flow width\n constraint_i.SetCoefficient(flow_variables[j][i],\n # cast because or-tools :'(\n float(demands[j]))\n capacity_constraints.append(constraint_i)\n\n # Conservation on transit nodes\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.num_nodes):\n if j != commodity[0] and j != commodity[1]:\n # Constraint must sum to zero\n constraint_j = solver.Constraint(0, 0,\n '(2,{},{})'.format(i, j))\n for k in list(sorted(self.graph.adj[j].keys())):\n # Ingress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(k, j)]], 1)\n # Egress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(j, k)]], -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Conservation of flow at source node\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(3,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[0]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[0], edge_dest)]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[0])]],\n -1)\n conservation_source_constraints.append(constraint_i)\n\n # Conservation of flow at destination node\n conservation_dest_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(4,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[1]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[1])]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[1], edge_dest)]],\n -1)\n conservation_dest_constraints.append(constraint_i)\n\n ## OBJECTIVES\n # Implementation of the load-balancing example from Wikipedia\n # First we add more constraints so that we are minimising the maximum\n max_utilisation_variable = solver.NumVar(0, solver.Infinity(),\n 'max_link_utilisation')\n min_of_max_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint that '-inf < f_0 + f_1 +... - max < 0'\n # i.e 'f_0 + f_1 + ... 
< max'\n constraint_i = solver.Constraint(-solver.Infinity(), 0,\n '(5,{})'.format(i))\n constraint_i.SetCoefficient(max_utilisation_variable, -1)\n for j, flow_variable in enumerate(flow_variables):\n constraint_i.SetCoefficient(flow_variable[i],\n demands[j] /\n self.graph.get_edge_data(*edge)[\n 'weight'])\n min_of_max_constraints.append(constraint_i)\n\n # Objective now is to minimise the maximum link utilisation\n objective = solver.Objective()\n objective.SetCoefficient(max_utilisation_variable, 1)\n objective.SetMinimization()\n solver.Solve()\n\n return objective.Value()",
"def compute_greedy_flow_allocations( target_graph\n , flow_selection_fn\n , seed_number=DEFAULT_SEED_NUMBER):\n\n flow_allocation_seed_number = seed_number\n np.random.seed(flow_allocation_seed_number)\n\n link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}\n flows = []\n\n while True:\n capacity_was_exceeded = False\n\n source_node, destination_node = flow_selection_fn(target_graph.nodes)\n flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)\n\n connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))\n disjoint_path_count = len(connecting_paths)\n flow_rate_per_subpath = flow_tx_rate / disjoint_path_count\n for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:\n for u, v in [tuple(sorted(t_i)) for t_i in path]:\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = connecting_paths\n , splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def cost(graph,a, undirected=True):\n cost = sum(sum(np.array(abs(graph-a))))\n if undirected:\n return cost/2.0\n else:\n return cost",
"def get_link_cost(self, global_clock_sec):\n return self.static_delay_sec + \\\n self.link_buffer.get_average_q_del_sec(global_clock_sec)",
"def calc_demand(self, routing: np.ndarray, demand: float,\n commodity_idx: int) -> np.ndarray:\n commodity = self.commodities[commodity_idx]\n node_flow = np.zeros(self.num_nodes)\n node_flow[commodity[0]] = demand\n\n split_matrix = np.zeros((self.num_nodes, self.num_nodes), dtype=float)\n for edge_idx, edge in enumerate(self.edges):\n split_matrix[edge[1]][edge[0]] = routing[commodity_idx][edge_idx]\n split_matrix[:, commodity[1]] = 0 # no send from the destination node\n\n edge_utilisation = np.zeros((self.num_nodes, self.num_nodes))\n\n num_steps = 0\n while True:\n change = np.multiply(split_matrix, node_flow)\n edge_utilisation += change\n node_flow = np.matmul(split_matrix, node_flow)\n if np.any(np.isnan(change)):\n print(\"is_nan :'(\")\n comparison = np.less(np.nan_to_num(change), self.min_delta)\n if np.logical_and.reduce(np.logical_and.reduce(comparison)):\n break\n num_steps += 1\n # if we take more than |E| steps we have cycles which is not good.\n # Therefore: end here with really bad reward, scaled by number of\n # cycles\n if num_steps > routing.shape[1]:\n remaining_flow = np.greater(np.nan_to_num(change), 0.0)\n edge_utilisation += np.multiply(remaining_flow, np.full(\n (self.num_nodes, self.num_nodes), demand))\n break\n\n return edge_utilisation",
"def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True",
"def bandwidth_share(self, nodes_efficiency: Dict[str, float]) -> Dict[str, float]:\n pass",
"def compute_unequal_flow_allocations(target_graph, K=3):\n\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n link_utilization = {}\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - {node}\n destination_node = np.random.choice(list(possible_destination_nodes), 1, \n replace=False).item()\n shortest_path = nx.shortest_path(target_graph, node, destination_node)\n the_flow = Flow( source_node = node\n , destination_node = destination_node\n , flow_tx_rate = 10.0\n , paths = [shortest_path]\n , splitting_ratio = [1.0]\n )\n flows.append(the_flow)\n\n return flow_allocation_seed_number, flows",
"def add_link_capacity(self, path, bw):\n\n # PART 1, TASK 3.4 add bw to edges",
"def calculate_demand(flow, requested_sf, available_sf, service_functions):\n\n if requested_sf in available_sf:\n vnf_need_placement = False\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n return demanded_total_capacity, vnf_need_placement\n else:\n vnf_need_placement = True\n available_sf[requested_sf] = {'load': 0.0}\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n del available_sf[requested_sf]\n return demanded_total_capacity, vnf_need_placement",
"def objective(graph, flows):\n\n G = graph.copy()\n rules = {}\n flows.sort(key=lambda a: a[1], reverse=True)\n\n for flow,demand in flows:\n src = get_host_from_ip(G, flow.nw_src)\n dst = get_host_from_ip(G, flow.nw_dst)\n\n if not (src and dst):\n continue\n if not (src in G.nodes() and dst in G.nodes()):\n continue\n\n path = widest_path(G, src, dst)\n\n hops = []\n for a,b in pairwise(path):\n hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))\n G.edge[a][b]['capacity'] -= demand\n G.edge[b][a]['capacity'] -= demand\n\n rules[flow] = hops\n\n return rules",
"def global_efficiency(graph):\n return nx.global_efficiency(graph.graph)",
"def maximum_flow(graph, start, end):\n if not isinstance(graph, BasicGraph):\n raise TypeError(f\"Expected subclass of BasicGraph, not {type(graph)}\")\n Graph = type(graph)\n \n if start not in graph:\n raise ValueError(f\"{start} not in graph\")\n if end not in graph:\n raise ValueError(f\"{end} not in graph\")\n\n inflow = sum(d for s, e, d in graph.edges(from_node=start))\n outflow = sum(d for s, e, d in graph.edges(to_node=end))\n unassigned_flow = min(inflow, outflow) # search in excess of this 'flow' is a waste of time.\n total_flow = 0\n # -----------------------------------------------------------------------\n # The algorithm\n # I reviewed a number of algorithms, such as Ford-fulkerson algorithm,\n # Edmonson-Karp and Dinic, but I didn't like them due to their naive usage\n # of DFS, which leads to a lot of node visits.\n #\n # I therefore choose to invert the capacities of the graph so that the\n # capacity any G[u][v] = c becomes 1/c in G_inverted.\n # This allows me to use the shortest path method to find the path with\n # most capacity in the first attempt, resulting in a significant reduction\n # of unassigned flow.\n #\n # By updating G_inverted, with the residual capacity, I can keep using the\n # shortest path, until the capacity is zero, whereby I remove the links\n # When the shortest path method returns 'No path' or when unassigned flow\n # is zero, I exit the algorithm.\n #\n # Even on small graphs, this method is very efficient, despite the overhead\n # of using shortest path. For very large graphs, this method outperforms\n # all other algorithms by orders of magnitude.\n # -----------------------------------------------------------------------\n\n edges = [(n1, n2, 1 / d) for n1, n2, d in graph.edges() if d > 0]\n inverted_graph = Graph(from_list=edges) # create G_inverted.\n capacity_graph = Graph() # Create structure to record capacity left.\n flow_graph = Graph() # Create structure to record flows.\n\n while unassigned_flow:\n # 1. find the best path\n d, path = shortest_path(inverted_graph, start, end)\n if d == float(\"inf\"): # then there is no path, and we must exit.\n return total_flow, flow_graph\n # else: use the path and lookup the actual flow from the capacity graph.\n\n path_flow = min([min(d, capacity_graph.edge(s, e, default=float(\"inf\"))) for s, e, d in graph.edges(path=path)])\n\n # 2. update the unassigned flow.\n unassigned_flow -= path_flow\n total_flow += path_flow\n\n # 3. record the flows and update the inverted graph, so that it is\n # ready for the next iteration.\n edges = graph.edges(path)\n for n1, n2, d in edges:\n # 3.a. recording:\n v = flow_graph.edge(n1, n2, default=None)\n if v is None:\n flow_graph.add_edge(n1, n2, path_flow)\n c = graph.edge(n1, n2) - path_flow\n else:\n flow_graph.add_edge(n1, n2, value=v + path_flow)\n c = graph.edge(n1, n2) - (v + path_flow)\n capacity_graph.add_edge(n1, n2, c)\n\n # 3.b. updating:\n # if there is capacity left: update with new 1/capacity\n # else: remove node, as we can't do 1/zero.\n if c > 0:\n inverted_graph.add_edge(n1, n2, 1 / c)\n else:\n inverted_graph.del_edge(n1, n2)\n return total_flow, flow_graph",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def compute_equal_flow_allocations(target_graph, K=3):\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - set([node])\n [destination_node] = np.random.choice(list(possible_destination_nodes), 1, replace=False)\n # shortest_paths = all_shortest_paths(target_graph, node, destination_node.item())\n shortest_paths = sorted(nx.all_simple_paths(target_graph, node, destination_node.item(),\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n the_flow = Flow( source_node = node\n , destination_node = destination_node.item()\n , flow_tx_rate = 10.0\n , paths = k_shortest_paths\n , splitting_ratio = [1/K]*K\n )\n flows.append(the_flow)\n \n return flow_allocation_seed_number, flows",
"def sub_link_capacity(self, path, bw):\n \n # PART 1, TASK 3.4 sub bw to edges",
"def check_link_availability(self, link):\n circuits = self.load_circuits()\n total = 0\n for circuit in circuits:\n exists = circuit.get_link(link)\n if exists:\n total += exists.bandwidth\n if total + link.bandwidth > 100000000000: # 100 Gigabits\n return None\n return total",
"def compare_greedy_dist_to_sync_dist(max_demand=8, show_graph=False):\n pass",
"def calc(self, demands: Demand, routing: Routing) -> np.ndarray:\n total_utilisation = np.zeros((self.num_nodes, self.num_nodes),\n dtype=float)\n\n for commodity_idx in range(len(self.commodities)):\n utilisation = self.calc_demand(routing,\n demands[commodity_idx],\n commodity_idx)\n total_utilisation += utilisation\n\n return np.max(np.divide(total_utilisation, self.edge_capacities))",
"def main(supply):\n\n # Define four parallel arrays: start_nodes, end_nodes, capacities, and unit costs\n # between each pair. For instance, the arc from node 0 to node 1 has a\n # capacity of 15 and a unit cost of 4.\n\n start_nodes = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 9]\n end_nodes = [8, 2, 4, 6, 5, 4, 7, 6, 9, 8, 9, 0, 3, 4, 2, 5, 1, 0, 2, 5, 1, 8, 3, 4, 1, 0, 8, 1, 1, 0, 9, 5, 6, 1, 8, 2]\n capacities = [23, 10, 25, 15, 17, 14, 10, 21, 17, 11, 22, 27, 14, 6, 19, 9, 11, 8, 29, 16, 22, 29, 20, 13, 18, 14, 20, 25, 13, 8, 10, 24, 5, 9, 20, 28]\n unit_costs = [6, 9, 7, 8, 8, 5, 8, 5, 6, 9, 6, 5, 6, 6, 9, 7, 8, 6, 9, 6, 5, 5, 8, 7, 5, 8, 7, 9, 7, 6, 9, 6, 5, 5, 6, 7]\n\n # Define an array of supplies at each node.\n supplies = supply\n\n\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n\n # Add each arc.\n for i in range(0, len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],\n capacities[i], unit_costs[i])\n\n # Add node supplies.\n\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n\n\n # Find the minimum cost flow between node 0 and node 4.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n flag = 1\n optimal_flows = np.zeros(36)\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # save answer to the variable\n optimal_flows[i] = min_cost_flow.Flow(i)\n return flag, optimal_flows\n else:\n print('There was an issue with the min cost flow input.')\n flag = 0\n return flag, 0",
"def get_expected_cost(self):"
]
| [
"0.71253026",
"0.67660636",
"0.66454524",
"0.6522981",
"0.6514812",
"0.647129",
"0.6460042",
"0.6392503",
"0.6243768",
"0.62313336",
"0.6146924",
"0.61348885",
"0.6108246",
"0.5984085",
"0.5855024",
"0.58338815",
"0.5774301",
"0.57635266",
"0.5752603",
"0.57300353",
"0.5705901",
"0.566435",
"0.5619962",
"0.5604273",
"0.55933666",
"0.55786896",
"0.5571607",
"0.54947335",
"0.548535",
"0.5471225"
]
| 0.77368706 | 0 |
Calculates the overall utilisation of each link in a network given a routing choice and a set of demands. (NB: utilisation is measured in bandwidth, not relative to link capacity.) | def calc_overall_link_utilisation(self, demands: Demand,
routing: Routing) -> np.ndarray:
flows = [(i, j) for i in range(self.num_nodes)
for j in range(self.num_nodes)
if i != j]
link_utilisation = np.zeros(self.num_edges)
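        # demands[i] and routing[i] are indexed in the same (src, dst) order as flows;
        # each flow's per-link utilisation is accumulated into one per-edge vector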
for i, flow in enumerate(flows):
            flow_link_utilisation = self.calc_per_flow_link_utilisation(
                flow, demands[i], routing[i])
link_utilisation += flow_link_utilisation
return link_utilisation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)",
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def calculate_available_link_res (self, sg_hops_to_be_ignored, mode=MODE_ADD):\n # set availbandwidth to the maximal value\n for i, j, k, d in self.network.edges_iter(data=True, keys=True):\n if d.type == 'STATIC':\n setattr(self.network[i][j][k], 'availbandwidth', d.bandwidth)\n # subtract the reserved link and internal (inside Infras) bandwidth\n if mode == self.MODE_ADD:\n for d in self.infras:\n for p in d.ports:\n for fr in p.flowrules:\n if fr.id not in sg_hops_to_be_ignored and fr.bandwidth is not None:\n # Flowrules are cummulatively subtracted from the switching \n # capacity of the node.\n d.availres['bandwidth'] -= fr.bandwidth\n if d.availres['bandwidth'] < 0:\n raise RuntimeError(\"The node bandwidth of %s got below zero \"\n \"during available resource calculation!\" %\n d.id)\n # Get all the mapped paths of all SGHops from the NFFG\n sg_map = NFFGToolBox.get_all_sghop_info(self, return_paths=True)\n for sg_hop_id, data in sg_map.iteritems():\n src, dst, flowclass, bandwidth, delay, path = data\n if bandwidth is not None:\n for link in path:\n link.availbandwidth -= bandwidth\n if link.availbandwidth < 0:\n raise RuntimeError(\n \"The link bandwidth of %s got below zero during\"\n \"available resource calculation!\" % link.id)",
"def calc(self, demands: Demand, routing: Routing) -> np.ndarray:\n total_utilisation = np.zeros((self.num_nodes, self.num_nodes),\n dtype=float)\n\n for commodity_idx in range(len(self.commodities)):\n utilisation = self.calc_demand(routing,\n demands[commodity_idx],\n commodity_idx)\n total_utilisation += utilisation\n\n return np.max(np.divide(total_utilisation, self.edge_capacities))",
"def compute_link_utilization_over_time(link_byte_counts):\n def find_matching_iface_stats(byte_count, source_id, destination_id):\n matching_stats = [d_i for d_i in byte_count\n if d_i[\"sourceSwitchId\"] == source_id and\n d_i[\"destinationSwitchId\"] == destination_id]\n if len(matching_stats) != 1:\n raise ValueError(\"Unexpected results in find_matching_iface_stats. \\\n Found %d matching iface_stats\" % len(matching_stats))\n return matching_stats[0]\n\n def compute_tx_rate(count_in_bytes):\n return (count_in_bytes * 8) / 10.0**7\n\n # First compute the delta between the iface_stats in time_period t_i and the iface_stats\n # in time period t_{i+1}.\n # tx_rate_t: (source_id x destination_id) -> link_utilization_in_time_period_t forall. t\n tx_rate_t = []\n for t_0, t_1 in zip(link_byte_counts, link_byte_counts[1:]):\n byte_count_delta_t = defaultdict(float)\n for iface_stats in t_0:\n source_id = iface_stats[\"sourceSwitchId\"]\n destination_id = iface_stats[\"destinationSwitchId\"]\n t_0_count = iface_stats[\"bytesSent\"] + iface_stats[\"bytesReceived\"]\n try:\n t_1_stats = find_matching_iface_stats(t_1, source_id, destination_id)\n t_1_count = t_1_stats[\"bytesSent\"] + t_1_stats[\"bytesReceived\"]\n except ValueError:\n t_1_count = t_0_count\n\n count_delta = t_1_count - t_0_count\n link_key = compute_link_key(source_id, \n destination_id)\n byte_count_delta_t[link_key] += count_delta\n\n tx_rate_t.append({the_link_key: compute_tx_rate(byte_count_t) \n for the_link_key, byte_count_t in byte_count_delta_t.items()})\n return tx_rate_t",
"def compute_path_metric(self, sw, path, util, time_now, local_contrib):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n # Use the last-learned-via-sync value for a link\n if (not local_contrib) and 'sync_learned' in self.graph[u][v]:\n used1 = self.graph[u][v]['sync_learned'] + util\n used2 = self.graph[u][v]['used'] + util\n # ['used'] is a strict lower bound for ['sync_learned']\n if used1 > used2: \n used = used1\n logging.debug(\"CS [%s] using sync_learned value 1 [%f]\", str(self.name), used1)\n else:\n used = used2\n logging.debug(\"CS [%s] using sync_learned value 2 [%f]\", str(self.name), used2)\n else:\n logging.debug(\"CS [%s] using tracking value\", str(self.name))\n used = self.graph[u][v]['used'] + util\n\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))",
"def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True",
"def calc_lp(self, demands: Demand, routing: Routing) -> float:\n epsilon = self.epsilon\n\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('flow_utilisation_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the amount of flow along each edge\n # stored as a list flow_variables[ith_flow][jth_edge]\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(len(self.edges)):\n flow_variable_edges.append(\n solver.NumVar(0, solver.infinity(), '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Flow from source constraint (net flow must equal demand)\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create constraint\n constraint_i = solver.Constraint(demands[i] - epsilon,\n demands[i] + epsilon,\n '(source,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]:\n # out flow is positive\n constraint_i.SetCoefficient(flow_variables[i][edge_index], 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]:\n # in flow is negative\n constraint_i.SetCoefficient(flow_variables[i][edge_index], -1)\n conservation_source_constraints.append(constraint_i)\n\n # Flow to sink constraint (in flow must equal demand, out must be zero)\n conservation_sink_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create in flow constraint\n constraint_i_in = solver.Constraint(-demands[i] - epsilon,\n -demands[i] + epsilon,\n '(sink_in,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[1])]:\n # in flow is negative\n constraint_i_in.SetCoefficient(flow_variables[i][edge_index],\n -1)\n conservation_sink_constraints.append(constraint_i_in)\n\n constraint_i_out = solver.Constraint(0, 0,\n '(sink_out,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[1])]:\n # out flow is positive\n constraint_i_out.SetCoefficient(flow_variables[i][edge_index],\n 1)\n conservation_sink_constraints.append(constraint_i_out)\n\n # Flow at transit node constraint (net flow must be zero)\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n if j != commodity[0] and j != commodity[1]:\n # create constraint\n constraint_j = solver.Constraint(-epsilon, +epsilon,\n '(transit,{},{})'.format(i,\n j))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]:\n # out flow is positive\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]:\n # in flow is negative\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Flow splitting at transit constraints (edge flow must be correct split of\n # in flow)\n splitting_ratio_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n # Sink has not such constraint and we handle source differently\n if j != commodity[1] and j != commodity[0]:\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]\n out_edges = 
[self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]\n\n # separate constraint for split of each out_edge taking into\n # account all in_edges\n for out_edge_index in out_edges:\n # create constraint\n constraint_edge = \\\n solver.Constraint(-epsilon, +epsilon,\n '(split,{},{},{})'.format(\n i, j,\n out_edge_index))\n split_ratio = routing[i][out_edge_index]\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by\n # split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n splitting_ratio_constraints.append(constraints_flow_i)\n\n # Flow splitting at source constraints (edge flow must be correct split of\n # in flow + demand)\n source_splitting_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]\n out_edges = [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]\n for out_edge_index in out_edges:\n # create constraint\n split_ratio = routing[i][out_edge_index]\n split_demand = split_ratio * demands[i]\n constraint_edge = \\\n solver.Constraint(split_demand - epsilon,\n split_demand + epsilon,\n '(split,{},{},{})'.format(i, j,\n out_edge_index))\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n source_splitting_constraints.append(constraints_flow_i)\n\n solver.Solve()\n\n result_status = solver.Solve()\n\n utilisation = np.zeros(\n (len(self.commodities), self.graph.number_of_edges()))\n # # extract the actual routing. Useful for debugging, maybe use to bootstrap\n # assignment = np.zeros(\n # (len(self.commodities), self.graph.number_of_edges()))\n\n # if routing is really that bad, just bail and give a sad result\n if result_status == solver.NOT_SOLVED or result_status == solver.INFEASIBLE:\n return 1.0\n\n for i in range(len(self.commodities)):\n for j in range(self.graph.number_of_edges()):\n utilisation[i][j] = flow_variables[i][j].solution_value() / \\\n self.edges[j][2]['weight']\n # assignment[i][j] = flow_variables[i][j].solution_value()\n\n return np.max(np.sum(utilisation, axis=0))",
"def compute_path_metric(self, sw, path, util, time_now):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n #DESIGN CHOICE: Should we 1) always include extra-domain state, 2)\n #only include extra-domain state when not stale (timestamp), 3) always exclude\n #extra-domain state when calculating the path metric? Here we do (1)\n used = self.graph[u][v]['used'] + util\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))",
"def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost",
"def cost(route):\n cost = 0\n for li in route:\n if cost<li.get_cost():\n cost=li.get_cost()\n return cost",
"def calc_demand(self, routing: np.ndarray, demand: float,\n commodity_idx: int) -> np.ndarray:\n commodity = self.commodities[commodity_idx]\n node_flow = np.zeros(self.num_nodes)\n node_flow[commodity[0]] = demand\n\n split_matrix = np.zeros((self.num_nodes, self.num_nodes), dtype=float)\n for edge_idx, edge in enumerate(self.edges):\n split_matrix[edge[1]][edge[0]] = routing[commodity_idx][edge_idx]\n split_matrix[:, commodity[1]] = 0 # no send from the destination node\n\n edge_utilisation = np.zeros((self.num_nodes, self.num_nodes))\n\n num_steps = 0\n while True:\n change = np.multiply(split_matrix, node_flow)\n edge_utilisation += change\n node_flow = np.matmul(split_matrix, node_flow)\n if np.any(np.isnan(change)):\n print(\"is_nan :'(\")\n comparison = np.less(np.nan_to_num(change), self.min_delta)\n if np.logical_and.reduce(np.logical_and.reduce(comparison)):\n break\n num_steps += 1\n # if we take more than |E| steps we have cycles which is not good.\n # Therefore: end here with really bad reward, scaled by number of\n # cycles\n if num_steps > routing.shape[1]:\n remaining_flow = np.greater(np.nan_to_num(change), 0.0)\n edge_utilisation += np.multiply(remaining_flow, np.full(\n (self.num_nodes, self.num_nodes), demand))\n break\n\n return edge_utilisation",
"def rate(way):\n cost = 0\n for i in range(len(way)-1):\n cost += DISTANCES[way[i]][way[i+1]]\n return cost",
"def astar_heuristic(n1, n2):\n average_speed = 70\n return edge_weight(n1, n2, 70)",
"def route_cost(self, route):\n total_weight = 0\n c = 0\n start = route[0]\n for end in route[1:]:\n y = float(self.stars[start][end]['weight']) - c\n t = total_weight + y\n c = (t - total_weight) - y\n\n total_weight = t\n\n start = end\n return total_weight",
"def cost(self, route: List[int]) -> float:\n raise NotImplementedError",
"def build_links_capacity(self):\n\n links_capacity = {}\n # Iterates all the edges in the topology formed by switches\n for src, dst in self.topo.keep_only_p4switches().edges:\n bw = self.topo.edges[(src, dst)]['bw']\n # add both directions\n links_capacity[(src, dst)] = bw\n links_capacity[(dst, src)] = bw\n\n return links_capacity",
"def caculate_network_statistics(self):\n divide_factor_sum = 0 \n for key in self.stars.keys():\n star = self.stars[key]\n if star.nb_num == 0 :\n self.standalone_star_num += 1 \n\n divide_factor = star.nb_num + 2 * (star.spec_num - star.shared_spec_num )/self.ave_starlet_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.star_divide_factor_dist[divide_factor_int] = self.star_divide_factor_dist.get(divide_factor_int,0) + 1\n if star.spec_num < star.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with starlets\")\n print(\"with star \" + star.id + \" \" + str(star.spec_num) + \"is less than\" + str(star.shared_spec_num))\n if star.spec_num > star.shared_spec_num:\n self.star_lost_spec_num += star.spec_num - star.shared_spec_num\n self.ave_divide_factor_star = divide_factor_sum/self.stars_length\n\n divide_factor_sum = 0 \n for key in self.starlets.keys():\n starlet = self.starlets[key]\n if starlet.nb_num == 0 :\n self.standalone_starlet_num += 1 \n\n divide_factor = starlet.nb_num + 2 * (starlet.spec_num - starlet.shared_spec_num )/self.ave_star_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.starlet_divide_factor_dist[divide_factor_int] = self.starlet_divide_factor_dist.get(divide_factor_int,0) + 1\n if starlet.spec_num < starlet.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with starlets\")\n print(\"with star \" + starlet.id + \" \" + str(starlet.spec_num) + \"is less than\" + str(starlet.shared_spec_num))\n if starlet.spec_num > starlet.shared_spec_num:\n self.starlet_lost_spec_num += starlet.spec_num - starlet.shared_spec_num\n self.ave_divide_factor_starlet = divide_factor_sum/self.starlets_length",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def search_optimal_capacities(network, step_size, tolerance, filename):\r\n ## Initialization\r\n # Initialize the value of total flow over the network\r\n totalflow = max(network.lb_totalflow, step_size)\r\n \r\n # An auxiliary threshold of the total flow computed based on the capacity upper bounds, used in Line 4 of Algorithm 3.\r\n aux_bound = 1 - np.exp(network.beta - network.b + network.phi/network.u)\r\n \r\n \r\n # Initialize the bounds for flow over each route\r\n ub_flow = np.zeros(network.num_routes)\r\n lb_flow = np.zeros(network.num_routes)\r\n \r\n # Initialize the optimal solution over the network\r\n opt_socialwelfare = np.array([])\r\n opt_totalflow = 0\r\n opt_flows = np.array([])\r\n opt_capacity = np.zeros(network.num_routes)\r\n \r\n\r\n# # For debugging only\r\n# lower_bound = np.zeros(network.num_routes)\r\n# upper_bound = np.zeros(network.num_routes)\r\n# count = 0\r\n \r\n # Try to plot out the (totalflow, social_welfare) scatter plot\r\n z = []\r\n hz = []\r\n# # End of debugging\r\n\r\n ## Start the search\r\n while totalflow < 1 - tolerance:\r\n flag_nofeasibleflow = False\r\n \r\n # Compute the bounds for the flow.\r\n for i in range(network.num_routes):\r\n # Line 3-8 of Algorithm 3. Compute the upper bounds for the flow.\r\n if totalflow >= aux_bound[i]: \r\n x3_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 3) \r\n if x3_star > network.u[i]:\r\n flag_nofeasibleflow = True\r\n break \r\n else:\r\n ub_flow[i] = x3_star \r\n else: \r\n ub_flow[i] = 1 \r\n # Line 9-10 of Algorithm 3. Compute the lower bounds of the flow.\r\n x1_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 1)\r\n x2_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 2)\r\n lb_flow[i] = max(x1_star, x2_star)\r\n \r\n \r\n if not flag_nofeasibleflow:\r\n # Check feasibility of the flow based on the current total flow, lower and upper bounds of the flow\r\n if totalflow < np.sum(lb_flow) or totalflow > np.sum(ub_flow): \r\n totalflow += step_size \r\n\r\n# # For debugging only\r\n# print(\"\\nThe current total flow is: \" + str(totalflow))\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # Eng of debugging\r\n# \r\n continue\r\n \r\n # The implementation of line 11 to 18. Find the optimal flow given the current value of z.\r\n [opt_obj, opt_x] = ip.max_sum_xlogx(network.num_routes, totalflow, lb_flow, ub_flow) \r\n \r\n \r\n # Line 18 of Algorithm 3. 
Compute the social welfare given the current z and optimal q(z).\r\n temp = opt_obj - totalflow * np.log(1-totalflow)\r\n\r\n ##### Testing: to plot out the function of h(z)\r\n z.append(totalflow)\r\n hz.append(temp)\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n if opt_socialwelfare.size == 0 or temp > opt_socialwelfare:\r\n opt_socialwelfare = temp\r\n opt_flows = opt_x\r\n opt_totalflow = totalflow \r\n \r\n # For debugging only\r\n# print(\"\\nUpdate optimal flow\")\r\n# print(opt_x)\r\n# print(lb_flow)\r\n# print(ub_flow)\r\n# print(\"Total flow is \" + str(opt_totalflow)) \r\n \r\n # For debugging\r\n# np.copyto(lower_bound, lb_flow) \r\n# np.copyto(upper_bound, ub_flow) \r\n# count += 1\r\n# print(\"The lower and upper bounds are: \")\r\n# print(lb_flow)\r\n# print(lower_bound)\r\n# print(\"\\n\")\r\n# print(ub_flow)\r\n# print(upper_bound)\r\n# print(\"\\n\")\r\n \r\n totalflow += step_size \r\n\r\n \r\n \r\n# # For debugging only\r\n# print(\"\\n----------------\\n Exiting the while loop.\")\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count)) \r\n# # Eng of debugging\r\n \r\n # Line 20 of ALgorithm 3\r\n if opt_flows.size > 0:\r\n network.update_flow(opt_flows) \r\n for i in range(network.num_routes): \r\n network.compute_capacity(opt_totalflow, i)\r\n opt_capacity[i] = network.capacity[i]\r\n print(\"\\n--------------\\nThe optimal flow is: \")\r\n print(opt_flows)\r\n print(\"\\n--------------\\nThe optimal parking capacity is: \")\r\n print(opt_capacity) \r\n print(\"\\n--------------\\nThe optimal total flow is \" + str(opt_totalflow))\r\n print(\"\\n--------------\\nThe maximum social welfare is \" + str(opt_socialwelfare) +\".\")\r\n \r\n \r\n ##### Testing: to plot out the function of h(z)\r\n #plt.scatter(z, hz, c='r', marker='r')\r\n plt.plot(z, hz, '-', linewidth=0.5)\r\n #plt.xlim(0.5, 1)\r\n plt.savefig(filename + '.png', bbox_inches='tight')\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n \r\n \r\n# # For debugging\r\n# temp1 = np.zeros(network.num_routes)\r\n# temp2 = np.zeros(network.num_routes)\r\n# temp3 = np.zeros(network.num_routes)\r\n# for i in range(network.num_routes): \r\n# temp1[i] = zeta(network, i, opt_flows[i], opt_totalflow, 1)\r\n# temp2[i] = zeta(network, i, opt_flows[i], opt_totalflow, 2)\r\n# temp3[i] = zeta(network, i, opt_flows[i], opt_totalflow, 3)\r\n# print(\"The function value of zeta at the optimal flow: \")\r\n# print(temp1)\r\n# print(temp2)\r\n# print(temp3)\r\n# \r\n# # For debugging\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # End of debugging\r\n \r\n return opt_flows, opt_capacity, opt_socialwelfare \r\n else:\r\n print(\"\\nNo optimal solution is found!\")\r\n return np.array([]), opt_capacity, opt_socialwelfare",
"def combined_costs(matrix_MSLL_IO):\r\n return",
"def find_standby2(model, limit, w_a, w_b, w_m, demand_path, demand_dict, slink_dict):\n #print slink_dict\n #print \"FIND\"\n failed_dict = model.failed_dict\n #print failed_dict, limit\n dist_matrix = model.cost_dict['dist']\n rtt_matrix = model.cost_dict['rtt']\n bw_matrix = model.cost_dict['bw']\n #cpu_vector = model.cost_dict['cpu']\n selected_dict = {}\n \n # get aggregated residual bw for all substrate nodes, and store it as a list\n snode_bw_list = total_bw(bw_matrix)\n \n # get total capacity and used bw for each snode\n node_port_list, used_bw_list = total_port(model)\n #vnet_set = model.vnets\n sorted_vn = sort_vnet(model)\n\n #for vnet in vnet_set:\n if w_m[2] >= 10*w_m[1]:\n threshold = 0.8\n else:\n threshold = 0.8\n snode_traffic = {}\n for vn_traffic in sorted_vn:\n vnet = vn_traffic[0]\n failed_vr = failed_dict[vnet.vnet_id]\n if failed_vr != -1: \n # this node is failed\n standby_list = vnet.get_standby_ids()\n standby_cost = {}\n for s_vr in standby_list:\n dist_f = float(dist_matrix[s_vr][failed_vr + 1])\n failed_node = vnet.vnodes[failed_vr]\n vneighbors = failed_node.vneighbors\n dist_k = 0\n rtt_k = 0\n for k in vneighbors:\n dist_k += float(dist_matrix[s_vr][k + 1])\n rtt_k += float(rtt_matrix[s_vr][k + 1])\n\n connect_cost = w_b[0] * (w_a[0] * dist_f + w_a[1] * dist_k) +\\\n w_b[1] * rtt_k\n res_cost = snode_bw_list[s_vr]\n req_bw = sum(failed_node.neighbor_traffic.values())\n total = w_m[1] * connect_cost + w_m[2] * req_bw / res_cost\n standby_cost[s_vr] = total\n sorted_x = sorted(standby_cost.iteritems(), key=operator.itemgetter(1))\n #print \"SORTED\", sorted_x\n \n for item in sorted_x:\n if item[0] not in snode_traffic:\n #utilization = vn_traffic[1] / total_bw(bw_matrix)[item[0]]\n utilization = (vn_traffic[1] + used_bw_list[item[0]])/node_port_list[item[0]]\n else:\n #utilization = (snode_traffic[item[0]] + vn_traffic[1]) / total_bw(bw_matrix)[item[0]]\n utilization = (snode_traffic[item[0]] + vn_traffic[1] + used_bw_list[item[0]])/node_port_list[item[0]]\n #print utilization\n # Link-Path selsection add-on\n path_alloc = 1\n for k in vneighbors:\n demand_id = find_demand_id(demand_dict, vnet.vnet_id, failed_vr + 1,\n item[0] + 1, k + 1)\n demand = demand_dict[demand_id]['capacity']\n find, path = find_path(demand_path, demand_id, \n slink_dict, demand) \n if find == 0:\n print \"No available path between svr and nbr on the substrate network\" \n path_alloc = 0\n #print \"FIND PATH\", find, path\n # End link-path block \n #print \"ALLOCATED: \", path_alloc\n if path_alloc == 1:\n if selected_dict.values().count(item[0]) < limit:\n if utilization < threshold and w_m[2] >= 10*w_m[1]:\n if item[0] not in snode_traffic: \n selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n snode_traffic[item[0]] = vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1], slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n break;\n else:\n min_id = find_min(sorted_x, bw_matrix, snode_traffic, vn_traffic[1]) \n if min_id == item[0]:\n selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n snode_traffic[item[0]] += vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1],slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n #threshold = (threshold + 0.01)/2\n break\n elif utilization < threshold:\n 
selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n if item[0] not in snode_traffic: \n snode_traffic[item[0]] = vn_traffic[1]\n else:\n snode_traffic[item[0]] += vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1],slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n break\n else:\n print \"does not satisfy the threshold\" \n # if a svr is selected -- item[0]\n \n \n \n else:\n print \"cannot allocate paths\"\n \n #print slink_dict \n return selected_dict, slink_dict",
"def bandwidth_share(self, nodes_efficiency: Dict[str, float]) -> Dict[str, float]:\n pass",
"def compute_greedy_flow_allocations( target_graph\n , flow_selection_fn\n , seed_number=DEFAULT_SEED_NUMBER):\n\n flow_allocation_seed_number = seed_number\n np.random.seed(flow_allocation_seed_number)\n\n link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}\n flows = []\n\n while True:\n capacity_was_exceeded = False\n\n source_node, destination_node = flow_selection_fn(target_graph.nodes)\n flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)\n\n connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))\n disjoint_path_count = len(connecting_paths)\n flow_rate_per_subpath = flow_tx_rate / disjoint_path_count\n for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:\n for u, v in [tuple(sorted(t_i)) for t_i in path]:\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = connecting_paths\n , splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)",
"def get_obj_new(model_old, demand_path, slink_dict, demand_dict, w_a, w_b, theta, s_limit):\n model = copy.deepcopy(model_old)\n \n #cpu_vector = model.cost_dict['cpu']\n dist_matrix = model.cost_dict['dist']\n rtt_matrix = model.cost_dict['rtt']\n #bw_matrix = model.cost_dict['bw']\n w_a1, w_a2 = w_a\n w_b1, w_b2 = w_b\n theta1, theta2, theta3 = theta\n # find total capacity and used bw on substrate nodes\n node_port_list, used_bw_list = total_port(model)\n #print \"CHECK POINT-1\", used_bw_list, node_port_list\n vnet_info = model.get_vnet_info()\n infeasible = 0\n\n fail_nodes = model.failed_dict\n start_time = time.time()\n select_dict, slink_dict = find_standby2(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n # random selection\n #select_dict, slink_dict = find_random(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n #print \"selection takes: \", time.time() - start_time\n #print \"Selected\", select_dict\n sum_cost_1_2 = 0\n r_list = {}\n \n for node_id in range(0,len(used_bw_list)):\n r_list[node_id] = used_bw_list[node_id]/node_port_list[node_id]\n #print \"INITIAL\", r_list\n svr_subset = {}\n for vnet in model.vnets:\n j = vnet.vnet_id\n f = fail_nodes[j]\n # only count the failed virtual network\n if f == -1:\n pass\n else:\n for node_id in vnet_info[j]['standby']:\n svr_subset[node_id] = r_list[node_id]\n #print \"Subset SVR: \", svr_subset\n \n if j in select_dict:\n #print \"FEASIBLE FOUND\"\n #print vnet_info[j]['standby']\n i = select_dict[j]\n failed_node = vnet.vnodes[f]\n vneighbors = failed_node.vneighbors\n for k in vneighbors: \n eta = operation_cost()\n #print \"a1: \", w_a1, \"a2: \", w_a2, \"b1: \", w_b1, \"b2: \", w_b2\n dist_c = dist_cost(i, f, k, dist_matrix, w_a1, w_a2)\n rtt_c = rtt_cost(i, k, rtt_matrix)\n sigma = connect_cost(dist_c, rtt_c, w_b1, w_b2)\n sum_cost_1_2 += theta1 * eta + theta2 * sigma\n #find residual bw on substrate node\n #xi = resource_cost(bw_matrix, i)\n req_bw = sum(failed_node.neighbor_traffic.values())\n #util = req_bw / xi\n #print req_bw, i, used_bw_list[i], node_port_list[i]\n util = req_bw/node_port_list[i]\n \n if i not in r_list:\n r_list[i] = util #+ used_bw_list[i]/node_port_list[i]\n else:\n r_list[i] += util\n svr_subset[i] = r_list[i]\n #print sigma, \" v_\" + str(vnet.vnet_id) + \"_\" + str(f) + \"_\" + str(i) + \"_\" + str(k) + \"_\" + str(k)\n else:\n print \"INFEASIBLE at vnet: \", j\n infeasible = 1\n #print \"DONE\"\n \n if infeasible == 0:\n #print svr_subset\n max_util = max(svr_subset.values())\n obj = sum_cost_1_2 + theta3 * max_util\n else:\n obj = \"infeasible\"\n max_util = \"none\"\n #print obj\n used_time = time.time() - start_time\n return obj, select_dict, max_util, used_time",
"def utilization(self):\n\n counters = net_io_counters()\n return LinuxNetworkMetrics._util_tuple(counters.bytes_recv,\n counters.bytes_sent) # TODO max\n # ethtool",
"def calculate_cost(route, adjacency_matrix):\n route_shifted = np.roll(route,1)\n cost = np.sum(adjacency_matrix[route, route_shifted])\n st_dev = np.std(adjacency_matrix[route, route_shifted])\n return st_dev, cost",
"def check_link_availability(self, link):\n circuits = self.load_circuits()\n total = 0\n for circuit in circuits:\n exists = circuit.get_link(link)\n if exists:\n total += exists.bandwidth\n if total + link.bandwidth > 100000000000: # 100 Gigabits\n return None\n return total",
"def opt(self, demands: Demand) -> float:\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('multicommodity_flow_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the splitting ratios for each edge\n # Stored as a list of lists (flow_variables[ith_flow][jth_edge])\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(self.num_edges):\n flow_variable_edges.append(\n solver.NumVar(0, 1, '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Capacity constraint\n capacity_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint between 0 and edge capacity\n constraint_i = solver.Constraint(\n 0, self.graph.get_edge_data(*edge)['weight'],\n '(1,{},{})'.format(*edge))\n for j, commodity in enumerate(self.commodities):\n # Coefficient for jth flow over ith edge is scaled by flow width\n constraint_i.SetCoefficient(flow_variables[j][i],\n # cast because or-tools :'(\n float(demands[j]))\n capacity_constraints.append(constraint_i)\n\n # Conservation on transit nodes\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.num_nodes):\n if j != commodity[0] and j != commodity[1]:\n # Constraint must sum to zero\n constraint_j = solver.Constraint(0, 0,\n '(2,{},{})'.format(i, j))\n for k in list(sorted(self.graph.adj[j].keys())):\n # Ingress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(k, j)]], 1)\n # Egress edges\n constraint_j.SetCoefficient(\n flow_variables[i][self.edge_index_dict[(j, k)]], -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Conservation of flow at source node\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(3,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[0]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[0], edge_dest)]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[0])]],\n -1)\n conservation_source_constraints.append(constraint_i)\n\n # Conservation of flow at destination node\n conservation_dest_constraints = []\n for i, commodity in enumerate(self.commodities):\n # Constraint must sum to one (assuming all the demand can be met)\n constraint_i = solver.Constraint(1, 1, '(4,{})'.format(i))\n for edge_dest in list(sorted(self.graph.adj[commodity[1]].keys())):\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(edge_dest, commodity[1])]],\n 1)\n constraint_i.SetCoefficient(\n flow_variables[i][\n self.edge_index_dict[(commodity[1], edge_dest)]],\n -1)\n conservation_dest_constraints.append(constraint_i)\n\n ## OBJECTIVES\n # Implementation of the load-balancing example from Wikipedia\n # First we add more constraints so that we are minimising the maximum\n max_utilisation_variable = solver.NumVar(0, solver.Infinity(),\n 'max_link_utilisation')\n min_of_max_constraints = []\n for i, edge in enumerate(self.edges):\n # Constraint that '-inf < f_0 + f_1 +... - max < 0'\n # i.e 'f_0 + f_1 + ... 
< max'\n constraint_i = solver.Constraint(-solver.Infinity(), 0,\n '(5,{})'.format(i))\n constraint_i.SetCoefficient(max_utilisation_variable, -1)\n for j, flow_variable in enumerate(flow_variables):\n constraint_i.SetCoefficient(flow_variable[i],\n demands[j] /\n self.graph.get_edge_data(*edge)[\n 'weight'])\n min_of_max_constraints.append(constraint_i)\n\n # Objective now is to minimise the maximum link utilisation\n objective = solver.Objective()\n objective.SetCoefficient(max_utilisation_variable, 1)\n objective.SetMinimization()\n solver.Solve()\n\n return objective.Value()"
]
| [
"0.69715726",
"0.6957851",
"0.6760472",
"0.6361233",
"0.6345169",
"0.61192685",
"0.61020505",
"0.6086766",
"0.60547054",
"0.5906217",
"0.5893766",
"0.58539325",
"0.58028054",
"0.56963044",
"0.5695608",
"0.5679503",
"0.56758183",
"0.56425416",
"0.5628271",
"0.5583995",
"0.55727",
"0.55416006",
"0.5533431",
"0.5531597",
"0.5522171",
"0.5478762",
"0.54680395",
"0.5465506",
"0.5453772",
"0.5453414"
]
| 0.75995314 | 0 |
Takes a time interval, number of steps, adjacency matrix, payoff matrix (or matrices), relationship matrix and initial state of all vertices, and returns the result of the hyperrational evolutionary game played on the given graph over that time interval. | def hr_game(t0, tf, n, A, B, R, x0):
# t0 - Initial time
# tf - Final time
# n - Number of steps
# A - Adjacency matrix, np.ndarray (N,N)
# B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)
# R - Relationship or preference matrix, np.ndarray (N,N)
# x0 - Initial state of our system, np.ndarray (N,S), must be double
# Number of players
N = A[:, 0].size
# Number of strategies
S = x0[0, :].size
# Step in each iteration
h = (tf - t0) / n
# Result of each step, np.ndarray (N, S, n+1)
y = np.zeros([N, S, n+1], dtype='double')
y[:, :, 0] = x0
k = np.zeros([N, S])
    # I still don't know why, but there's a problem with negative payoffs
B = matrixTranslate(B)
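    # (matrixTranslate is defined elsewhere; it presumably shifts the payoffs so they are non-negative)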
# Fourth order Runge-Kutta
for t in range(n):
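        # k1..k4 evaluate hr_egn at the start, the two midpoints, and the end of the step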
k1 = np.multiply(h, hr_egn(A, B, R, y[:, :, t]))
k2 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k1, 2))))
k3 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k2, 2))))
k4 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], k3)))
# k = (k1 + 2*k2 + 2*k3 + k4)/6
k = np.divide(np.add(np.add(k1, np.multiply(2, k2)), np.add(np.multiply(2, k3), k4)), 6)
y[:, :, t+1] = np.add(y[:, :, t], k)
        # Clamp values within sqrt(machine epsilon) of 0 or 1 to exactly 0 or 1
for v in range(N):
for s in range(S):
if y[v, s, t+1] < np.sqrt(np.finfo('double').eps):
y[v, s, t+1] = 0
elif y[v, s, t+1] > np.subtract(1, np.sqrt(np.finfo('double').eps)):
y[v, s, t + 1] = 1
return y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def params(timeseries_input):\n # Settings for Nelder Mead Algorithm\n global timeseries\n timeseries=timeseries_input\n\n NumIters = 1 # First Iteration\n MaxIters = 1e3 # Maximum number of iterations\n Tolerance = 1e-5 # Tolerance on best and worst function values\n N = 5 # Number of Heston and Nandi parameters\n r = 0.01 / 252.0 # Risk Free Rate\n\n # Heston and Nandi parameter starting values (vertices) in vector form\n\n x = [[0 for i in range(N + 1)] for j in range(N)]\n x[0][0] = 5.02e-6;\n x[0][1] = 5.12e-6;\n x[0][2] = 5.00e-6;\n x[0][3] = 4.90e-6;\n x[0][4] = 4.95e-6;\n x[0][5] = 4.99e-6 # omega\n x[1][0] = 1.32e-6;\n x[1][1] = 1.25e-6;\n x[1][2] = 1.35e-6;\n x[1][3] = 1.36e-6;\n x[1][4] = 1.30e-6;\n x[1][5] = 1.44e-6 # alpha\n x[2][0] = 0.79;\n x[2][1] = 0.80;\n x[2][2] = 0.78;\n x[2][3] = 0.77;\n x[2][4] = 0.81;\n x[2][5] = 0.82 # beta\n x[3][0] = 427.0;\n x[3][1] = 421.0;\n x[3][2] = 425.0;\n x[3][3] = 419.1;\n x[3][4] = 422.1;\n x[3][5] = 430.0 # gamma\n x[4][0] = 0.21;\n x[4][1] = 0.20;\n x[4][2] = 0.22;\n x[4][3] = 0.19;\n x[4][4] = 0.18;\n x[4][5] = 0.205 # lambda\n\n # Run Nelder Mead and output Nelder Mead results\n B = NelderMead(LogLike, N, NumIters, MaxIters, Tolerance, x, r)\n\n #\tprint(\"Nelder Mead Minimization of Log-Likelihood for Heston and Nandi parameters\")\n #\tprint(\"---------------------------------\")\n #\tprint(\"omega = \", B[0])\n #\tprint(\"alpha = \", B[1])\n #\tprint(\"beta = \", B[2])\n #\tprint(\"gamma = \", B[3])\n #\tprint(\"lambda = \", B[4])\n #\tprint(\"Value of Objective Function = \", B[N])\n #\tprint(\"Number of Iterations = \", B[N+1])\n #\tprint(\"Persistence \", B[2]+B[1]*(B[3]**2) )\n #\tprint(\"---------------------------------\")\n\n # alpha,beta,gamma,omega,lambda\n return [B[1], B[2], B[3], B[0], B[4]]",
"def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. 
can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of 
degree')\r\n    plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n    plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n    plt.show()\r\n\r\n    #code to produce final graph\r\n    iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n    iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n    iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n    tarray = np.linspace(0,5,Nt+1)\r\n    plt.figure(figsize=(12, 6))\r\n    plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n    plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n    plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n    plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n    plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n    plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n    plt.xlabel('time')\r\n    plt.ylabel('representative frequency')\r\n    plt.legend()\r\n    plt.title(\"Comparing representative frequency of random nodes, for the different linear models, time step=50, D=0.1\")\r\n    plt.show()\r\n    return None #modify as needed\r",
"def hr_egn(A, B, R, x0):\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Degree and degree of preferences\n d = np.zeros([N, 2])\n d[:, 0] = np.dot(A, np.ones(N))\n\n for v in range(N):\n d[v, 1] = np.dot(np.ceil(np.abs(R[v, :])), A[v, :])\n\n # Player v neighborhood\n k = np.zeros([N, S], dtype='double')\n for v in range(N):\n for u in range(N):\n k[v, :] = np.add(k[v, :], np.multiply(A[v, u], x0[u, :]))\n # Weights the neighborhood\n k[v, :] = np.multiply(np.divide(1, d[v, 0]), k[v, :])\n\n # This variable is the increments that x0 receives, the derivative\n x = np.zeros([N, S], dtype='double')\n # This is the unit vector with 1 in some entry\n es = np.zeros(S, dtype='int')\n\n # Phi and gamma\n p = 0\n g = 0\n\n # Auxiliary variables for better clarity\n aux1 = 0\n aux2 = 0\n\n # Here is the derivative calculation\n # We first test if all payoffs are the same so we do less comparisons\n if B.ndim == 2:\n for v in range(N):\n for s in range(S):\n # Set es value\n es[s] = 1\n for u in range(N):\n if v == u:\n # Same payoff personal equation\n # First we will do the dot products\n # e_s*B*k_v\n aux1 = np.dot(es, np.dot(B, k[v, :]))\n # x_v*B*k_v\n aux2 = np.dot(x0[v, :], np.dot(B, k[v, :]))\n # Finally we subtract them to multiply by r_vv\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Same payoff social equation\n # x_u*B*e_s\n aux1 = np.dot(x0[u, :], np.dot(B, es))\n # x_u*B*x_v\n aux2 = np.dot(x0[u, :], np.dot(B, x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n else:\n for v in range(N):\n for s in range(S):\n # Same thing as before, but now with individual payoffs\n es[s] = 1\n for u in range(N):\n if v == u:\n # Individual payoffs personal equation\n # e_s*B_v*k_v\n aux1 = np.dot(es, np.dot(B[:, :, v], k[v, :]))\n # x_u*B_v*k_v\n aux2 = np.dot(x0[v, :], np.dot(B[:, :, v], k[v, :]))\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Individual payoffs social equation\n # x_u*B_u*e_s\n aux1 = np.dot(x0[u, :], np.dot(B[:, :, u], es))\n # x_u*B_u*x_v\n aux2 = np.dot(x0[u, :], np.dot(B[:, :, u], x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n return x",
"def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()",
"def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")",
"def solve(self, state, times):",
"def general_solver(heat_flux, temp_initial, temp_air, k, alpha, x_grid,t_grid, upsilon, \n bc_surface, sigma):\n\n # temperatures are reported as a data frame, where each column is a step in time\n temperatures = pd.DataFrame(columns = [n for n in t_grid])\n\n # extract the necessary parameters to determine the surface heat losses\n if bc_surface[0] == \"linear\":\n h = bc_surface[1] + bc_surface[2]\n hc = 0\n emissivity = 0\n elif bc_surface[0] == \"non-linear\":\n h = 0\n hc = bc_surface[1]\n emissivity = bc_surface[2]\n\n # initialize temperature arrays for present and future temperatures\n T = np.zeros_like(x_grid) + temp_initial\n Tn = np.zeros_like(x_grid)\n\n # iterate over each time step\n temperatures.iloc[:,0] = T\n for j, t in enumerate(t_grid[:-1]):\n \n # create tri-diagonal matrix A\n A = tridiag_matrix(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, h = h, hc = hc, emissivity = emissivity, sigma = sigma)\n \n # create vector b\n b = vector_b(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, T_air = temp_air, heat_flux = heat_flux, h = h, hc = hc, \n emissivity = emissivity, sigma = sigma, j = j)\n \n # calculate value of future temperature\n Tn = np.linalg.solve(A,b)\n \n # update present temperature\n T = Tn.copy()\n \n # store temperature profile at this time in the data frame\n temperatures.iloc[:, j+1] = Tn\n \n return temperatures",
"def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")",
"def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both 
unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)",
"def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h",
"def graphformation(time_lower, time_upper):\n\tprm = param.Para()\n\ttry:\n\t\tdb_connection = mysql.connector.connect(\n\t\t host=prm.hostname,\n\t\t user=prm.username,\n\t\t passwd=prm.password,\n\t\t database= prm.dbname\n\t\t )\n\t\tdb_cursor = db_connection.cursor()\n\texcept:\n\t\tprint(\"Can't Connect to database, check credentials in parameter file\")\n\tquery = (\"SELECT * FROM identity \")\n\tdb_cursor.execute(query)\n\tdf1=pd.DataFrame(db_cursor.fetchall())\n\tdf1.columns= ['node','deviceid','student','rollno']\n\tdict_identity = dict(zip(df1.deviceid, df1.node))\n\trev_dict_identity = dict(zip(df1.node, df1.deviceid ))\n\tquery = (\"SELECT * FROM activity WHERE time BETWEEN '{}' AND '{}'\".format(time_lower,time_upper)) ## incomplete\n\tdb_cursor.execute(query)\n\tactivity_data = pd.DataFrame(db_cursor.fetchall())\n\tif activity_data.empty==False:\n\t\tactivity_data.columns=[\"sl_no\",\"time\",\"node\",\"latitude\",\"longitude\"]\n\telse:\n\t\tprint(\"No Activity in the selected Time Window\")\n\t\treturn\n\tnumnodes= len(df1)\n\tedges= []\n\tscore = {}\n\t#print(activity_data)\n\ttime_groups = activity_data.groupby('time')\n\twith open(r'C:\\Users\\HP\\Desktop\\project\\Contact_Graph\\bluetooth.txt') as json_file:\n\t\tdata1 = json.load(json_file)\n\tfor name, group in time_groups:\n\t\tscore_tmp = decayfunc(name,time_upper)\n\t\tgroup = group.sort_values('node')\n\t\tfor i in range(len(group)-1):\n\t\t\tnode1 = group.iloc[i,2]\n\t\t\t###########################\n\t\t\tlistnearby=[]\n\t\t\ttry:\n\t\t\t\tlistnearby = data1[rev_dict_identity[node1]][str(name)]\n\t\t\t\tlistnearby = [dict_identity[i] for i in listnearby if dict_identity[i]>node1]\n\t\t\t\tfor i in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(node1,i)]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(node1,i)]=1\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t###########################\n\t\t\tfor j in range(i+1,len(group)):\n\t\t\t\tnode2 =group.iloc[j,2]\n\t\t\t\tif proximityfunc(group.iloc[i,3],group.iloc[i,4],group.iloc[j,3],group.iloc[j,4]) and node2 not in listnearby:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]+=1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tscore[(group.iloc[i,2],group.iloc[j,2])]=1\n\tnode_list = list(df1.node)\n\ttitle_list = list(df1.deviceid)\n\tedges_list = []\n\tfor edge,val in score.items():\n\t\tedges_list.append((int(edge[0]),int(edge[1]),float(val)))\n\n\treturn edges_list,node_list,title_list",
"def run(graph, time_axis, initial, element2edge, var, element_component_clause_literal_node, ts, type_num,\n type_robot_label, buchi, show, last_subtask=None, loop=False):\n\n frontier = [[initial, -1, []]]\n # iterate until the accepting state is reached\n while True:\n if show:\n print([f[0] for f in frontier])\n node, clock, acpt_run_ = frontier.pop()\n\n # Determine the set of identical time instants\n instant_element = time_axis[clock + 1]\n if acpt_run_:\n pre_neg_edge = acpt_run_[-1]['neg_edge']\n else:\n pre_neg_edge = []\n # loop over each successor to see whether progress can be made\n for succ in graph.succ[node]:\n # equivalent subtask\n if graph.edges[element2edge[instant_element[1]]]['formula'] == graph.edges[(node, succ)]['formula'] and \\\n graph.nodes[element2edge[instant_element[1]][0]]['formula'] == graph.nodes[node]['formula']:\n # if isEquivalent(graph.edges[element2edge[instant_element[1]]]['formula'], graph.edges[(node, succ)]['formula']) and \\\n # isEquivalent(graph.nodes[element2edge[instant_element[1]][0]]['formula'], graph.nodes[node]['formula']):\n\n # print((node, succ), graph.edges[(node, succ)]['formula'])\n # whether the collection of paths at clock satisfies the edge label\n # neg_literal: negative clause that needs to be addressed\n # exe_robot: set of robots that takes the subtask with nonzero id\n\n essential_clause_edge, neg_clause_edge, exe_robots_edge \\\n = determine_essentials(instant_element, var, graph.edges[(node, succ)]['label'],\n graph.edges[(node, succ)]['neg_label'], 1,\n element_component_clause_literal_node, ts, type_num,\n type_robot_label, last_subtask, buchi, [], loop)\n\n essential_clause_vertex, neg_clause_vertex, exe_robots_vertex \\\n = determine_essentials(instant_element, var, graph.nodes[node]['label'],\n graph.nodes[node]['neg_label'], 0,\n element_component_clause_literal_node, ts, type_num, dict(),\n last_subtask, buchi,\n pre_neg_edge, loop)\n\n # clock, the exact time when transition occurs\n acpt_run = acpt_run_.copy() # copy the history\n acpt_run.append({'subtask': (node, succ), 'time_element': time_axis[clock + 1],\n 'essential_robot_edge': exe_robots_edge,\n 'essential_clause_edge': essential_clause_edge, 'neg_edge': neg_clause_edge,\n 'essential_robot_vertex': exe_robots_vertex,\n 'neg_vertex': neg_clause_vertex})\n\n # stop when accept is reached\n if 'accept' in succ:\n return acpt_run\n # clock + 1, after reaching succ, the immediate time clock that should be verified\n frontier.append([succ, clock + 1, acpt_run])",
"def create_graph_hyp_space(t=0.8, b=0.01):\n\n # enumerate all graphs with three nodes\n common_cause_1 = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]])\n common_cause_1_cpd = [np.array([1-b, b]),\n np.array([[1-b, b], [(1-t)*(1-b), t + (1-t)*b]]),\n np.array([[1-b, b], [(1-t)*(1-b), t + (1-t)*b]])]\n\n common_cause_2 = np.array([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n common_cause_2_cpd = [common_cause_1_cpd[i] for i in [1, 0, 2]]\n\n common_cause_3 = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 0]])\n common_cause_3_cpd = [common_cause_1_cpd[i] for i in [1, 2, 0]]\n\n common_effect_1 = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0]])\n common_effect_1_cpd = [np.array([1-b, b]),\n np.array([1-b, b]),\n np.array([[[1-b, b], [(1-t)*(1-b), (t + (1-t)*b)]],\n [[(1-t)*(1-b), (t + (1-t)*b)],\n [(1-t)*(1-b)*(1-t),\n (t)**2 + (t*(1-t))*2 + b*(1-t)**2]]])]\n\n common_effect_2 = np.array([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n common_effect_2_cpd = [common_effect_1_cpd[i] for i in [0, 2, 1]]\n\n common_effect_3 = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]])\n common_effect_3_cpd = [common_effect_1_cpd[i] for i in [2, 0, 1]]\n\n causal_chain_1 = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n causal_chain_1_cpd = [np.array([1-b, b]),\n np.array([[1-b, b], [(1-t)*(1-b), t + (1-t)*b]]),\n np.array([[1-b, b], [(1-t)*(1-b), t + (1-t)*b]])]\n\n causal_chain_2 = np.array([[0, 0, 1], [0, 0, 0], [0, 1, 0]])\n causal_chain_2_cpd = [causal_chain_1_cpd[i] for i in [0, 2, 1]]\n\n causal_chain_3 = np.array([[0, 0, 0], [0, 0, 1], [1, 0, 0]])\n causal_chain_3_cpd = [causal_chain_1_cpd[i] for i in [1, 0, 2]]\n\n causal_chain_4 = np.array([[0, 0, 1], [1, 0, 0], [0, 0, 0]])\n causal_chain_4_cpd = [causal_chain_1_cpd[i] for i in [2, 0, 1]]\n\n causal_chain_5 = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n causal_chain_5_cpd = [causal_chain_1_cpd[i] for i in [1, 2, 0]]\n\n causal_chain_6 = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]])\n causal_chain_6_cpd = [causal_chain_1_cpd[i] for i in [2, 1, 0]]\n\n single_link_1 = np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n single_link_1_cpd = [np.array([1-b, b]),\n np.array([[1-b, b], [(1-t)*(1-b), t + (1-t)*b]]),\n np.array([1-b, b])]\n\n single_link_2 = np.array([[0, 0, 1], [0, 0, 0], [0, 0, 0]])\n single_link_2_cpd = [single_link_1_cpd[i] for i in [0, 2, 1]]\n\n single_link_3 = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])\n single_link_3_cpd = [single_link_1_cpd[i] for i in [1, 0, 2]]\n\n single_link_4 = np.array([[0, 0, 0], [0, 0, 1], [0, 0, 0]])\n single_link_4_cpd = [single_link_1_cpd[i] for i in [2, 0, 1]]\n\n single_link_5 = np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]])\n single_link_5_cpd = [single_link_1_cpd[i] for i in [1, 2, 0]]\n\n single_link_6 = np.array([[0, 0, 0], [0, 0, 0], [0, 1, 0]])\n single_link_6_cpd = [single_link_1_cpd[i] for i in [2, 1, 0]]\n\n graph_names = [\"common_cause_1\", \"common_cause_2\", \"common_cause_3\",\n \"common_effect_1\", \"common_effect_2\", \"common_effect_3\",\n \"causal_chain_1\", \"causal_chain_2\", \"causal_chain_3\",\n \"causal_chain_4\", \"causal_chain_5\", \"causal_chain_6\",\n \"single_link_1\", \"single_link_2\", \"single_link_3\",\n \"single_link_4\", \"single_link_5\", \"single_link_6\"]\n\n graphs = [common_cause_1, common_cause_2, common_cause_3,\n common_effect_1, common_effect_2, common_effect_3,\n causal_chain_1, causal_chain_2, causal_chain_3,\n causal_chain_4, causal_chain_5, causal_chain_6,\n single_link_1, single_link_2, single_link_3,\n single_link_4, single_link_5, single_link_6]\n\n cpds = [common_cause_1_cpd, common_cause_2_cpd, 
common_cause_3_cpd,\n common_effect_1_cpd, common_effect_2_cpd, common_effect_3_cpd,\n causal_chain_1_cpd, causal_chain_2_cpd, causal_chain_3_cpd,\n causal_chain_4_cpd, causal_chain_5_cpd, causal_chain_6_cpd,\n single_link_1_cpd, single_link_2_cpd, single_link_3_cpd,\n single_link_4_cpd, single_link_5_cpd, single_link_6_cpd]\n\n hyp_space = {graph_names: dag.DirectedGraph(graph, cpd, t, b)\n for (graph_names, graph, cpd) in\n zip(graph_names, graphs, cpds)}\n\n return hyp_space",
"def SIRD_graph(graph_obj, config):\n\n N = len(graph_obj.nodes)\n T = config['horizon']\n si, ir, id = config['infection_rate'], config['recovery_rate'], config['death_rate']\n sv, iv, vi = config['vaccination_rate'], config['vaccination_rate'], config['reinfection_rate']\n\n # ensure that sum of probabilities don't exceed 1\n assert ir + id + iv < 1\n assert si + sv < 1\n\n # initial state\n I0, R0, D0, V0 = config['init_state']['I'], config['init_state']['R'], config['init_state']['D'], config['init_state']['V']\n S0 = N - I0 - R0 - D0\n assert S0 > 0\n\n # shuffle node list and assign initial state\n shuffled_nodes = np.array(range(N))\n np.random.shuffle(shuffled_nodes)\n\n # state array is a Nx1 vector with array[i] denoting state of ith person\n # states for S,I,R,D,V -> (0,1,2,3,4)\n category_array = np.zeros(N, dtype=np.uint8)\n category_array[shuffled_nodes[:I0]] = 1\n category_array[shuffled_nodes[I0: I0+R0]] = 2\n category_array[shuffled_nodes[I0+R0: I0+R0+D0]] = 3\n category_array[shuffled_nodes[I0+R0+D0: I0+R0+D0+V0]] = 4\n\n # state array [i][j] = category of node j after time step i\n state_array = np.zeros((T + 1, N))\n state_array[0] = np.copy(category_array)\n\n for t in range(1, T + 1):\n if t % (T // 3) == 0:\n print('Time step:', t, '/', T)\n\n cur_categories = np.copy(state_array[t - 1])\n\n for node in graph_obj.nodes:\n # initialise new state with old state\n new_state = cur_categories[node]\n neighbours = graph_obj.adj[node]\n i_neighbours = np.sum(cur_categories[neighbours] == 1)\n\n if cur_categories[node] == 0: # susceptible\n prob_i = si * (i_neighbours / len(neighbours))\n # S can either be vaccinated or become infected\n next_possible_states = [1, 4, 0]\n next_possible_probs = [prob_i, sv, 1 - prob_i - sv]\n new_state = np.random.choice(a=next_possible_states, size=1, p=next_possible_probs)\n\n elif cur_categories[node] == 1: # infectious\n # I can either recover or die\n next_possible_states = [2, 3, 4, 1]\n next_possible_probs = [ir, id, iv, 1 - ir - id - iv]\n new_state = np.random.choice(a=next_possible_states, size=1, p=next_possible_probs)\n\n elif cur_categories[node] == 4: # vaccinated\n prob_r = vi * (i_neighbours / len(neighbours))\n next_possible_states = [1, 4]\n next_possible_probs = [prob_r, 1 - prob_r]\n new_state = np.random.choice(a=next_possible_states, size=1, p=next_possible_probs)\n\n # do nothing for recovered and dead\n\n # assign new state to current time vector\n cur_categories[node] = new_state\n\n # store new time step results in final array\n state_array[t] = cur_categories\n\n return state_array",
"def inverse_q_learning(feature_matrix,nA, gamma, transitions, alpha_r, alpha_q, alpha_sh, epochs, real_distribution):\n nS = feature_matrix.shape[0]\n\n \n # initialize tables for reward function, value functions and state-action visitation counter.\n r = np.zeros((nS, nA))\n q = np.zeros((nS, nA))\n q_sh = np.zeros((nS, nA))\n state_action_visitation = np.zeros((nS, nA))\n\n for i in range(epochs):\n if i%10 == 0:\n print(\"Epoch %s/%s\" %(i+1, epochs))\n \n for traj in transitions:\n for (s, a, _, ns) in traj:\n state_action_visitation[s][a] += 1\n d = False # no terminal state\n\n # compute shifted q-function.\n q_sh[s, a] = (1-alpha_sh) * q_sh[s, a] + alpha_sh * (gamma * (1-d) * np.max(q[ns]))\n \n # compute log probabilities.\n sum_of_state_visitations = np.sum(state_action_visitation[s])\n log_prob = np.log((state_action_visitation[s]/sum_of_state_visitations) + epsilon)\n \n # compute eta_a and eta_b for Eq. (9).\n eta_a = log_prob[a] - q_sh[s][a]\n other_actions = [oa for oa in range(nA) if oa != a]\n eta_b = log_prob[other_actions] - q_sh[s][other_actions]\n sum_oa = (1/(nA-1)) * np.sum(r[s][other_actions] - eta_b)\n\n # update reward-function.\n r[s][a] = (1-alpha_r) * r[s][a] + alpha_r * (eta_a + sum_oa)\n\n # update value-function.\n q[s, a] = (1-alpha_q) * q[s, a] + alpha_q * (r[s, a] + gamma * (1-d) * np.max(q[ns]))\n s = ns\n\n # compute Boltzmann distribution.\n boltzman_distribution = []\n for s in range(nS):\n boltzman_distribution.append([])\n for a in range(nA):\n boltzman_distribution[-1].append(np.exp(q[s][a]))\n boltzman_distribution = np.array(boltzman_distribution)\n boltzman_distribution /= np.sum(boltzman_distribution, axis=1).reshape(-1, 1)\n return q, r, boltzman_distribution",
"def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)",
"def make_euler_circuit(start_node, updated_graph_instance):\n\n current_edges_on_graph_list = make_edges_list(updated_graph_instance.edges_dict)\n\n current_node = start_node\n\n node_visit_order = [current_node]\n edge_visit_order = []\n\n # print(\"\\n\\n\\ncurrent_edges_on_graph_list:\", current_edges_on_graph_list)\n\n while len(current_edges_on_graph_list) > 0:\n\n # print(\"current_edges_on_graph_list:\", current_edges_on_graph_list)\n # while there are still edges on the graph, keep traversing\n\n current_bridges_on_graph = get_bridges(current_edges_on_graph_list)\n\n edges_conn_to_current_node = get_all_conn_edges_remaining_in_graph(\n current_node, current_edges_on_graph_list, updated_graph_instance.nodes_dict\n )\n\n edge_to_traverse = choose_edge_to_traverse(\n current_node, edges_conn_to_current_node, current_bridges_on_graph\n )\n\n if edge_to_traverse in current_edges_on_graph_list:\n\n current_edges_on_graph_list.remove(edge_to_traverse)\n\n else:\n\n current_edges_on_graph_list.remove(edge_to_traverse[::-1])\n\n edge_to_traverse_list = list(edge_to_traverse)\n # remove current node from edge to traverse\n edge_to_traverse_list.remove(current_node)\n # update current node to be the only node left in the edge list\n\n # update edge traveral list with edge just traversed\n edge_traversed = (current_node, edge_to_traverse_list[0])\n\n edge_visit_order.append(edge_traversed)\n\n current_node = edge_to_traverse_list[0]\n\n # add the new current node to the nodes visit order list\n node_visit_order.append(current_node)\n\n # add node visit order and edge_visit order to graph instance\n\n updated_graph_instance.node_visit_order = node_visit_order\n\n updated_graph_instance.edge_visit_order = edge_visit_order\n\n updated_graph_instance.node_geojson = make_node_geojson(updated_graph_instance)\n\n updated_graph_instance.edge_geojson = make_edge_geojson(updated_graph_instance)\n\n updated_graph_instance.route_geojson = make_route_geojson(updated_graph_instance)\n\n print(\"\\n\\n\\n\\n\\nROUTE COLLECTION\", updated_graph_instance.route_geojson)\n\n print(\"check done\")\n\n return updated_graph_instance",
"def get_problem():\n\n problem = beluga.optim.Problem('Track_demo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','k') \\\n .state('y','V*sin(hdg)','k') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define cost functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','k') \\\n .initial('y-y_0','k') \\\n .terminal('x-x_f','k') \\\n .terminal('y-y_f','k')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/k^2') #Integral conversion factor\n problem.constant('V',1,'k/s') #Vehicle speed\n problem.constant('elev',0.001,'k') #Units for the elevation\n\n #Unit scaling\n problem.scale.unit('k',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=50, verbose = True, cached = False, number_arcs=8)\n #problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=50, verbose = True, cached = False)\n\n #Initial Guess\n problem.guess.setup('auto',start=[16,10], costate_guess=[0.0,-0.1])\n\n #Add continuation steps\n problem.steps.add_step(strategy='HPA') \\\n .terminal('x', 180, 50) \\\n .terminal('y', 98, 50)\n\n return problem",
"def E_generator(beta, eta, h):\n def E(x, y):\n \"\"\"Calculate energy for matrices x, y.\n\n Note: the computation is not localized, so this is quite expensive.\n \"\"\"\n # sum of products of neighboring paris {xi, yi}\n xxm = np.zeros_like(x)\n xxm[:-1, :] = x[1:, :] # down\n xxm[1:, :] += x[:-1, :] # up\n xxm[:, :-1] += x[:, 1:] # right\n xxm[:, 1:] += x[:, :-1] # left\n xx = np.sum(xxm * x)\n xy = np.sum(x * y)\n xsum = np.sum(x)\n return h * xsum - beta * xx - eta * xy\n\n def is_valid(i, j, shape):\n \"\"\"Check if coordinate i, j is valid in shape.\"\"\"\n return i >= 0 and j >= 0 and i < shape[0] and j < shape[1]\n\n def localized_E(E1, i, j, x, y):\n \"\"\"Localized version of Energy function E.\n\n Usage: old_x_ij, new_x_ij, E1, E2 = localized_E(Ecur, i, j, x, y)\n \"\"\"\n oldval = x[i, j]\n newval = oldval * -1 # flip\n # local computations\n E2 = E1 - (h * oldval) + (h * newval)\n E2 = E2 + (eta * y[i, j] * oldval) - (eta * y[i, j] * newval)\n adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n neighbors = [x[i + di, j + dj] for di, dj in adjacent\n if is_valid(i + di, j + dj, x.shape)]\n E2 = E2 + beta * sum(a * oldval for a in neighbors)\n E2 = E2 - beta * sum(a * newval for a in neighbors)\n return oldval, newval, E1, E2\n\n return E, localized_E",
"def test_tdsp_dijsktra_with_timesteps_key_error(self):\n input_file = \"/home/andy/code/python/om_task/data/input_data2.txt\"\n graph_data = process_input_file(input_file)\n\n graphs = {}\n\n today = datetime.today()\n t = time(0, 0) # input time value\n start_time = datetime(today.year, today.month, today.day, t.hour, t.minute)\n\n #print(\"graph_data = \", graph_data)\n print(\"graph_data[4] = \", graph_data[4])\n\n for timestep in graph_data:\n # print(\"Timestep=\", timestep)\n # Create a graph for each timestep\n graphs[timestep] = Graph()\n for edge in graph_data.get(timestep):\n graphs[timestep].add_edge(*edge)\n\n for graph in graphs:\n print(graphs[graph].edges)\n print(graphs[graph].weights)\n\n print(\"graph[4].weights = \", graphs[4].weights)\n\n print(\"Start Test 1\")\n path = tdsp_dijsktra(graphs, start_time, 'A0', 'E1')\n print(\"Path = \", path)\n self.assertEqual(['A0', 'B9', 'C0', 'D0', 'E1'], path)",
"def algo_graph_iht(\n x_mat, y_tr, max_epochs, lr, x_star, x0, tol_algo, edges, costs, s,\n g=1, root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):\n start_time = time.time()\n x_hat = np.copy(x0)\n xtx = np.dot(np.transpose(x_mat), x_mat)\n xty = np.dot(np.transpose(x_mat), y_tr)\n\n # graph projection para\n h_low = int(len(x0) / 2)\n h_high = int(h_low * (1. + gamma))\n t_low = int(s)\n t_high = int(s * (1. + gamma))\n\n num_epochs = 0\n for epoch_i in range(max_epochs):\n num_epochs += 1\n grad = -1. * (xty - np.dot(xtx, x_hat))\n head_nodes, proj_gradient = algo_head_tail_bisearch(\n edges, grad, costs, g, root, h_low, h_high,\n proj_max_num_iter, verbose)\n bt = x_hat - lr * proj_gradient\n tail_nodes, proj_bt = algo_head_tail_bisearch(\n edges, bt, costs, g, root, t_low, t_high,\n proj_max_num_iter, verbose)\n x_hat = proj_bt\n\n # early stopping for diverge cases due to the large learning rate\n if np.linalg.norm(x_hat) >= 1e3: # diverge cases.\n break\n if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:\n break\n x_err = np.linalg.norm(x_hat - x_star)\n run_time = time.time() - start_time\n return x_err, num_epochs, run_time",
"def graph_eigenvalues(N, eigenvalues,interval):\n (a,b) =interval\n a=abs(a-b)\n n=[]\n E_numeric=[]\n E_anlytic=[]\n E_anlytic_harm=[]\n \n for i in range(N-1):\n n.append(i)\n E_numeric.append(eigenvalues[0,i])\n E_anlytic.append((((math.pi)**2)*(i**2))/(a**2))\n E_anlytic_harm.append(((1/2)**0.5)*((i)+(1/2)))\n plt.plot(n,E_numeric,'b')\n plt.plot(n,E_anlytic,'g')\n plt.plot(n,E_anlytic_harm,'r')\n plt.xlabel('n', fontsize=20, color='black')\n plt.ylabel('E', fontsize=20, color='black')\n plt.show()\n return None",
"def solve_rocket_equations():\n # initial data\n u0 = numpy.array([h0,v0,mp0])\n t = [0.0] # The array of times\n u = [u0] # The array of evolved solutions\n # Evolve!\n while u[-1][0] >= 0.0:\n t.append(t[-1]+dt)\n u.append(euler_step(u[-1],t[-1],rhs,dt))\n return numpy.array(t),numpy.array(u)",
"def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('Hannibal_HPAdemo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n #~~~~~!!!\n #problem.quantity('terrain3','(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/m^2') #Integral conversion factor\n problem.constant('V',1,'m/s') #Vehicle speed\n problem.constant('elev',1,'m') #Initial Elevation\n\n #Unit scaling\n problem.scale.unit('m',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n #problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=8)\n problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=15, verbose = True, cached = False)\n\n #Initial Guess (Classic test example [4.9,0.4])\n problem.guess.setup('auto',start=[9.0,0.5], costate_guess=[0.0,-0.1]) #City A\n #problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A\n\n #Add Continuation Steps (Classic test example [7.2,8.5]) [8, 4.5]\n problem.steps.add_step(strategy='HPA',hweight=0.9) \\\n .terminal('x', 3.0, 10) \\\n .terminal('y', 9.5, 10) \\\n # .const('w', 0.9, 2, confined=True)\n\n #problem.steps.add_step(strategy='manual').num_cases(10) \\\n # .terminal('x', 3.0) \\\n # .terminal('y', 9.5) \\\n\n #problem.steps.add_step().num_cases(30) \\\n # .const('w',0.99) #Final Terrain weighting factor\n\n\n return problem",
"def hyperbolic_generator():\n\n hyperbolicRadius = np.arccosh(1 + N / (2 * np.pi * pointDensity))\n hyperbolicThreshold = np.arccosh(1 + thresholdFrac * (np.cosh(hyperbolicRadius) - 1))\n\n data_hyperbolic = []\n for r in range(num_graphs):\n # generates dictionary of positions (in a circle of radius) for each node: node_pos = {node_i: (radius, theta)} <-- uses polar coordinates\n # uses the inversion sampling idea to give Euclidean radii sampled uniformly across a hyperbolic sheet\n node_pos = {}\n for i in range(N):\n rnd_angle = np.random.random() * 2 * np.pi\n p = np.random.random() # random float between 0 and 1\n rnd_radii = np.arccosh(1 + p * (np.cosh(hyperbolicRadius) - 1)) # <-- inversion sampling\n node_pos.update({i: (rnd_radii, rnd_angle)})\n\n # computes the adjacency matrix\n Adj_Matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n ij_dist = hyp_dist(node_pos[i], node_pos[j])\n if ij_dist < hyperbolicThreshold:\n Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix\n\n data_hyperbolic.append(Adj_Matrix)\n\n return data_hyperbolic",
"def integration_testing(solver):\n time_step = [g_initial_time + step for step in g_time_steps]\n u = 0.0\n system_equation = lambda t, state, u: system(t, state, u)\n system_jacobian = lambda t, state, u: jacobian(t, state, u)\n solver.set_system_equation(system_equation, system_jacobian)\n solver.set_initial_value(g_initial_value, g_initial_time)\n assert g_initial_time == solver.t\n for steps in time_step:\n solver.set_f_params(u)\n state = solver.integrate(steps)\n assert all(state == solver.y), \"state error\"\n assert solver.t == steps, \"time error, desired and actual time are not the same\"",
"def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res",
"def test_tdsp_dijsktra_with_timesteps(self):\n input_file = \"/home/andy/code/python/om_task/data/test_data3.txt\"\n graph_data = process_input_file(input_file)\n\n graphs = {}\n\n today = datetime.today()\n t = time(0, 0) # input time value\n start_time = datetime(today.year, today.month, today.day, t.hour, t.minute)\n\n for timestep in graph_data:\n # print(\"Timestep=\", timestep)\n # Create a graph for each timestep\n graphs[timestep] = Graph()\n for edge in graph_data.get(timestep):\n graphs[timestep].add_edge(*edge)\n\n for graph in graphs:\n print(graphs[graph].edges)\n print(graphs[graph].weights)\n\n print(\"Start Test 1\")\n path = tdsp_dijsktra(graphs, start_time, 'A0', 'A1')\n print(path)\n self.assertEqual(['A0', 'B0', 'B1', 'A1'], path)\n\n print(\"Start Test 2\")\n t = time(0, 0) # input time value\n start_time = datetime(today.year, today.month, today.day, t.hour, t.minute)\n path = tdsp_dijsktra(graphs, start_time, 'B0', 'A1')\n print(path)\n self.assertEqual(['B0', 'B1', 'A1'], path)\n\n print(\"Start Test 3\")\n t = time(1, 1) # input time value\n start_time = datetime(today.year, today.month, today.day, t.hour, t.minute)\n path = tdsp_dijsktra(graphs, start_time, 'B0', 'A1')\n print(path)\n self.assertEqual(['B0', 'A1'], path)",
"def g_dynamic_programming_algorithm(self, vehicle_id, Flag):\r\n for t in range(self.g_number_of_time_intervals):\r\n self.g_time_dependent_state_vector[vehicle_id][t] = C_time_indexed_state_vector()\r\n self.g_time_dependent_state_vector[vehicle_id][t].Reset()\r\n self.g_time_dependent_state_vector[vehicle_id][t].current_time = t\r\n\r\n self.g_ending_state_vector[vehicle_id] = C_time_indexed_state_vector()\r\n self.g_ending_state_vector[vehicle_id].Reset()\r\n max_time_interval = 0\r\n # 1.Initial state for original depot\r\n new_element = CVSState()\r\n new_element.current_node_id = self.origin\r\n new_element.remaining_capacity = self.capacity\r\n new_element.node_seq.append(self.origin)\r\n new_element.node_serving_state = [0] * self.g_number_of_nodes\r\n self.g_time_dependent_state_vector[vehicle_id][0].update_state(new_element, Flag)\r\n for t in range(self.g_number_of_time_intervals):\r\n for index in range(min(len(self.g_time_dependent_state_vector[vehicle_id][t].VSStateVector), self.Best_K_Size)):\r\n pElement = self.g_time_dependent_state_vector[vehicle_id][t].VSStateVector[index]\r\n from_node_id = pElement.current_node_id # 起点ID\r\n from_node = self.node_list[from_node_id] # 起点\r\n for i in range(from_node.outbound_size): # 邻接点\r\n to_node_id = from_node.outbound_nodes_list[i] # 从当前点可以到达哪些点ID\r\n to_node = self.node_list[to_node_id] # 邻接点\r\n link_to = from_node.outbound_links_list[i] # 邻接路段\r\n if link_to.mean == 0:\r\n next_time =t+1\r\n else:\r\n next_time = t + link_to.mean\r\n\r\n if next_time > self.g_number_of_time_intervals - 1:\r\n continue\r\n # Case 1: to_node is the destination\r\n if to_node_id == self.destination:\r\n new_element = CVSState()\r\n new_element.my_copy(pElement)\r\n # new_element.current_node_id = to_node_id\r\n # new_element.remaining_capacity -= to_node.demand\r\n new_element.node_seq.append(to_node_id)\r\n # new_element.node_serving_state[to_node_id] = 1\r\n new_element.Calculate_Label_Cost(to_node, link_to,self.multiplier_v)\r\n self.g_ending_state_vector[vehicle_id].VSStateVector.append(new_element)\r\n\r\n # self.g_ending_state_vector[vehicle_id].update_state(new_element, Flag)\r\n\r\n # Case 2: to_node is not the destination\r\n if to_node_id != self.destination:\r\n # check 1: vehicle capacity\r\n if pElement.remaining_capacity < to_node.demand:\r\n continue\r\n # check 2: if the to node is served\r\n node_serving_state = pElement.node_serving_state\r\n if node_serving_state[to_node_id] == 1:\r\n continue\r\n\r\n new_element = CVSState()\r\n new_element.my_copy(pElement)\r\n new_element.current_node_id = to_node_id\r\n new_element.remaining_capacity -= to_node.demand\r\n new_element.node_seq.append(to_node_id)\r\n new_element.node_serving_state[to_node_id] = 1\r\n new_element.Calculate_Label_Cost(to_node, link_to,self.multiplier_v)\r\n self.g_time_dependent_state_vector[vehicle_id][next_time].update_state(new_element, Flag)\r\n if next_time > max_time_interval:\r\n max_time_interval = next_time\r\n # check\r\n if max_time_interval <= t:\r\n break\r\n self.g_ending_state_vector[vehicle_id].Sort(Flag)",
"def __init__(self,\n game,\n exploration_strategy='uniform-exhaustive',\n confidence_method='ucb-standard',\n delta=0.01,\n ucb_eps=0,\n per_payoff_confidence=True,\n time_dependent_delta=False):\n self.exploration_strategy = exploration_strategy\n self.confidence_method = confidence_method\n self.ucb_eps = ucb_eps\n self.G = game # pylint: disable=invalid-name\n self.per_payoff_confidence = per_payoff_confidence\n self.time_dependent_delta = time_dependent_delta\n if self.per_payoff_confidence:\n self._delta = delta\n else:\n self._delta = delta / (\n self.G.n_players *\n functools.reduce(operator.mul, self.G.strategy_spaces, 1))\n\n # Compute the graph\n self.V = list( # pylint: disable=invalid-name\n itertools.product(*[range(smax) for smax in self.G.strategy_spaces]))\n self.E = [] # pylint: disable=invalid-name\n for v in self.V:\n adj_strats = [\n list(range(v[k] + 1, self.G.strategy_spaces[k]))\n for k in range(self.G.n_players)\n ]\n for k in range(self.G.n_players):\n for new_s in adj_strats[k]:\n second_vertex = list(v)\n second_vertex[k] = new_s\n second_vertex = tuple(second_vertex)\n self.E.append((v, second_vertex))\n self.count_history = {v: [] for v in self.V}\n self.total_interactions = 0"
]
| [
"0.6062276",
"0.6009414",
"0.5854938",
"0.55636716",
"0.5466281",
"0.54142034",
"0.54097426",
"0.53703797",
"0.5366806",
"0.5362178",
"0.53470725",
"0.5342757",
"0.5329308",
"0.52993786",
"0.5255946",
"0.52543396",
"0.5253646",
"0.5226769",
"0.5217197",
"0.5200583",
"0.51922",
"0.5166258",
"0.5165572",
"0.5165006",
"0.5151935",
"0.51509225",
"0.51451945",
"0.51360124",
"0.5131971",
"0.5127064"
]
| 0.6342304 | 0 |
Constructor for the RestoreObjectState class | def __init__(self,
error=None,
object_status=None,
resource_pool_id=None,
restored_object_id=None,
source_object_id=None,
):
# Initialize members of the class
self.error = error
self.object_status = object_status
self.resource_pool_id = resource_pool_id
self.restored_object_id = restored_object_id
self.source_object_id = source_object_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, state):\n self.state = state",
"def restore(self, obj):\n return obj",
"def __init__(self,\n alternate_restore_base_directory=None,\n continue_on_error=None,\n encryption_enabled=None,\n generate_ssh_keys=None,\n override_originals=None,\n preserve_acls=None,\n preserve_attributes=None,\n preserve_timestamps=None,\n restore_entities=None,\n restore_to_original_paths=None,\n save_success_files=None,\n skip_estimation=None,\n ):\n\n # Initialize members of the class\n self.alternate_restore_base_directory = alternate_restore_base_directory\n self.continue_on_error = continue_on_error\n self.encryption_enabled = encryption_enabled\n self.generate_ssh_keys = generate_ssh_keys\n self.override_originals = override_originals\n self.preserve_acls = preserve_acls\n self.preserve_attributes = preserve_attributes\n self.preserve_timestamps = preserve_timestamps\n self.restore_entities = restore_entities\n self.restore_to_original_paths = restore_to_original_paths\n self.save_success_files = save_success_files\n self.skip_estimation = skip_estimation",
"def __init__(self, state=State.NORMAL):\n self.state = state",
"def __init__(self):\n raise NotImplementedError('cannot create independent state')",
"def mos_object(self):\n return self._restore_fn(*self._restore_args)",
"def __init__(self, init_state):\n self._curr_state = init_state",
"def __setstate__(self, _state : dict):\n self.__init__(**_state)",
"def restore(self):\n self.abstract_obj.restore()",
"def __init__(self):\n self._state: CartState = None",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] * 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] * 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []",
"def __init__(self,\n application_restore_objects=None,\n hosting_protection_source=None,\n ):\n\n # Initialize members of the class\n self.application_restore_objects = application_restore_objects\n self.hosting_protection_source = hosting_protection_source",
"def __init__(self):\n\t\tself.state = None\n\t\tself.info = None\n\t\tself.next = None",
"def __init__( self, state=None ):\n\n raise NotImplementedError(\"__init__\");",
"def __init__(self):\n self.__dict__ = self._shared_state",
"def __setstate__(self, state):\n # compatibility with data from previous versions\n self._name = \"\"\n self._user_data = dict()\n self.__loaded_from = None\n # Restore state. This overrides the above if contained in the data.\n self.__dict__.update(restore_dict(state))",
"def restore_object(Bucket=None, Key=None, VersionId=None, RestoreRequest=None, RequestPayer=None):\n pass",
"def __setstate__(self, s):\n self.__dict__ = s\n self.experiment_object = None",
"def initialize_state(self):\n super(InverseChain, self).initialize_state()",
"def restore_object(self):\n self.co_worker_list = self.original_co_worker_list",
"def __init__(self, rexarm, state_machine, parent=None):\n QThread.__init__(self, parent=parent)\n self.rexarm = rexarm\n self.sm=state_machine",
"def restore_state(self, state: ale_py.ALEState):\n self.ale.restoreState(state)",
"def __init__(self) -> None:\n\n self.reset()",
"def _save_state_as_orig(self):\n self._orig = None\n self._orig = deepcopy(self)",
"def __init__(self):\n self.update_state()",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def __init__(self, state: str) -> None:\n super().__init__(\"memory\")\n self._fail_counter = 0\n self._opened_at: datetime | None = None\n self._state = state",
"def restore(self, serialized: bytes) -> None:\n loaded_state = pickle.loads(serialized)\n self.__dict__.update(loaded_state.__dict__)",
"def __init__(self):\n self.state_locations = None;\n self.state_actions = None;\n self.start_id = None; # The id assigned to the start state\n self.goal_id = None; # The id assigned to the goal state\n self.start_loc = None; # The real word coordinates of the start state\n self.goal_loc = None; # The real word coordinates of the goal state\n self.reward_states_n = None\n self.reward_states = None\n self.reward_sendcommand = None\n self.reward_timeout = None\n self.timeout = None",
"def restore(self):\n raise NotImplementedError"
]
| [
"0.6527886",
"0.6478938",
"0.63768274",
"0.63061714",
"0.6259334",
"0.6208393",
"0.6155825",
"0.6133407",
"0.612051",
"0.6097409",
"0.5997136",
"0.5995691",
"0.59769154",
"0.5974528",
"0.5970553",
"0.59491473",
"0.59310687",
"0.592171",
"0.5913257",
"0.58927315",
"0.58747876",
"0.5869466",
"0.5866477",
"0.58582354",
"0.583775",
"0.58274937",
"0.5814694",
"0.5771102",
"0.5767165",
"0.57482713"
]
| 0.69528556 | 0 |
Test login with 2SA. | def test_login_2sa(self):
dsm_7 = SynologyDSMMock(
VALID_HOST,
VALID_PORT,
VALID_USER_2SA,
VALID_PASSWORD,
VALID_HTTPS,
VALID_VERIFY_SSL,
)
dsm_7.dsm_version = 7
with pytest.raises(SynologyDSMLogin2SARequiredException) as error:
dsm_7.login()
error_value = error.value.args[0]
assert error_value["api"] == "SYNO.API.Auth"
assert error_value["code"] == 403
assert error_value["reason"] == "One time password not specified"
assert (
error_value["details"]
== "Two-step authentication required for account: valid_user_2sa"
)
assert dsm_7.login(VALID_OTP)
assert dsm_7._session_id == SESSION_ID
assert dsm_7._syno_token == SYNO_TOKEN
assert dsm_7._device_token == DEVICE_TOKEN
assert dsm_7.device_token == DEVICE_TOKEN | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_login_2sa_new_session(self):\n dsm_7 = SynologyDSMMock(\n VALID_HOST,\n VALID_PORT,\n VALID_USER_2SA,\n VALID_PASSWORD,\n VALID_HTTPS,\n VALID_VERIFY_SSL,\n device_token=DEVICE_TOKEN,\n )\n dsm_7.dsm_version = 7\n assert dsm_7.login()\n\n assert dsm_7._session_id == SESSION_ID\n assert dsm_7._syno_token == SYNO_TOKEN\n assert dsm_7._device_token == DEVICE_TOKEN\n assert dsm_7.device_token == DEVICE_TOKEN",
"def test_aio_can_login_to_web_portal(aio):",
"def login():",
"def login():",
"def test_successful_login(self):\n pass",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def test_activate_login(self):\r\n pass",
"def test_basic_login(self):\n c = Client()\n c.login(username='a', password='123456')",
"def login(self):",
"async def test_login(self):\n params = {\n 'VCA_HOST': os.getenv('VCA_HOST', '127.0.0.1'),\n 'VCA_PORT': os.getenv('VCA_PORT', 17070),\n 'VCA_USER': os.getenv('VCA_USER', 'admin'),\n 'VCA_SECRET': os.getenv('VCA_SECRET', 'admin'),\n 'VCA_CHARMS': os.getenv('VCA_CHARMS', None),\n 'VCA_PUBLIC_KEY': os.getenv('VCA_PUBLIC_KEY', None),\n 'VCA_CACERT': os.getenv('VCA_CACERT', \"invalidcacert\"),\n }\n\n client = self.get_n2vc(params)\n\n await client.login()\n assert client.authenticated\n\n await client.logout()\n assert client.authenticated is False",
"def test_regular_user_login(self):\n self.login(\"user\", \"user\")\n self.should_see(\"This is your profile, user.\")",
"def test_login(self):\n\n print('\\n\\nEnter a valid LendingClub account information...')\n email = input('Email:')\n password = getpass.getpass()\n\n self.assertTrue(self.session.authenticate(email, password))\n print('Authentication successful')",
"def test_valid_login(self):\n self.assertTrue(self.session.authenticate('[email protected]', 'supersecret'))",
"def login():\n pass",
"def test_02_account_login(self):\n self.login(email='[email protected]', password='Abcd@1234')\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'valid login test completed'",
"def test_login(self):\n\n client = Client('username', 'password')\n self.setSessionResponse(200)\n try:\n client.authenticate()\n except Exception as e:\n self.fail(\"Exception raised : \" + str(e))",
"def test_login():\n My.search_merchant_page(driver, My.Testing_Env_EN)\n validate_login()\n print('----------')\n My.search_merchant_page(driver, My.Testing_Env_FR)\n validate_login()\n driver.quit()",
"def test_login(self):\n self.driver.find_element_by_link_text(\"Sign in\").click()\n self.driver.find_element_by_id(\"email\").send_keys(\"[email protected]\")\n self.driver.find_element_by_id(\"passwd\").send_keys(\"control123\")\n self.driver.find_element_by_id(\"SubmitLogin\").click()\n time.sleep(5)",
"def test_professor_can_login_to_web_portal(professor):",
"def test_login(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'__action': 'login', 'id': people[0].id, 'password': \"testing\"}\n self.post('user', 200, params=p)",
"def test_start_new_verification(self):\r\n user = UserFactory.create(username=\"rusty\", password=\"test\")\r\n self.client.login(username=\"rusty\", password=\"test\")",
"def test_login_OK(self):\n rv = self.login('super',\n '1b3231655cebb7a1f783eddf27d254ca')\n assert 'Bienvenido' in rv.data",
"def login(self):\n\t\treturn",
"def test_sucessful_login(self):\n self.user.list_of_accounts = [{'username': 'dalton',\n 'pwd': 'chromelegend',\n 'email': '[email protected]'}]\n msg = self.user.login(\"[email protected]\", \"chromelegend\")\n self.assertEqual(msg, \"Success!\")",
"def test_030_login_as_user(self):\n\n testflow.step(LOG_USR_MSG, TEST_USER1)\n users.loginAsUser(\n TEST_USER1,\n config.INTERNAL_PROFILE,\n self.user_password,\n True,\n )\n\n testflow.step(TST_CON_MSG, TEST_USER1)\n assert connectionTest(), \"User '%s' can't login\" % TEST_USER1",
"def do_login(self, backend, user):",
"def test_admin_can_login_to_web_portal(admin):",
"def test_login(self):\n url_extend = 'user_auth/login/'\n self.browser.get(self.url + url_extend)\n\n # enter the username and password.\n username_field = self.browser.find_element_by_name('user_name')\n username_field.send_keys('user4')\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('user')\n\n # click login button.\n # get the first input button under the first form in login page.\n login_button = self.browser.find_element_by_xpath(\"//form[1]/fieldset[1]/input[@type='submit']\")\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")",
"def test_031_login_as_exp_pwd_user(self):\n\n testflow.step(LOG_USR_MSG, TEST_USER2)\n users.loginAsUser(\n TEST_USER2,\n config.INTERNAL_PROFILE,\n self.user_password,\n True,\n )\n\n testflow.step(TST_CON_MSG, TEST_USER2)\n assert not connectionTest(), \"User '%s' can login\" % TEST_USER2",
"def testGaiaLogin(self):\n if self._is_guest:\n return\n try:\n username, password = next(self._GetCredentialsIter())\n except StopIteration:\n username = 'autotest.catapult'\n password = 'autotest'\n with self._CreateBrowser(gaia_login=True,\n username=oobe.Oobe.Canonicalize(username),\n password=password):\n self.assertTrue(py_utils.WaitFor(self._IsCryptohomeMounted, 10))"
]
| [
"0.7610409",
"0.7500811",
"0.74207056",
"0.74207056",
"0.7279427",
"0.72677475",
"0.7121446",
"0.7109126",
"0.707014",
"0.70642114",
"0.7060475",
"0.70395577",
"0.70346",
"0.69922304",
"0.69830513",
"0.6942732",
"0.6919252",
"0.6907737",
"0.6880657",
"0.6877048",
"0.6873271",
"0.6871723",
"0.68097985",
"0.67957515",
"0.67878354",
"0.6772828",
"0.67619437",
"0.6745331",
"0.67446417",
"0.6715374"
]
| 0.80582833 | 0 |
Test login with 2SA and a new session with a granted device. | def test_login_2sa_new_session(self):
dsm_7 = SynologyDSMMock(
VALID_HOST,
VALID_PORT,
VALID_USER_2SA,
VALID_PASSWORD,
VALID_HTTPS,
VALID_VERIFY_SSL,
device_token=DEVICE_TOKEN,
)
dsm_7.dsm_version = 7
assert dsm_7.login()
assert dsm_7._session_id == SESSION_ID
assert dsm_7._syno_token == SYNO_TOKEN
assert dsm_7._device_token == DEVICE_TOKEN
assert dsm_7.device_token == DEVICE_TOKEN | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_login_2sa(self):\n dsm_7 = SynologyDSMMock(\n VALID_HOST,\n VALID_PORT,\n VALID_USER_2SA,\n VALID_PASSWORD,\n VALID_HTTPS,\n VALID_VERIFY_SSL,\n )\n dsm_7.dsm_version = 7\n with pytest.raises(SynologyDSMLogin2SARequiredException) as error:\n dsm_7.login()\n error_value = error.value.args[0]\n assert error_value[\"api\"] == \"SYNO.API.Auth\"\n assert error_value[\"code\"] == 403\n assert error_value[\"reason\"] == \"One time password not specified\"\n assert (\n error_value[\"details\"]\n == \"Two-step authentication required for account: valid_user_2sa\"\n )\n\n assert dsm_7.login(VALID_OTP)\n\n assert dsm_7._session_id == SESSION_ID\n assert dsm_7._syno_token == SYNO_TOKEN\n assert dsm_7._device_token == DEVICE_TOKEN\n assert dsm_7.device_token == DEVICE_TOKEN",
"async def test_login(self):\n params = {\n 'VCA_HOST': os.getenv('VCA_HOST', '127.0.0.1'),\n 'VCA_PORT': os.getenv('VCA_PORT', 17070),\n 'VCA_USER': os.getenv('VCA_USER', 'admin'),\n 'VCA_SECRET': os.getenv('VCA_SECRET', 'admin'),\n 'VCA_CHARMS': os.getenv('VCA_CHARMS', None),\n 'VCA_PUBLIC_KEY': os.getenv('VCA_PUBLIC_KEY', None),\n 'VCA_CACERT': os.getenv('VCA_CACERT', \"invalidcacert\"),\n }\n\n client = self.get_n2vc(params)\n\n await client.login()\n assert client.authenticated\n\n await client.logout()\n assert client.authenticated is False",
"def test_aio_can_login_to_web_portal(aio):",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def test_open_id_setup(self):\r\n self.attempt_login(200)",
"def the_user_should_be_able_to_connect_to_another_device():\n assert web_app.connect_to_device2()",
"def test_successful_login(self):\n pass",
"def test_start_new_verification(self):\r\n user = UserFactory.create(username=\"rusty\", password=\"test\")\r\n self.client.login(username=\"rusty\", password=\"test\")",
"def test_access_token_in_session_after_login(self, client, valid_otp_data):\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n\n session_resp = client.get(\"/view_session\")\n assert \"access_token\" in session_resp.json()",
"def test_login(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'__action': 'login', 'id': people[0].id, 'password': \"testing\"}\n self.post('user', 200, params=p)",
"def test_activate_login(self):\r\n pass",
"def test_031_login_as_exp_pwd_user(self):\n\n testflow.step(LOG_USR_MSG, TEST_USER2)\n users.loginAsUser(\n TEST_USER2,\n config.INTERNAL_PROFILE,\n self.user_password,\n True,\n )\n\n testflow.step(TST_CON_MSG, TEST_USER2)\n assert not connectionTest(), \"User '%s' can login\" % TEST_USER2",
"def test_login_post_with_2fa(self, *_):\n view = views.Login.as_view()\n form = forms.LoginForm()\n form.data[\"localname\"] = \"badger\"\n form.data[\"password\"] = \"password\"\n request = self.factory.post(\"\", form.data)\n request.user = self.anonymous_user\n middleware = SessionMiddleware(request)\n middleware.process_request(request)\n request.session.save()\n\n with patch(\"bookwyrm.views.landing.login.login\"):\n result = view(request)\n self.assertEqual(result.url, \"/2fa-check\")\n self.assertEqual(result.status_code, 302)",
"def testLoginTwice(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userG2\", \"password\"))\n self.assertEquals(self.users.login(\"userG2\", \"password\"), 2)\n self.assertEquals(self.users.login(\"userG2\", \"password\"), 3)",
"def test_030_login_as_user(self):\n\n testflow.step(LOG_USR_MSG, TEST_USER1)\n users.loginAsUser(\n TEST_USER1,\n config.INTERNAL_PROFILE,\n self.user_password,\n True,\n )\n\n testflow.step(TST_CON_MSG, TEST_USER1)\n assert connectionTest(), \"User '%s' can't login\" % TEST_USER1",
"def test_login(self):\n\n print('\\n\\nEnter a valid LendingClub account information...')\n email = input('Email:')\n password = getpass.getpass()\n\n self.assertTrue(self.session.authenticate(email, password))\n print('Authentication successful')",
"def testGaiaLogin(self):\n if self._is_guest:\n return\n try:\n username, password = next(self._GetCredentialsIter())\n except StopIteration:\n username = 'autotest.catapult'\n password = 'autotest'\n with self._CreateBrowser(gaia_login=True,\n username=oobe.Oobe.Canonicalize(username),\n password=password):\n self.assertTrue(py_utils.WaitFor(self._IsCryptohomeMounted, 10))",
"def test_valid_login(self):\n self.assertTrue(self.session.authenticate('[email protected]', 'supersecret'))",
"def test_login_user_second_time(client, new_user, registered_user):\n rv = client.post(\"/auth/login/\", json=new_user)\n token = rv.get_json()[\"access_token\"]\n rv = client.post(\n \"/auth/login/\",\n headers={\"Authorization\": \"Bearer {}\".format(token)},\n json=new_user\n )\n response = rv.get_json()\n assert rv.status_code == HTTPStatus.UNAUTHORIZED\n assert response[\"message\"] == \"User logged in already\"",
"def test_login_with_testshib_provider_short_session_length(self):\n # Configure the provider with a 10-second timeout\n self._configure_testshib_provider(max_session_length=10)\n\n now = datetime.datetime.utcnow()\n with freeze_time(now):\n # Test the login flow, adding the user in the process\n self._test_login()\n\n # Wait 30 seconds; longer than the manually-set 10-second timeout\n later = now + datetime.timedelta(seconds=30)\n with freeze_time(later):\n # Test returning as a logged in user; this method verifies that we're logged out first.\n self._test_return_login(previous_session_timed_out=True)",
"def test_twice_logging_in(test_client, test_session):\n tokens = []\n for _ in range(2):\n with patch(\"validators.authentication.session\", test_session):\n with patch(\"views.login.session\", test_session):\n payload = {\"username\": \"testuser1\", \"password\": \"Qwerty123_\"}\n response = test_client.post(\"api/v1/login\", data=payload)\n assert response.status_code == 200\n tokens.append(response.json()[\"access_token\"])\n time.sleep(1)\n assert tokens[0] != tokens[1]",
"def login():",
"def login():",
"def do_login(self, backend, user):",
"def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)",
"def test_basic_login(self):\n c = Client()\n c.login(username='a', password='123456')",
"def test_set_session():",
"def test_login_session_check(self):\r\n\t\tprint(\"\")\r\n\t\tprint(\"`login_session_check` method tests\")\r\n\t\tprint(\"---------------------\")\r\n\t\tprint(\"Test: `login_session_check: logged in`\")\r\n\t\tpath = 'login'\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\",\r\n\t\t\t\t\t\t\t\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == True\r\n\t\t\tassert session_check[1]['FORCE_PWD_CHANGE'] == True\r\n\t\t\tassert session_check[1]['LAST_ACCT'] == 1\r\n\t\t\tassert session_check[1]['NEXT_PWNED'] == None\r\n\t\t\tassert session_check[1]['ROOT'] == True\r\n\t\t\tassert session_check[1]['USER_ID'] == 2\r\n\t\t\tassert session_check[1]['USER'] == 'restuser'\r\n\t\t\tassert session_check[1]['expired_pwd'] == False\r\n\t\t\tprint(\"Passed!!!\")\r\n\t\t\tprint(\"Test: `login_session_check: not logged in`\")\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\t[],\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == False\r\n\t\t\tassert not session_check[1] # dictionary should be empty\r\n\t\tprint(\"Passed!!!\")",
"def testing_create_login_session():\n set_logged_in_user(request.form['email'])\n app.logger.debug(\n \"logged in user set to {} for testing\".format(\n request.form['email']))\n return ''",
"def test_02_account_login(self):\n self.login(email='[email protected]', password='Abcd@1234')\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'valid login test completed'"
]
| [
"0.79027075",
"0.6791165",
"0.6721474",
"0.6619786",
"0.65200156",
"0.649458",
"0.64693856",
"0.64288",
"0.6404014",
"0.64035445",
"0.63950443",
"0.63943654",
"0.6369294",
"0.6346183",
"0.6331206",
"0.6299405",
"0.6282982",
"0.6256549",
"0.6255143",
"0.6224549",
"0.62157404",
"0.61894566",
"0.61894566",
"0.6181815",
"0.61738956",
"0.6173311",
"0.6163155",
"0.6156066",
"0.6153292",
"0.6142292"
]
| 0.82061094 | 0 |
Check if the serial message has a valid CRC. | def crcCheck(serialMessage):
checkResult = False
#CRC from serial message
crc = int.from_bytes(serialMessage[14:16], byteorder='little', signed=False)
#calculated CRC
crcCalc = libscrc.modbus(serialMessage[0:14])
if crc == crcCalc:
checkResult = True
return checkResult | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_crc(self):\n return self.name.startswith('CRC')",
"def check_crc(chunk, crc):\n\n crc = bytes(crc)\n crc_this = bytes(ensure_crc(crc16.crc16xmodem(bytes(chunk))).encode('utf-8'))\n if crc_this == crc:\n return True\n else:\n return False",
"def check_crc(function_specific_data, crc):\n crc_cal = calculate_crc(function_specific_data)\n \n if crc == crc_cal:\n return True\n else:\n return False",
"def crcCheck(data, crcInput):\n crcCalc = crcCompute(data)\n crcLength = len(crcCalc)\n if len(crcInput) != crcLength:\n raise Exception(\"CRC input value must be a sequence of %d bytes\" % (crcLength))\n \n for i in range(crcLength):\n if crcInput[i] != crcCalc[i]:\n return False\n return True",
"def _validate_checksum(self, msg: bytes) -> bool:\n return self._checksum(msg) == msg[8]",
"def check(self, stream):\n return np.all(self._crc(stream.copy()) == 0)",
"def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True",
"def validate_checksum(self):\n return self.calculate_checksum() == self.checksum()",
"def valid_checksum(self, msg: dict) -> bool:\n packed_seg = struct.pack(HEADER_FORMAT + DATA_FORMAT, msg['seq_nr'], msg['ack_nr'], msg['flag'].value,\n msg['win'], msg['dlen'], 0, msg['data'])\n cksum = self.calc_checksum(packed_seg)\n return cksum == msg['cksum']",
"def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)",
"def verify_checksum(self):\n return self.generate_header_checksum(omit_checksum=False) == 0",
"def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum",
"def check_crc(self,message_from_sensor, check_value_from_sensor):\n remainder = message_from_sensor << 8 #Pad with 8 bits because we have to add in the check value\n remainder |= check_value_from_sensor #Add on the check value\n\n divsor = SHIFTED_DIVISOR\n\n for i in range(0, 16): #Operate on only 16 positions of max 24. The remaining 8 are our remainder and should be zero when we're done.\n if ((remainder & 1 << (23 - i)) > 0): #Check if there is a one in the left position\n remainder ^= divsor\n divsor >>= 1 #Rotate the divsor max 16 times so that we have 8 bits left of a remainder\n \n return remainder",
"def test_crc():\n status_update = bytes.fromhex('7E1DFFAF13000064082D00000100000400000000000000000064000000067E')\n status_update_crc = 0x06\n\n conf_req = bytes.fromhex('7E050ABF04777E')\n conf_req_crc = 0x77\n\n spa = balboa.BalboaSpaWifi('gnet-37efed')\n\n result = spa.balboa_calc_cs(conf_req[1:], 4)\n print('Expected CRC={0} got {1}'.format(hex(conf_req_crc), hex(result)))\n if result != conf_req_crc:\n return 1\n\n result = spa.balboa_calc_cs(status_update[1:], 28)\n print('Expected CRC={0} got {1}'.format(hex(status_update_crc), hex(result)))\n if result != status_update_crc:\n return 1",
"def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0",
"def ensure_crc(crc):\n\n crc = str(crc)\n if len(crc) == 1:\n return '0000'+crc\n elif len(crc) == 2:\n return '000'+crc\n elif len(crc) == 3:\n return '00'+crc\n elif len(crc) == 4:\n return '0'+crc\n elif len(crc) == 5:\n return crc\n else:\n print('There was a problem with the number ensure_crc')",
"def ensure_crc(crc):\n\n crc = str(crc)\n if len(crc) == 1:\n return '0000'+crc\n elif len(crc) == 2:\n return '000'+crc\n elif len(crc) == 3:\n return '00'+crc\n elif len(crc) == 4:\n return '0'+crc\n elif len(crc) == 5:\n return crc\n else:\n print('There was a problem with the number ensure_crc')",
"def is_valid(self, key: Bits, verbose=True):\n ivk = wep_make_ivk(key, self.iv)\n if verbose:\n debug(verbose, fun_name + \" : ivk = \" + str(ivk))\n\n decrypted = rc4_crypt(self.payload, ivk, verbose)\n if verbose:\n debug(verbose, fun_name + \" : decrypted = \" + str(ivk))\n\n decrypted_message = decrypted[:-len(self.crc)]\n if verbose:\n debug(verbose, fun_name + \" : decrypted_message = \" + str(decrypted_message))\n\n decrypted_crc = decrypted[-len(self.crc):]\n if verbose:\n debug(verbose, fun_name + \" : decrypted_crc = \" + str(decrypted_crc))\n\n int_computed_crc, computed_crc = crc32(decrypted_message)\n if verbose:\n debug(verbose, fun_name + \" : computed_crc = \" + str(computed_crc))\n debug(verbose, fun_name + \" : computed_crc = \" + str(int_computed_crc))\n debug(verbose, fun_name + \" : frame_crc = \" + str(self.crc))\n\n return decrypted_crc == computed_crc",
"def valid_response(line):\n cksum = int(line[-2:], 16) # checksum is last two characters in ASCII hex\n data = line[:-2] # remove checksum from data\n\n calc_cksum = checksum(data)\n if cksum != calc_cksum:\n log.debug('checksum failed (%r): should be %s', line, hex(calc_cksum))\n return False\n return True",
"def verify_blob_checksum(self, blob):\n path = self.csum_to_path(blob)\n csum = path.checksum()\n return csum != blob",
"def _verify_checksum(data, checksum):\n sha256_hash = hashlib.sha256(data).hexdigest().encode()\n return to_bin(sha256_hash)[0 : len(data) * 8 // 32] == checksum",
"def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1",
"def check_record(self, record):\n checking = reduce(lambda x,y: x + y, [int(record[i*2:i*2+2], 16) for i in [x for x in xrange(len(record)/2)]])\n if ('%02x' % checking)[-2:] != '00':\n raise Exception ('ERROR: Checksum doesn\\' match! Record is %s' % (record, ))",
"def in_crc_errors(self) -> str:\n return self._in_crc_errors",
"def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False",
"def checkChecksum(key):\n\t#decode to base256\n\tcheckKey = enc.b58decode(key)\n\tchecksum = checkKey[-4:]\n\thash = hashlib.sha256(hashlib.sha256(checkKey[:-4]).digest()).digest()[:4]\n\tif hash == checksum:\n\t\treturn True\n\telse:\n\t\treturn False",
"def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False",
"def serial_ok(self) -> bool:\r\n return self.ser is not None",
"def verify_checksum(message, previous_csum=0):\n if message.message_type in CHECKSUM_MSG_TYPES:\n csum = compute_checksum(\n message.checksum[0],\n message.args,\n previous_csum,\n )\n\n if csum == message.checksum[1]:\n return True\n else:\n return False\n else:\n return True",
"def test_wrong_checksum(self):\n self.assertNotEqual(utils.checksum('fooo'), b'A')"
]
| [
"0.7833014",
"0.76461273",
"0.7568382",
"0.7517449",
"0.7303092",
"0.7155623",
"0.711303",
"0.6821579",
"0.68020415",
"0.6791723",
"0.6733442",
"0.64848715",
"0.6392629",
"0.6391682",
"0.6333633",
"0.622844",
"0.622844",
"0.6166218",
"0.6160127",
"0.61058235",
"0.6096547",
"0.60693836",
"0.6011226",
"0.6009101",
"0.59675807",
"0.59547764",
"0.5941652",
"0.5917978",
"0.5904892",
"0.5825876"
]
| 0.8432148 | 0 |
This function tests that square inherits from Base | def test_SquareinheritancefromBase(self):
Square.reset_objects()
self.assertEqual(issubclass(Square, Base), True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_SquareinheritancefromRectangle(self):\n Square.reset_objects()\n self.assertEqual(issubclass(Square, Rectangle), True)",
"def test_inheritance(self):\n self.assertTrue(issubclass(Rectangle, Base))",
"def test_square_class(self):\n s1 = Square(10)\n self.assertEqual(10, s1.size)\n\n s2 = Square(10, 2)\n self.assertEqual(10, s2.size)\n self.assertEqual(2, s2.x)\n\n s3 = Square(3, 5, 2)\n self.assertEqual(3, s3.size)\n self.assertEqual(5, s3.x)\n self.assertEqual(2, s3.y)\n\n s4 = Square(10, 2, 0, 12)\n self.assertEqual(10, s4.size)\n self.assertEqual(12, s4.id)\n self.assertEqual(2, s4.x)\n self.assertEqual(0, s4.y)",
"def test_not_inheritance(self):\n self.assertNotIsInstance(Base, Square)\n self.assertNotIsInstance(Rectangle, Square)",
"def test_objectinheritance(self):\n Square.reset_objects()\n s1 = Square(5)\n self.assertEqual(isinstance(s1, Square), True)",
"def test_create_subclass(self):\n self.assertTrue(issubclass(Rectangle, Base))\n r = Rectangle(10, 2)\n self.assertEqual(r.height, 2)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n self.assertEqual(r.width, 10)\n r = Rectangle(2, 10)\n self.assertIsInstance(r, Rectangle)\n self.assertEqual(r.width, 2)\n self.assertEqual(r.height, 10)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n r = Rectangle(10, 2, 0, 0, 12)\n self.assertIsInstance(r, Rectangle)\n self.assertEqual(r.width, 10)\n self.assertEqual(r.height, 2)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n self.assertEqual(r.id, 12)",
"def test_2_no_args_square(self):\r\n with self.assertRaises(TypeError):\r\n S1 = Square()",
"def test_4_size_integer(self):\r\n with self.assertRaises(TypeError):\r\n S3 = Square('a')",
"def test_6_x_integer(self):\r\n with self.assertRaises(TypeError):\r\n S5 = Square(1, 'a')",
"def test_inheritance(self):\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n h.assert_inheritance(x, tubular.base.BaseTransformer)",
"def test_area(self):\n s1 = Square(3)\n self.assertEqual(9, s1.area())\n s4 = Square(5, 0, 0, 12)\n self.assertEqual(25, s4.area())",
"def test_get_square(self):\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.assertEqual(self.game.get_square(row, col), ' ')\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n self.assertEqual(self.game.get_square(row, col), PLAYERO)",
"def test_square(self):\n result = shape_area.square_area(5)\n self.assertEqual(result,25)",
"def test_subclass():\n assert issubclass(BlockOnSpring, PhysicsModule)",
"def test_singlesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)\n s2 = Square(10, 10, 10, 10)\n self.assertEqual(s2.width, 10)\n self.assertEqual(s2.height, 10)\n self.assertEqual(s2.x, 10)\n self.assertEqual(s2.y, 10)\n self.assertEqual(s2.id, 10)",
"def test_0_is_Rectangle_class(self):\r\n self.assertTrue(issubclass(type(self.S0), Rectangle) and\r\n type(self.S0) != Rectangle)",
"def test_cl_fix():\n assert Cl is BaseCl",
"def test_inherits_from_base(self):\n self.assertIsInstance(self.obj, Base, \"created obj does not \" +\n \"inherit from the Base class.\")",
"def test_1_square_attributes(self):\r\n self.assertEqual(self.S0.width, 2)\r\n self.assertEqual(self.S0.height, 2)\r\n self.assertEqual(self.S0.x, 0)\r\n self.assertEqual(self.S0.y, 0)",
"def test_area_method(self):\n s3 = Square(3, 1, 3)\n self.assertEqual(s3.area(), 9)",
"def test_is_subclass(self):\n user = User()\n user_details = {\"user_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertIsInstance(student, BaseModel)\n self.assertTrue(hasattr(student, \"id\"))\n self.assertTrue(hasattr(student, \"created_at\"))\n self.assertTrue(hasattr(student, \"updated_at\"))\n self.assertTrue(hasattr(student, \"user_id\"))",
"def test_get_squarerect_sqr_all_int(self):\n result = get_squarerectangle_type(2, 2, 2, 2,)\n self.assertEqual(result, 'square')",
"def test_inheritance(self):\n self.assertTrue(issubclass(type(self.user_1), BaseModel))",
"def test_multiplesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)",
"def test_sizegetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)",
"def is_square (self):\n return self.width == self.height",
"def test_square_area(self):\n self.assertEqual(4, square_area(self.values['side']))",
"def test_vec3_square(self):\n\n vec = Vec3(2, 3, 4)\n vec.square()\n\n self.assertEqual(Vec3(4, 9, 16), vec)",
"def test_singlesquarecreation(self):\n Square.reset_objects()\n s1 = Square(10)\n self.assertEqual(s1.id, 1)",
"def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)"
]
| [
"0.78790617",
"0.7464751",
"0.74157864",
"0.72943133",
"0.7226751",
"0.6838892",
"0.6658183",
"0.63029987",
"0.62793493",
"0.6201508",
"0.61887723",
"0.6183908",
"0.61684614",
"0.6153239",
"0.61244017",
"0.61089444",
"0.6081408",
"0.60803723",
"0.6078038",
"0.6011344",
"0.60054576",
"0.60034215",
"0.5995244",
"0.59866405",
"0.5980287",
"0.59250337",
"0.5923778",
"0.5921001",
"0.58979964",
"0.5893351"
]
| 0.8380413 | 0 |
This function tests that square inherits from Base | def test_SquareinheritancefromRectangle(self):
Square.reset_objects()
self.assertEqual(issubclass(Square, Rectangle), True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_SquareinheritancefromBase(self):\n Square.reset_objects()\n self.assertEqual(issubclass(Square, Base), True)",
"def test_inheritance(self):\n self.assertTrue(issubclass(Rectangle, Base))",
"def test_square_class(self):\n s1 = Square(10)\n self.assertEqual(10, s1.size)\n\n s2 = Square(10, 2)\n self.assertEqual(10, s2.size)\n self.assertEqual(2, s2.x)\n\n s3 = Square(3, 5, 2)\n self.assertEqual(3, s3.size)\n self.assertEqual(5, s3.x)\n self.assertEqual(2, s3.y)\n\n s4 = Square(10, 2, 0, 12)\n self.assertEqual(10, s4.size)\n self.assertEqual(12, s4.id)\n self.assertEqual(2, s4.x)\n self.assertEqual(0, s4.y)",
"def test_not_inheritance(self):\n self.assertNotIsInstance(Base, Square)\n self.assertNotIsInstance(Rectangle, Square)",
"def test_objectinheritance(self):\n Square.reset_objects()\n s1 = Square(5)\n self.assertEqual(isinstance(s1, Square), True)",
"def test_create_subclass(self):\n self.assertTrue(issubclass(Rectangle, Base))\n r = Rectangle(10, 2)\n self.assertEqual(r.height, 2)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n self.assertEqual(r.width, 10)\n r = Rectangle(2, 10)\n self.assertIsInstance(r, Rectangle)\n self.assertEqual(r.width, 2)\n self.assertEqual(r.height, 10)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n r = Rectangle(10, 2, 0, 0, 12)\n self.assertIsInstance(r, Rectangle)\n self.assertEqual(r.width, 10)\n self.assertEqual(r.height, 2)\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n self.assertEqual(r.id, 12)",
"def test_2_no_args_square(self):\r\n with self.assertRaises(TypeError):\r\n S1 = Square()",
"def test_4_size_integer(self):\r\n with self.assertRaises(TypeError):\r\n S3 = Square('a')",
"def test_6_x_integer(self):\r\n with self.assertRaises(TypeError):\r\n S5 = Square(1, 'a')",
"def test_inheritance(self):\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n h.assert_inheritance(x, tubular.base.BaseTransformer)",
"def test_area(self):\n s1 = Square(3)\n self.assertEqual(9, s1.area())\n s4 = Square(5, 0, 0, 12)\n self.assertEqual(25, s4.area())",
"def test_get_square(self):\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.assertEqual(self.game.get_square(row, col), ' ')\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n self.assertEqual(self.game.get_square(row, col), PLAYERO)",
"def test_square(self):\n result = shape_area.square_area(5)\n self.assertEqual(result,25)",
"def test_subclass():\n assert issubclass(BlockOnSpring, PhysicsModule)",
"def test_singlesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)\n s2 = Square(10, 10, 10, 10)\n self.assertEqual(s2.width, 10)\n self.assertEqual(s2.height, 10)\n self.assertEqual(s2.x, 10)\n self.assertEqual(s2.y, 10)\n self.assertEqual(s2.id, 10)",
"def test_0_is_Rectangle_class(self):\r\n self.assertTrue(issubclass(type(self.S0), Rectangle) and\r\n type(self.S0) != Rectangle)",
"def test_inherits_from_base(self):\n self.assertIsInstance(self.obj, Base, \"created obj does not \" +\n \"inherit from the Base class.\")",
"def test_cl_fix():\n assert Cl is BaseCl",
"def test_1_square_attributes(self):\r\n self.assertEqual(self.S0.width, 2)\r\n self.assertEqual(self.S0.height, 2)\r\n self.assertEqual(self.S0.x, 0)\r\n self.assertEqual(self.S0.y, 0)",
"def test_area_method(self):\n s3 = Square(3, 1, 3)\n self.assertEqual(s3.area(), 9)",
"def test_is_subclass(self):\n user = User()\n user_details = {\"user_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertIsInstance(student, BaseModel)\n self.assertTrue(hasattr(student, \"id\"))\n self.assertTrue(hasattr(student, \"created_at\"))\n self.assertTrue(hasattr(student, \"updated_at\"))\n self.assertTrue(hasattr(student, \"user_id\"))",
"def test_get_squarerect_sqr_all_int(self):\n result = get_squarerectangle_type(2, 2, 2, 2,)\n self.assertEqual(result, 'square')",
"def test_inheritance(self):\n self.assertTrue(issubclass(type(self.user_1), BaseModel))",
"def test_multiplesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)",
"def test_sizegetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)",
"def is_square (self):\n return self.width == self.height",
"def test_square_area(self):\n self.assertEqual(4, square_area(self.values['side']))",
"def test_vec3_square(self):\n\n vec = Vec3(2, 3, 4)\n vec.square()\n\n self.assertEqual(Vec3(4, 9, 16), vec)",
"def test_singlesquarecreation(self):\n Square.reset_objects()\n s1 = Square(10)\n self.assertEqual(s1.id, 1)",
"def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)"
]
| [
"0.8381641",
"0.7465298",
"0.74154085",
"0.72949463",
"0.7227583",
"0.68388546",
"0.66581863",
"0.6302443",
"0.62782824",
"0.6202097",
"0.61873174",
"0.61839616",
"0.6166422",
"0.6153196",
"0.61243147",
"0.6108876",
"0.6081987",
"0.6081601",
"0.6077003",
"0.6009714",
"0.600624",
"0.60023105",
"0.5995622",
"0.59862196",
"0.5979558",
"0.5924632",
"0.59223014",
"0.5920604",
"0.58983326",
"0.5893985"
]
| 0.7879409 | 1 |
This function tests that ValueError is thrown for 0 size value | def test_0size(self):
Square.reset_objects()
with self.assertRaises(ValueError) as e:
s1 = Square(0)
self.assertEqual(str(e.exception), "width must be > 0") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_size(s):\n\n s = check_1d(s, \"size\")\n if any(map(lambda d: d <= 0, s)):\n raise Exception('Size cannot be 0 or negative')\n\n return s",
"def test_Sobol_G_raises_error_if_values_wrong_size():\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n with raises(ValueError):\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)",
"def test_5_size_less_than_1(self):\r\n with self.assertRaises(ValueError):\r\n S4 = Square(0)",
"def test_error_when_length_mismatch(self):\n self._assert_raise_error(\n probabilities=[0.5, 0.5],\n random_nums=[0],\n error=LengthMismatchError,\n code=1\n )",
"def test_wrong_length(self):\n with self.assertRaises(ValueError):\n calc_disc_b(np.ones(10), np.ones(10), np.ones(5), 0.3)",
"def handle_errors(self, value):\n if not isinstance(value, int):\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")",
"def array_not_empty(array: np.ndarray) -> None:\n if not array.size:\n raise ValueError(\"Array must not be empty\")",
"def test_error_zero_width_height(self):\n for name in ['width', 'height']:\n self.root.find('size').find(name).text = '0'\n self._test_helper(ValueError)",
"def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return",
"def test_invalid(self):\n a = np.ones((10, 10))\n ai = np.ones((10, 2), dtype=np.intp)\n\n # sanity check\n take_along_axis(a, ai, axis=1)\n\n # not enough indices\n assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)\n # bool arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)\n # float arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)\n # invalid axis\n assert_raises(AxisError, take_along_axis, a, ai, axis=10)",
"def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True",
"def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)",
"def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1",
"def test__get_kernel_size_numel_raise_value_error(kernel_size):\n with pytest.raises(ValueError):\n utils._get_kernel_size_numel(kernel_size)",
"def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0",
"def test_Sobol_G_raises_error_if_values_wrong_size():\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)",
"def test_case_07_side_too_small(self):\n self.__assert_equals_test_case([(-2, 2, 3), (0, 2, 3)], 'InvalidInput')",
"def test_badsizevaluelists(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square([1, 2], 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def check_empty(self):\n if self.size():\n raise AttributeError",
"def test_value_init7(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(-4, 5)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)",
"def test_random_small_sample_error(self):\n with self.assertRaises(ValueError):\n random_small_sample([], 1e7)",
"def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n return True",
"def test_empty(self):\n self.assertRaisesInternalError(())",
"def __verify_arguments(self):\r\n if len(self.__pointer_data) == 0:\r\n raise ValueError(\"Input data is empty (size: '%d').\" % len(self.__pointer_data))\r\n\r\n if self.__number_clusters <= 0:\r\n raise ValueError(\"Amount of cluster (current value: '%d') for allocation should be greater than 0.\" %\r\n self.__number_clusters)\r\n\r\n if self.__numlocal < 0:\r\n raise ValueError(\"Local minima (current value: '%d') should be greater or equal to 0.\" % self.__numlocal)\r\n\r\n if self.__maxneighbor < 0:\r\n raise ValueError(\"Maximum number of neighbors (current value: '%d') should be greater or \"\r\n \"equal to 0.\" % self.__maxneighbor)",
"def fit_the_first_value(arr):\n try:\n if arr[0]>len(arr):\n print (\"Too big\")\n elif arr[0]<len(arr):\n print (\"Too small\")\n else:\n print(\"Just right\")\n except IndexError:\n print (\"index error\") \n except TypeError:\n print (\"type error\")",
"def test_init_value_error(self):\n data = [[0, 0], [0, 0], [0, 0]]\n with self.assertRaises(ValueError):\n Board(data)",
"def test_wrong_length(self):\n with self.assertRaises(ValueError):\n calc_disc_c(np.ones(10), np.ones(10), np.ones(5), 0.3)",
"def test_init_with_width_less_or_equal_0(self):\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n Square(0, 1)\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n Square(-1, 1)",
"def test_matern_zero_lengthscale(matern):\n with pytest.raises(ValueError) as exp:\n matern(lengthscale=0.0, variance=1.0, output_dim=1)\n assert exp.value.args[0].find(\"lengthscale must be positive.\") >= 0",
"def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))"
]
| [
"0.7179001",
"0.70014584",
"0.6692978",
"0.66857994",
"0.6674627",
"0.6652692",
"0.665025",
"0.6612061",
"0.66108143",
"0.6585465",
"0.6583776",
"0.6574457",
"0.65705",
"0.6555679",
"0.6463471",
"0.6458329",
"0.6458053",
"0.64574504",
"0.6448534",
"0.64482915",
"0.6438322",
"0.64232624",
"0.64230335",
"0.6418105",
"0.6405816",
"0.640397",
"0.63669175",
"0.63582975",
"0.6355412",
"0.63480127"
]
| 0.73414814 | 0 |
This function tests for bad x value with string | def test_badxvaluewithstring(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, "foo", 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def test_is_valid_label_value_invalid_input():\n # test length violations\n assert not is_valid_label_value(value=f\"{'v' * 64}\") # value too long\n # test first character violations (not alphanum)\n assert not is_valid_label_value(value=\"-\")\n assert not is_valid_label_value(value=\"-a\")\n assert not is_valid_label_value(value=\".b\")\n assert not is_valid_label_value(value=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_value(value=\"a-\")\n assert not is_valid_label_value(value=\"b.\")\n assert not is_valid_label_value(value=\"c \")\n assert not is_valid_label_value(value=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_value(value=\"a$$a\")\n assert not is_valid_label_value(value=\"b b\")",
"def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])",
"def test_badyvaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, \"foo\", 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)",
"def test_x_is_not_int(self):\n with self.assertRaisesRegex(TypeError, \"x must be an integer\"):\n Square(1, \"1\", -1)",
"def test_x_is_less_than_0(self):\n with self.assertRaisesRegex(ValueError, \"x must be >= 0\"):\n Square(1, -1, -1)",
"def test_check_x_TypeError_01(self):\n self.assertRaisesRegex(\n TypeError,\n 'x must be an integer',\n Rectangle,\n 4, 2, 'string''', 0, 12\n )",
"def validateWithDocString(self, x):\n up = 0\n sp = 0\n numb = 0\n ints = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n special = string.punctuation\n if type(x) is str:\n if len(x) >= 8:\n for i in x:\n if i.isupper():\n up += 1\n if i in special:\n sp += 1\n if i in ints:\n numb += 1\n if up >= 1 and sp >= 1 and numb >= 1:\n return True\n else:\n return False\n else:\n return False\n else:\n raise TypeError(\"Error\")",
"def _not_valid_(s) :\n return not s.valid()",
"def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)",
"def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')",
"def test_non_string_values(self):\n valid_xml = '{\"foo\": \"<b>Bar</b>\", \"baz\": true}'\n eq_(validate_xml(valid_xml), valid_xml)",
"def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)",
"def test_raw_bad_values(self):\n self.assertRawOK(['60'])\n self.assertRawOK(['1' * 10])\n self.assertRaisesHeaderError(['1' * 11])\n self.assertRaisesHeaderError(['60,60'])\n self.assertRaisesHeaderError(['60 60'])\n self.assertRaisesHeaderError(['60;60'])\n self.assertRaisesHeaderError(['60.60'])\n self.assertRaisesHeaderError(['60', '60'])\n self.assertRaisesHeaderError(['foo'])",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def test_convertCharToInt_bad_value(self):\n self.assertRaises(ValueError, rules.convertCharToInt, 'qq')",
"def stringTruth(x):\n if x is None:\n return False\n if x == '':\n return False\n if x == '0':\n return False\n lx = x.lower()\n if lx == 'false':\n return False\n if lx == 'f':\n return False\n return True",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_check_x_TypeError_02(self):\n self.assertRaisesRegex(\n TypeError,\n 'x must be an integer',\n Rectangle,\n 4, 2, [1, 2, 3, 4], 0, 12\n )",
"def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected",
"def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True",
"def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))",
"def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)",
"def __check_validation(input_string):\n if not input_string:\n raise NullInputException(\"Input string should be not empty\")\n if type(input_string) != str:\n raise NonStringInputException(\"Input value should be a string\")\n if len(input_string) >= 200:\n raise TooLongInputException(\"Input string should be less than 200 characters\")\n for i in input_string:\n if not i.isalpha():\n raise NonStringInputException(\"All input value characters should be an alpha\")",
"def is_resolution_and_offset_str(x):\n if x.count('x') == 1 and x.count('+') == 2:\n return True\n return False",
"def test_is_valid_annotation_value_invalid_input():\n # test valid label values\n assert not is_valid_annotation_value(value=1)"
]
| [
"0.69795847",
"0.6597234",
"0.6589841",
"0.6565734",
"0.6538347",
"0.64426655",
"0.6354306",
"0.6324324",
"0.6260431",
"0.625534",
"0.62540674",
"0.6247289",
"0.6183881",
"0.61823475",
"0.61577374",
"0.615086",
"0.6148752",
"0.6140381",
"0.61370695",
"0.61370695",
"0.6133567",
"0.6117196",
"0.6097802",
"0.6090464",
"0.6081173",
"0.6072278",
"0.60673654",
"0.6063736",
"0.6061528",
"0.6054879"
]
| 0.7074882 | 0 |
This function tests for bad x value with tuple | def test_badxvaluewithtuple(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, (1, 2), 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tuples_to_avoid(self):\n self.assertFalse(\n any(key in self.resultDict and self.resultDict[key] == tuplesToAvoid[key] for key in tuplesToAvoid))",
"def __allowed_values_correct_tuple(self):\n strTestName = 'Values of a tuple (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAllowed('parameter1', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (11, 3, 'Allowed string #1')\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _is_positive_int_tuple(item):\n if not isinstance(item, tuple):\n return False\n for i in item:\n if not _is_positive_int(i):\n return False\n return True",
"def test_16_tuple_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, ())\n self.assertEqual(\n \"height must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle((1, 2, 3), 2)\n self.assertEqual(\n \"width must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, (2, 4))\n self.assertEqual(\n \"x must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, 0, (\"hi\",))\n self.assertEqual(\n \"y must be an integer\",\n str(x.exception))",
"def test_t_paired_bad_data(self):\r\n self.assertRaises(ValueError, t_paired, self.y, [1, 2, 3])",
"def test_badyvaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, (1, 2), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def tuple_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (tuple, collections.abc.Sequence)):\n name = type(var).__name__\n raise TupleError(\n 'Function {} expected tuple, {} got instead.'.format(func, name))",
"def validate_coordinates_input(points: tuple) -> None:\n\n for coordinate in points:\n if not isinstance(coordinate, tuple):\n raise InvalidGroundValueError(\n f\"Object must be a tuple\"\n f\" with format like (1, 2), not {coordinate}\"\n )",
"def test_int_tuple_validation(value_idx_0: Any, value_idx_1: Any, value_idx_2: Any) -> None:\n m = ParamClass()\n val = (value_idx_0, value_idx_1, value_idx_2)\n if not all([isinstance(x, int) for x in val]):\n with pytest.raises(ValueError):\n m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)\n else:\n m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)",
"def __relational_restriction_incorrect_tuple_vs_number(self):\n strTestName = 'Tuple lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n RxCSObject.paramLE('tParameter1', 2, mul=4, add=-3) # In English, all the elements of the tuple must be lower or equal to 5\n RxCSObject.tParameter1 = (13, 8, 9, 11, 2, 5, 7, 101)\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)",
"def test_mix_tuple_issue_387(self):\n assert_type_and_value(\n tuple,\n (42, 'Test'),\n self.env(\n 'MIX_TUPLE',\n default=(0, ''),\n cast=lambda t: tuple(\n map(\n lambda v: int(v) if v.isdigit() else v.strip(),\n [c for c in t.strip('()').split(',')]\n )\n ),\n )\n )",
"def test__tuple_raise_dimension_error(N):\n dummy_kernel_size = None\n\n with pytest.raises(ValueError):\n utils._tuple(dummy_kernel_size, N)",
"def test_tuples():\n\n @type_checked\n def _run_test(something:(str, int, bool)):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=(None, \"12\", 1))",
"def testtuple ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, fracTup2, tupleValue in self.knownTupleValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) )\r\n\t\t\tfrac2 = eval ( r.sub ( 'frac.frac', fracTup2 ) )\r\n\t\t\tself.assertEqual ( frac1.tuple (), frac2.tuple () )\r\n\t\t\tself.assertEqual ( frac1.tuple () [0], tupleValue [0] )\r\n\t\t\tself.assertEqual ( frac1.tuple () [1], tupleValue [1] )",
"def testTypTagsTupleEnforced(self) -> None:\n fake_typ_tuple = typing.cast(tuple, ['win', 'x86'])\n with self.assertRaises(AssertionError):\n _ = data_types.Result('test', fake_typ_tuple, (1, 10), 'build_id')",
"def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')",
"def __relational_restriction_correct_tuple_vs_number(self):\n strTestName = 'Tuple higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n RxCSObject.paramHE('tParameter1', 10, mul=0.2) # In English, all the elements of the tuple must be higher or equal to 2\n\n RxCSObject.tParameter1 = (3, 8, 9, 11, 2, 5, 7, 101)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def valid_color_tuple(rgb_tuple, fix=False) -> (bool, tuple):\n if not isinstance(rgb_tuple, tuple):\n raise ValueError(\"valid_color_tuple(rgb_tuple) must be type(tuple)\")\n\n elif len(rgb_tuple) < 3 or len(rgb_tuple) > 4:\n raise ValueError(\n \"valid_color_tuple(rgb_tuple) should contain values for (R,G,B, or R,G,B,A)\")\n\n valid = True\n rgb_list = list(rgb_tuple)\n for i in range(len(rgb_list)):\n c = rgb_list[i]\n if not isinstance(c, int):\n raise ValueError(f\"A non-int value was passed as a color value. Received: {c}\")\n if c > 255 or c < 0:\n valid = False\n if fix:\n rgb_list[i] = 255 if c > 255 else 0\n\n if valid:\n return True, tuple(rgb_list)\n else:\n return False, tuple(rgb_list)",
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def _checkKey(self, key):\n x, y = self._convertNegativeTupleKeyToPositiveTupleKey(key)\n return x, y",
"def test_badxvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {1, 2, 3}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def validate_tuple(validator, data):\n if type(data) is not tuple:\n return False\n if len(validator) != len(data):\n return False\n # all elements must be valid\n return all(imap(validate_common, validator, data))",
"def test_case_04_legal_triangle(self):\n self.__assert_not_equal_test_case([(4, 4, 8), (4, 5, 8)], 'NotATriangle')",
"def test_badxvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, [1, 2], 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def __type_correct_tuple(self):\n\n strTestName = 'Type (tuple) is given (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddOpt('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.parameter1 = (1, 4)\n\n RxCSObject.paramAddOpt('parameter2', 'type \\'list\\' parameter')\n RxCSObject.paramType('parameter2', (list))\n RxCSObject.parameter2 = [10, 40]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_point__tuple(self):\n\n p = tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 4.3, -4.2, 3.1, 1)\n\n self.assertEqual(p.x, 4.3)\n self.assertEqual(p.y, -4.2)\n self.assertEqual(p.z, 3.1)\n self.assertEqual(p.w, 1)",
"def __type_of_elements_incorrect_floats_in_tuple(self):\n strTestName = 'Float elements in a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'type \\'tuple\\' parameter')\n RxCSObject.paramType('parameter1', (tuple))\n RxCSObject.paramTypeEl('parameter1', (tuple))\n RxCSObject.parameter1 = (1.2, 4.9)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)",
"def test_nested_fail():\n\n @type_checked\n def _run_test(thing:(float, float)): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(12)\n\n assert error.exconly() == (\n \"TypeError: Argument length mismatch. Expected a tuple of float, float.\"\n )",
"def filtered_xyz(self) -> tuple[int, int, int]:"
]
| [
"0.7006258",
"0.69747514",
"0.6616531",
"0.6556147",
"0.6536729",
"0.6535097",
"0.6523847",
"0.65184593",
"0.64959896",
"0.6440684",
"0.6398612",
"0.6395714",
"0.637838",
"0.6376401",
"0.6351738",
"0.6275108",
"0.62562424",
"0.62542605",
"0.62175417",
"0.6212631",
"0.6210541",
"0.620968",
"0.6188965",
"0.6185238",
"0.6182492",
"0.6156112",
"0.6136978",
"0.61307156",
"0.60828024",
"0.6042416"
]
| 0.69840086 | 1 |
This function tests for bad x value with list | def test_badxvaluewithlist(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, [1, 2], 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __allowed_values_incorrect_list_with_allowed_values(self):\n strTestName = 'Value NaN given in a list with allowed values (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)) + [np.NaN])\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)",
"def test_validate_positive_integer_list():\n with pytest.raises(ValueError):\n validate_positive_integer_list(0.5, 1)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([0.5, 0, 5], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([1], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(0, 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(-1, 2)\n\n assert validate_positive_integer_list(1, 2) == [1, 1]",
"def test_badyvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, [1, 2], 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')",
"def checkLists(self):\n self.x = self.checkList(self.x)\n self.y = self.checkList(self.y)\n return",
"def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)",
"def check_for_list(check):",
"def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))",
"def validate_X(X: List[str]):\n _check_string_list(X)",
"def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)",
"def test_return_negative_numbers_from_lst():\n assert return_negative_numbers_from_lst([-1, 0, 1, -23, 4]) == [-1, -23]\n assert return_negative_numbers_from_lst([0]) == []\n assert return_negative_numbers_from_lst([2, 3, 17]) == []\n assert return_negative_numbers_from_lst([-2, -3, -17]) == [-2, -3, -17]",
"def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])",
"def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)",
"def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)",
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def test_validate_bad_data(self, value):\n opt = scheme.ListOption('test-opt')\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_likelihoods_unequal_list_lengths(self):\r\n self.assertRaises(ValueError, likelihoods, [1, 2], [1])",
"def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])",
"def isvect(x):\n return isinstance(x,list) and len(x) == 4 and isgoodnum(x[0]) and isgoodnum(x[1]) and isgoodnum(x[2]) and isgoodnum(x[3])",
"def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)",
"def _validate_list_data(self, expected, actual):\n for e in expected:\n if e not in actual:\n return \"expected item {} not found in actual list\".format(e)\n return None",
"def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)",
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def assertInList(value, values, msg):\n\tassert value in values, msg",
"def eval_list(self, value):\n\n okay = True\n count = 0\n for v in value.elts:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay",
"def test_bad_curie_in_list():\n with pytest.raises(ValidationError):\n pub = Publication(id='PMID:123', mesh_terms=['foo:bar', 'bad_curie'])",
"def test_invalid(self):\n x = np.array([-5, -3, -2, -2, 100])\n with self.assertRaises(ValueError):\n npinterval.interval(x, 1.01)\n with self.assertRaises(ValueError):\n npinterval.interval(x, 0)",
"def test_t_paired_bad_data(self):\r\n self.assertRaises(ValueError, t_paired, self.y, [1, 2, 3])",
"def test_list_no_even_same(self):\n argument = [1, 3, 3, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)",
"def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value"
]
| [
"0.68315846",
"0.65919036",
"0.6537355",
"0.6444303",
"0.641923",
"0.6327543",
"0.6288081",
"0.6235862",
"0.6219361",
"0.61817026",
"0.6179775",
"0.61674714",
"0.61317503",
"0.6115415",
"0.6108486",
"0.61075026",
"0.6064361",
"0.60562",
"0.6040578",
"0.6025303",
"0.6013717",
"0.59967905",
"0.59568334",
"0.59559894",
"0.5955749",
"0.595004",
"0.59407836",
"0.59395814",
"0.5936712",
"0.59351027"
]
| 0.7108924 | 0 |
This function tests for bad size value with bools | def test_badsizevaluebool(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(True, 1, 2, 3)
self.assertEqual(str(e.exception), 'width must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n return True",
"def are_sizes_valid(sizes):\n return all(isinstance(size, int) and size >= 16 and size <= 28 for size in sizes)",
"def CheckForSize(collection, expected_size, equal_flag, unequal_flag,\n unexpectedly_empty_flag=None):\n\n if len(collection) == expected_size:\n return equal_flag\n elif collection or unexpectedly_empty_flag is None:\n return unequal_flag\n else:\n return unexpectedly_empty_flag",
"def voxelConsistency(cleaned_dataframe, column_number, expected_size):\n consistency_boolean = True\n for row in cleaned_dataframe.index:\n if cleaned_dataframe[column_number][row] == expected_size:\n continue\n elif cleaned_dataframe[column_number][row] != expected_size:\n print(\"Subject scan \" + cleaned_dataframe[0][row] + \" does not have voxel size of \" +str(expected_size))\n consistency_boolean = False\n return consistency_boolean",
"def check_size(s):\n\n s = check_1d(s, \"size\")\n if any(map(lambda d: d <= 0, s)):\n raise Exception('Size cannot be 0 or negative')\n\n return s",
"def check_resize_size(size):\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for i, value in enumerate(size):\n check_value(value, (1, INT32_MAX), \"size at dim {0}\".format(i))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")",
"def has_definite_size(iterable):\n return hasattr(iterable, '__len__')",
"def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii",
"def full(self):\n return self.size >= self.maxsize",
"def _is_frame_legal_size(data: bytes) -> bool:\n return len(data) < UDP_MAX_SIZE",
"def test_sizesetterwithbool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = True\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def is_complete(matrix,size):\n for _,row in matrix.items():\n if len(row) < size:\n return False\n return True",
"def is_valid_board_size(board_size):\n if not isinstance(board_size, int):\n return False\n if board_size < 3 or board_size > Board.LARGEST_BOARD_SIZE:\n return False\n return True",
"def check_bounds (position, size):\n \n for item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True",
"def __check_bit_size(self, value, num_bits):\n is_fit = False\n if value <= 2 ** num_bits - 1:\n is_fit = True\n return is_fit",
"def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset + itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n imin = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] <= 0)\n imax = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] > 0)\n return 0 <= offset + imin and offset + imax + itemsize <= memlen",
"def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset+itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n\n imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] <= 0)\n imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] > 0)\n\n return 0 <= offset+imin and offset+imax+itemsize <= memlen",
"def _not_allowed_len(values, sieve):\n sieve = set(sieve)\n return any(len(i) not in sieve for i in values)",
"def test_case_06_side_too_big(self):\n self.__assert_equals_test_case([(195, 10, 201)], 'InvalidInput')",
"def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False",
"def is_int(self, size=None):\n return False",
"def check_crop_size(size):\n type_check(size, (int, list, tuple), \"size\")\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for value in size:\n check_value(value, (1, FLOAT_MAX_INTEGER))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")",
"def isFull(self) -> bool:\n return self.size == self.maxlen",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def check_size(self):\n\n if not self.size:\n if not os.path.exists(self.get_path()): return False\n size = os.path.getsize(self.get_path())\n else: size = self.size\n Settings.maybe_print(\"file size: {}kb - {}mb\".format(size/1000, size/1000000))\n global ONE_MEGABYTE\n if size <= ONE_MEGABYTE:\n Settings.warn_print(\"small file size\")\n global ONE_HUNDRED_KILOBYTES\n if size <= ONE_HUNDRED_KILOBYTES:\n Settings.warn_print(\"tiny file size\")\n self.size = size\n if size == 0:\n Settings.err_print(\"empty file size\")\n return False\n return True",
"def correct_batch_size_in_files(self):\n print('checking correct file sizes')\n all_ok = True\n for f in self.data_filenames:\n all_ok *= (np.load(f).shape[0] == self.batch_size)\n if not all_ok:\n break\n print(all_ok)\n return all_ok",
"def isLegal(self):\n counter = 0\n for t in self.types:\n if t > 0:\n counter = counter + 1\n if counter < 4:\n return True\n else:\n return False",
"def _is_size_key (self, key):\n return key == '$size' or key == 'size'",
"def test_size_too_big(self):\n max_size = max(settings.MISAGO_AVATARS_SIZES)\n too_big = max_size * 2\n\n self.assertEqual(clean_size(too_big), max_size)",
"def correct_size():\n check50.run(\"./inheritance_test\").stdout(\"size_true.*\").exit(0)"
]
| [
"0.71509224",
"0.68044245",
"0.6784293",
"0.67276603",
"0.6585997",
"0.654349",
"0.6481282",
"0.6463",
"0.64172286",
"0.6415458",
"0.64140904",
"0.63802457",
"0.6275432",
"0.6251223",
"0.62397385",
"0.6207709",
"0.61241347",
"0.6112032",
"0.6096959",
"0.6092303",
"0.6078279",
"0.6078095",
"0.6077518",
"0.60773",
"0.6072404",
"0.6057765",
"0.6042659",
"0.6041783",
"0.6038626",
"0.60313773"
]
| 0.71739995 | 0 |
This function tests for bad y value with bools | def test_badyvaluewithbools(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, 2, True, 3)
self.assertEqual(str(e.exception), 'y must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_y(self):\n return any(map(lambda s: s.is_y, self))",
"def false_neg(yt, yp) -> Any:\n from keras import backend as K\n return K.sum(K.cast((1 - yp) * (0 + yt) > 0.5, 'float')) / K.maximum(1.0, K.sum(0 + yt))",
"def isfalse(variable):\n\n # Return the answer\n return variable in [0, 0.0, False, [], {}, math.nan, \"\", (), None]",
"def is_tr(self, y, t):\n return t != 0 and y != 0",
"def test_check_y_not_int_not_float(wage_X_y, wage_gam):\n X, y = wage_X_y\n y_str = ['hi'] * len(y)\n\n with pytest.raises(ValueError):\n check_y(y_str, wage_gam.link, wage_gam.distribution)",
"def _check_value(self, y_pred, y):\n if self._type != 'classification' and not (np.equal(y_pred ** 2, y_pred).all() and np.equal(y ** 2, y).all()):\n raise ValueError('For multilabel case, input value must be 1 or 0.')",
"def test_invalid_boolean_value(self):\n self.helper_test_evaluate_raises(\n 'A or B',\n expected_exc_type=InvalidBooleanValueError,\n A=1,\n B=2)",
"def test_evaluates_correctly_false(self, test_generator):\n self.assertFalse(test_generator())\n self.assertFalse(bool(test_generator()))\n if test_generator():\n self.fail(\"did not evaluate false\")",
"def y_overrun(self):\n return (self.status & 0x20) != 0",
"def is_tn(self, y, t):\n return t != 0 and y == 0",
"def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer",
"def test_bool(self, env: yaenv.Env):\n _val = env.bool('BOOL_VAR')\n assert not _val and type(_val) == bool\n _val = env.bool('INT_VAR')\n assert _val and type(_val) == bool\n _val = env.bool('MISSING', True)\n assert _val and type(_val) == bool\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.bool('FLOAT_VAR')\n assert 'Invalid boolean' in str(err.value)\n assert env.bool('MISSING') is None",
"def false_pos(yt, yp) -> Any:\n from keras import backend as K\n return K.sum(K.cast(yp * (1 - yt) > 0.5, 'float')) / K.maximum(1.0, K.sum(1 - yt))",
"def __bool__(self):\n return self[0] != 0.0 or self[1] != 0.0",
"def __bool__(self):\n return not(self.outcome != 0 or self.filled)",
"def __bool__(x):\n if x.value == 1:\n return True\n elif x.value == -1:\n return False\n else:\n raise ValueError('cannot determine boolean value of Unknown')",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()",
"def check_for_bool(check):",
"def test_check_y_not_min_samples(wage_X_y, wage_gam):\n X, y = wage_X_y\n\n with pytest.raises(ValueError):\n check_y(y, wage_gam.link, wage_gam.distribution, min_samples=len(y)+1, verbose=False)",
"def valid_value_intbool(val):\n if val is not np.nan:\n return 1\n else:\n return 0",
"def verify(self, y):\n left = self.sgroup.exponentiate(self.a, y)\n right = (self.x * self.sgroup.exponentiate(self.b, self.c)) % self.sgroup.p\n is_ok = (left == right)\n return is_ok",
"def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res",
"def test_human_readable_boolean_false():\n # TODO: add a test case that follows the provided example",
"def is_cr(self, y, t):\n return t == 0 and y != 0",
"def test_false_detections(self):\n expected_accuracy = dict(num_recall=0, uniq_recall=0, num_precision=0, uniq_precision=0)\n self._run_and_validate(self.false_dets, self.ground_truths, expected_accuracy)",
"def test_for_bool(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"for bool b in [True, False]\\n\\tUnaryGate(b, 0) | 0\"\n )\n assert np.all(\n bb._forvar[\"b\"] == np.array([True, False])\n )",
"def is_cn(self, y, t):\n return t == 0 and y == 0",
"def false_negative(y_true, y_pred):\n # initialize\n fn = 0\n for yt, yp in zip(y_true, y_pred):\n if yt == 1 and yp == 0:\n fn += 1\n return fn",
"def test_check_y_TypeError_(self):\n self.assertRaisesRegex(\n ValueError,\n 'y must be >= 0',\n Rectangle,\n 4, 2, 0, -6, 12\n )"
]
| [
"0.69083273",
"0.6695718",
"0.6674473",
"0.66075134",
"0.6587407",
"0.6464221",
"0.64067847",
"0.6404094",
"0.6357871",
"0.63352954",
"0.6284141",
"0.6274285",
"0.62531525",
"0.6222517",
"0.62003815",
"0.6190041",
"0.6136608",
"0.60998005",
"0.6085489",
"0.60599977",
"0.605292",
"0.60434747",
"0.60257524",
"0.6017895",
"0.599801",
"0.5923445",
"0.59125865",
"0.5903979",
"0.58803046",
"0.5879695"
]
| 0.6742841 | 1 |
This function tests for bad size value with floats | def test_badsizevaluefloats(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(float(1), 1, 2, 3)
self.assertEqual(str(e.exception), 'width must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_float(check):",
"def is_float(self, size=None):\n return False",
"def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))",
"def test_sizesetterwithfloat(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = float(1)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def is_floating_point(self, size=None):\n return False",
"def test_14_float_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(1.3, 20)\n self.assertEqual(\"width must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 2.0)\n self.assertEqual(\"height must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 1.7777)\n self.assertEqual(\"x must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 17, 8.0)\n self.assertEqual(\"y must be an integer\", str(x.exception))",
"def test_small_floats(self) -> None:\n f = open('test_files/track-with-small-floats.gpx')\n \n\n gpx = mod_gpxpy.parse(f)\n\n xml = gpx.to_xml()\n self.assertNotIn('e-', xml)",
"def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False",
"def test_01_float(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(float(1.2), float(2.2), 1)\n self.assertEqual(\"width must be an integer\", str(x.exception))",
"def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)",
"def test_size_too_small(self):\n min_size = min(settings.MISAGO_AVATARS_SIZES)\n too_small = min_size / 2\n\n self.assertEqual(clean_size(too_small), min_size)",
"def _validate_train_size(train_size):\n assert isinstance(train_size, float) and (0. < train_size < 1.), \\\n \"train_size should be a float between 0 and 1\"",
"def raise_not_number(x: int) -> None:\n try:\n float(x)\n except ValueError:\n raise SizeError('Must pass a number, received {}'.format(x))",
"def testFloatInput(self):\n nb.rescale_length(2.0)\n self.assertEqual(2.0, nb.rscale)",
"def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res",
"def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)",
"def testfloat ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertAlmostEqual ( float ( frac1 ), expRes )",
"def check_pos_float(v):\n status = True\n try:\n val = float(v)\n if val <= 0:\n status = False\n except ValueError:\n status = False\n return status",
"def test_badxvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, float(1), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_float_range_2():\n try:\n float_range('2.0')\n assert False # Should be unreachable\n except Exception:\n pass",
"def test_message_float():\n result = True\n\n message = msg.Message()\n for i in range(num_it):\n message.appendFloat(i/128.789456)\n if message.length != msg.HEADER_SIZE + (i+1)*msg.floatStruct.size:\n print(\"Size is \", message.length, \" but should be \", msg.HEADER_SIZE + (i+1)*msg.floatStruct.size)\n print(\"Error : message.appendFloat\")\n result = False\n\n message.resetCursor()\n for i in range(num_it):\n r = message.readFloat()\n if abs(r - i/128.789456) > 0.000001:\n print(r, \" vs \", i/128.789456)\n print(\"Error : message.read/appendFloat\")\n result = False\n\n return result",
"def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)",
"def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii",
"def test_badyvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, float(1), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def check_for_float_and_int(check):",
"def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False",
"def _is_non_negative_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item >= 0",
"def _assert_float_dtype(dtype):\n if not dtype.is_floating:\n raise ValueError(\"Expected floating point type, got %s.\" % dtype)\n return dtype",
"def check_size(s):\n\n s = check_1d(s, \"size\")\n if any(map(lambda d: d <= 0, s)):\n raise Exception('Size cannot be 0 or negative')\n\n return s"
]
| [
"0.7329806",
"0.726083",
"0.6919333",
"0.6791456",
"0.67735356",
"0.6667604",
"0.6654166",
"0.6617173",
"0.66136104",
"0.6580767",
"0.6558897",
"0.65265524",
"0.6493573",
"0.6468674",
"0.6459855",
"0.6433279",
"0.64139265",
"0.64002323",
"0.6386368",
"0.63839954",
"0.637679",
"0.63758755",
"0.6365386",
"0.6351307",
"0.63493174",
"0.63471854",
"0.6343838",
"0.63404536",
"0.632691",
"0.6298895"
]
| 0.74446696 | 0 |
This function tests for bad x value with sets | def test_badxvaluewithsets(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, {1, 2, 3}, 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_values(self):\n # Assume\n df1 = pd.DataFrame([[1, 6, 2, 3, 19],\n [4, 5, 8, 6, 30],\n [4, 5, 12, 8, 22],\n [4, 7, 9, 5, 21],\n [7, 8, 9, 12, 5]],\n columns=['A', 'B', 'C', 'D', 'E'])\n\n # Assume\n subsets = XbrlSubsets()\n\n # Assert\n with self.assertRaises(ValueError):\n subsets.unique_entries(df1, 'I', False)\n\n with self.assertRaises(ValueError):\n subsets.unique_entries(df1, 'A,B', True)",
"def test_badyvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {1, 2, 3}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def _checkValues(set_):\n if len(set_)<3: return False\n x = set_[2]\n # TODO: OPT: need optimization\n if (x is None) or len(x) == 0: return False # undefined\n for v in x:\n try:\n if Nlabels <= 2 and N.isscalar(v):\n continue\n if (isinstance(v, dict) or # not dict for pairs\n ((Nlabels>=2) and len(v)!=Nlabels) # 1 per each label for multiclass\n ): return False\n except Exception, e:\n # Something else which is not supported, like\n # in shogun interface we don't yet extract values per each label or\n # in pairs in the case of built-in multiclass\n if __debug__:\n debug('ROC', \"Exception %s while checking \"\n \"either %s are valid labels\" % (str(e), x))\n return False\n return True",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_exclusive(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertEquals({1, 2, 3}, s.data[1])\n self.assertEquals({4, 5, 6}, s.data[4])",
"def test_nonzero(self):\n self.assertFalse(djset())\n self.assertTrue(djset([1, 2, 3]))\n self.assertTrue(djset([1, 2, 3], [4, 5, 6]))\n self.assertTrue(djset([1, 2, 3], [4, 5, 6], [5, 6]))",
"def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return [x == s for s in allowable_set]",
"def test_badxvaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, (1, 2), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)",
"def test_badxvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, [1, 2], 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True",
"def test_negative_stop(self):\n self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])",
"def test_contains(self):\n dim = Fidelity(\"epoch\", 1, 10)\n\n assert 0 not in dim\n assert 1 in dim\n assert 5 in dim\n assert 10 in dim\n assert 20 not in dim",
"def test_is_set(self):\n cards = numpy.array([[1,1,1,2,0],\n [0,1,2,2,2],\n [0,1,2,2,2],\n [0,1,2,2,2]])\n\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))",
"def _one_of_k_encoding_unk(self, x, allowable_set):\r\n if x not in allowable_set:\r\n x = allowable_set[-1]\r\n return list(map(lambda s: x == s, allowable_set))",
"def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"k\"]):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['k']} (exclusive).\"",
"def check_unconformant(self, not_found, local_set):\n not_missing = set()\n for title in local_set:\n if \"|\" in title:\n not_missing.add(re.search(\"\\|.*\\|\", title).group()[1:-1])\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"_\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\") + \"+\")\n\n not_missing = not_missing.intersection(not_found)\n\n return not_missing",
"def test_bad_x_dimensions(self):\n with pytest.raises(StateError):\n State(substance=\"water\", T=Q_(300.0, \"K\"), x=Q_(1.01325, \"K\"))",
"def test_contains(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 6)\n\n assert 0.1 not in dim\n assert (0.1, -0.2) not in dim\n assert 0 in dim\n assert (1, 2) not in dim\n assert 6 not in dim\n assert -3 in dim\n assert -4 not in dim",
"def test_general_subset_invalid_level():\n pass",
"def _encode_check_unknown(values, uniques, return_mask=False):\n uniques_set = set(uniques)\n diff = list(set(values) - uniques_set)\n if return_mask:\n if diff:\n valid_mask = [val in uniques_set for val in values]\n else:\n valid_mask = [True] * len(values)\n return diff, valid_mask\n else:\n return diff",
"def one_of_k_encoding_unk(self, x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))",
"def test_badxvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, float(1), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))",
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def check_unexpected_values(self, expected_values, scraped_values):\n\n\t\tfor key in scraped_values:\n\t\t\tself.assertIn(key, expected_values)",
"def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))",
"def test_G_2_by_2_bad_data(self):\r\n self.assertRaises(ValueError, G_2_by_2, 1, -1, 1, 1)",
"def test_check_x(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.x, 0)\n\n r2 = Rectangle(2, 10, 6)\n self.assertEqual(r2.x, 6)\n\n r3 = Rectangle(5, 2, 3, 9, 12)\n self.assertEqual(r3.x, 3)\n\n r4 = Rectangle(5, 2, 0, 3, 12)\n self.assertEqual(r4.x, 0)",
"def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0"
]
| [
"0.6515592",
"0.65142566",
"0.64434457",
"0.6162853",
"0.61155057",
"0.60888386",
"0.60695523",
"0.603732",
"0.6008015",
"0.5992704",
"0.5987261",
"0.5945447",
"0.5944256",
"0.59343463",
"0.5927906",
"0.59046",
"0.5895334",
"0.5876729",
"0.5865866",
"0.58559877",
"0.58026785",
"0.5790299",
"0.57723045",
"0.5753136",
"0.57490677",
"0.5748939",
"0.5741376",
"0.5725894",
"0.57201874",
"0.5719614"
]
| 0.705099 | 0 |
This function tests for bad x value with dicts | def test_badxvaluewithdicts(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, {"foo": 1}, 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_badyvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {\"foo\": 1}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def check_for_dict(check):",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def testDictDoesNotContain(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" not in d1:\n print d1[\"nonsense\"] # Dead code\n else:\n print d1[\"x\"]\n\n d2 = {}\n if \"x\" not in d2:\n pass\n else:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" not in d3:\n print d3[\"y\"]\n else:\n print d3[\"x\"]\n \"\"\")",
"def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None",
"def test_check_xyz_dict(self):\n xyz1 = converter.check_xyz_dict(self.xyz1['str'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n\n xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n xyz2 = converter.check_xyz_dict(xyz2)\n expected_xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertEqual(xyz2, expected_xyz2)\n\n xyz3 = 3.0\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz3)\n\n xyz4 = {'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz4)\n\n xyz5 = {'symbols': ('C', 'H', 'H', 'H', 'H', 'S', 'S', 'S'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz5)\n\n # test a zmat input\n zmat6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2_1', 'A_2_1_0', None),\n ('R_3_2', 'A_3_2_0', 'D_3_2_0_1')),\n 'vars': {'R_1_0': 1.2451214479859707, 'R_2_1': 1.8953164901754294, 'A_2_1_0': 30.18165946689929,\n 'R_3_2': 2.785552137148173, 'A_3_2_0': 24.405141545817347,\n 'D_3_2_0_1': 3.6222548091772e-06}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n xyz6 = converter.check_xyz_dict(zmat6)\n expected_xyz6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'isotopes': (14, 14, 1, 1),\n 'coords': ((-2.4426534384901547e-09, -4.375090750708016e-09, -0.622560729110669),\n (-2.4426534384901547e-09, -4.375090750708016e-09, 0.6225607188753017),\n (-2.4426534384901547e-09, 0.9528575945413793, -1.015818661524137),\n (7.032081834243086e-08, -0.9528574729632926, 1.015818803737915))}\n\n self.assertEqual(xyz6, expected_xyz6)",
"def test_invalid_value_age(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '692', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_payment_accepted_invalid_dict(self):\r\n baseline = {\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n }\r\n wrong = {\r\n 'orderNumber': 'k',\r\n }\r\n # tests for missing key\r\n for key in baseline:\r\n params = baseline.copy()\r\n del params[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)\r\n\r\n # tests for keys with value that can't be converted to proper type\r\n for key in wrong:\r\n params = baseline.copy()\r\n params[key] = wrong[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)",
"def test_15_dict_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, {})\n self.assertEqual(\n \"height must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle({\"a\": 1, \"b\": 2, \"c\": 3}, 2)\n self.assertEqual(\n \"width must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, {\"a\": 1})\n self.assertEqual(\n \"x must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, 0, {\"hi\": None})\n self.assertEqual(\n \"y must be an integer\",\n str(x.exception))",
"def test_invalid_key_age(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Asge': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def run_missing_value_check():\n print(\"\\n### CHECKING FOR MISSING VALUES AND ZEROES ###\")\n for key, value in data.items():\n try:\n print(key, check_missing_values(value), check_zero(value))\n except TypeError:\n print(key, \"Failed\")\n print(\"### END ###\\n\")",
"def test_invalid_value_gen(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'Toaster', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)",
"def test_validate_bad_data(self, value):\n opt = scheme.DictOption('test-opt', scheme.Scheme())\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_search_validator_bad_data():\n sval = helpers.search_validator()\n assert not sval.validate({})\n bad = dict(foo=\"bar\", baz=42)\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"vin\": \"\"}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"foo\": \"bar\"}}')\n assert not sval.validate(bad)",
"def test_invalid_key_bmi(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BdMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0",
"def test_badxvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {1, 2, 3}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_example():\n with pytest.raises(\n AssertionError,\n match=expected_error_match,\n ):\n actual = {\n \"test1\": 1,\n \"test2\": \"foo\",\n \"bar\": {\"cheese\": \"parrot\", \"rabbit\": [\"black\", \"knight\"], \"other\": \"oops\"},\n }\n assert actual == Alike(\n {\n \"something\": A.is_missing,\n \"test2\": \"foo\",\n \"test1\": A < 2,\n \"bar\": {\n \"cheese\": A.is_present,\n \"rabbit\": [\"black\", \"wrong\"],\n \"other\": A.is_missing,\n },\n }\n )",
"def test_badsizevaluedicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square({\"foo\": 1}, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_invalid_value_birthday(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-19595'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def test_3_incorrect_value(self):\n d = copy.deepcopy(self.fitness_dict)\n d['WorkoutType'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['Minutes'] = 'Running'\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['CaloriesBurned'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))",
"def test_invalid_key_birthday(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birsthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")",
"def test_2_incorrect_key(self):\n d = copy.deepcopy(self.fitness_dict)\n del(d['WorkoutType'])\n d['WorkoutTypeS'] = 'Running'\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n del(d['Minutes'])\n d['MinutesS'] = 10.0\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n del(d['CaloriesBurned'])\n d['CaloriesBurnedS'] = 100.9\n self.assertFalse(self.fitness.insert_in_database(d))",
"def test_invalid_value_bmi(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Gigantic', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def check_unexpected_values(self, expected_values, scraped_values):\n\n\t\tfor key in scraped_values:\n\t\t\tself.assertIn(key, expected_values)",
"def test_invalid_key_sales(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sal5es': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_unique_item_properties_failed(self):\n check_value = [{\"a\": 1, \"b\": 3}, {\"a\": 1, \"b\": 2}]\n\n with pytest.raises(AssertionError):\n unique_item_properties(check_value, \"a\")"
]
| [
"0.7063102",
"0.6901642",
"0.68893194",
"0.67338705",
"0.6730255",
"0.66233486",
"0.65747875",
"0.6557071",
"0.6369715",
"0.6351837",
"0.6335188",
"0.6329998",
"0.6318528",
"0.6300104",
"0.62784344",
"0.62771666",
"0.6276584",
"0.62739366",
"0.62530696",
"0.6234832",
"0.62275565",
"0.6212066",
"0.6209081",
"0.6202409",
"0.6195556",
"0.6177628",
"0.61735183",
"0.61623037",
"0.61601716",
"0.6144946"
]
| 0.74902976 | 0 |
This function tests for bad y value with dicts | def test_badyvaluewithdicts(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, 2, {"foo": 1}, 3)
self.assertEqual(str(e.exception), 'y must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_y_not_int_not_float(wage_X_y, wage_gam):\n X, y = wage_X_y\n y_str = ['hi'] * len(y)\n\n with pytest.raises(ValueError):\n check_y(y_str, wage_gam.link, wage_gam.distribution)",
"def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None",
"def test_invalid_value_age(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '692', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_badxvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {\"foo\": 1}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_check_xyz_dict(self):\n xyz1 = converter.check_xyz_dict(self.xyz1['str'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n\n xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n xyz2 = converter.check_xyz_dict(xyz2)\n expected_xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertEqual(xyz2, expected_xyz2)\n\n xyz3 = 3.0\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz3)\n\n xyz4 = {'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz4)\n\n xyz5 = {'symbols': ('C', 'H', 'H', 'H', 'H', 'S', 'S', 'S'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz5)\n\n # test a zmat input\n zmat6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2_1', 'A_2_1_0', None),\n ('R_3_2', 'A_3_2_0', 'D_3_2_0_1')),\n 'vars': {'R_1_0': 1.2451214479859707, 'R_2_1': 1.8953164901754294, 'A_2_1_0': 30.18165946689929,\n 'R_3_2': 2.785552137148173, 'A_3_2_0': 24.405141545817347,\n 'D_3_2_0_1': 3.6222548091772e-06}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n xyz6 = converter.check_xyz_dict(zmat6)\n expected_xyz6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'isotopes': (14, 14, 1, 1),\n 'coords': ((-2.4426534384901547e-09, -4.375090750708016e-09, -0.622560729110669),\n (-2.4426534384901547e-09, -4.375090750708016e-09, 0.6225607188753017),\n (-2.4426534384901547e-09, 0.9528575945413793, -1.015818661524137),\n (7.032081834243086e-08, -0.9528574729632926, 1.015818803737915))}\n\n self.assertEqual(xyz6, expected_xyz6)",
"def test_invalid_value_gen(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'Toaster', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_invalid_value_birthday(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-19595'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_cast_y_axis_extrema_invalid_input(self):\r\n self.assertRaises(ValueError, _cast_y_axis_extrema, 'foo')",
"def test_check_y_TypeError_(self):\n self.assertRaisesRegex(\n ValueError,\n 'y must be >= 0',\n Rectangle,\n 4, 2, 0, -6, 12\n )",
"def test_validate_bad_data(self, value):\n opt = scheme.DictOption('test-opt', scheme.Scheme())\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_invalid_value_salary(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '2350',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_Y_no_std (self):\n X, Y = self.dm.get_data(std=True, y_std=False)\n expected = [(pd.to_datetime('1/12/2018'), 0.027151911),\n (pd.to_datetime('1/13/2018'), -0.040960432),\n (pd.to_datetime('1/14/2018'), 0.00347081),\n (pd.to_datetime('1/15/2018'), -0.168548025)]\n\n for (idx, e) in expected:\n msg = 'Y value not what expected on {}'.format(cryp.fmt_date(idx))\n actual = Y[idx]\n self.assertAlmostEqual(e, actual, DEC_ACCY, msg)",
"def testDictDoesNotContain(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" not in d1:\n print d1[\"nonsense\"] # Dead code\n else:\n print d1[\"x\"]\n\n d2 = {}\n if \"x\" not in d2:\n pass\n else:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" not in d3:\n print d3[\"y\"]\n else:\n print d3[\"x\"]\n \"\"\")",
"def test_invalid_value_sales(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '2145', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_invalid_request_values(self):\n TEST_DATA = [\n (-100, 0, 0, 0),\n (100, 0, 0, 0),\n (0, -190, 0, 0),\n (0, 190, 0, 0),\n (0, 0, 0, -10),\n (0, 0, 0, 370)\n ] # yapf: disable\n for (lat, lon, alt, heading) in TEST_DATA:\n self.assertEqual(400,\n self.eval_request_values(lat, lon, alt, heading))",
"def test_invalid_key_age(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Asge': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def check_for_dict(check):",
"def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def test_invalid_value_bmi(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Gigantic', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def test_rectangle_validation_y_pts(self):\n for bad_y_pts in (1.2, Decimal(3), -1, 'hello'):\n self.assertRaises(\n ValueError,\n Rectangle,\n x_pts=10,\n y_pts=bad_y_pts,\n width_pts=100,\n height_pts=120,\n line_width_pts=5)",
"def test_check_y_not_min_samples(wage_X_y, wage_gam):\n X, y = wage_X_y\n\n with pytest.raises(ValueError):\n check_y(y, wage_gam.link, wage_gam.distribution, min_samples=len(y)+1, verbose=False)",
"def test_3_incorrect_value(self):\n d = copy.deepcopy(self.fitness_dict)\n d['WorkoutType'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['Minutes'] = 'Running'\n self.assertFalse(self.fitness.insert_in_database(d))\n\n d = copy.deepcopy(self.fitness_dict)\n d['CaloriesBurned'] = 1\n self.assertFalse(self.fitness.insert_in_database(d))",
"def _validate_y(self, y):\n y_encoded = super()._validate_y(y)\n if (\n isinstance(self.sampling_strategy, dict)\n and self.base_sampler_._sampling_type != \"bypass\"\n ):\n self._sampling_strategy = {\n np.where(self.classes_ == key)[0][0]: value\n for key, value in check_sampling_strategy(\n self.sampling_strategy,\n y,\n self.base_sampler_._sampling_type,\n ).items()\n }\n else:\n self._sampling_strategy = self.sampling_strategy\n return y_encoded",
"def test_comparing(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]), test[1])",
"def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)",
"def test_payment_accepted_invalid_dict(self):\r\n baseline = {\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n }\r\n wrong = {\r\n 'orderNumber': 'k',\r\n }\r\n # tests for missing key\r\n for key in baseline:\r\n params = baseline.copy()\r\n del params[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)\r\n\r\n # tests for keys with value that can't be converted to proper type\r\n for key in wrong:\r\n params = baseline.copy()\r\n params[key] = wrong[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)",
"def test_badyvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {1, 2, 3}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_no_ydata(self):\n self.assertRaises(AttributeError, lambda: GroupLinearRegression([1, 2, 3, 4]))",
"def test_invalid_key_birthday(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birsthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)"
]
| [
"0.65423876",
"0.6532567",
"0.6517453",
"0.6497255",
"0.62020665",
"0.6187707",
"0.61815435",
"0.61809665",
"0.6161865",
"0.61170024",
"0.6088386",
"0.6066407",
"0.60333383",
"0.60322386",
"0.60267806",
"0.60139036",
"0.60025436",
"0.5979485",
"0.5971068",
"0.5970892",
"0.5961146",
"0.59521604",
"0.59378237",
"0.5936368",
"0.59159046",
"0.5903041",
"0.5887104",
"0.58812135",
"0.588014",
"0.5874172"
]
| 0.6933743 | 0 |
This function tests for bad x value with funcs | def test_badxvaluewithfuncs(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, print(), 2, 3)
self.assertEqual(str(e.exception), 'x must be an integer') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_error(f, x):\n try:\n f(x)\n return False\n except:\n return True",
"def test_correct_value(self):\n self.assertTrue(py_function(6) == 36)\n self.assertFalse(py_function(5) == 9)\n for i in range(0, 10):\n self.assertTrue(py_function(i) == i**2 if i != 0 else 100)",
"def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)",
"def test_badyvaluewithfuncs(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, print(), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def test_err_if(\n self, predicate: t.Callable, val: t.Any, exp: Result\n ) -> None:\n assert Result.err_if(predicate, val) == exp",
"def fun(self, x):\n if np.any(x > 0):\n return np.inf\n else:\n return 0",
"def fun(self, x):\n if np.any(x < 0):\n return np.inf\n else:\n return 0",
"def test_x_is_less_than_0(self):\n with self.assertRaisesRegex(ValueError, \"x must be >= 0\"):\n Square(1, -1, -1)",
"def test_badxvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, float(1), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_xfailed_but_passed():\n pass",
"def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)",
"def test_squared_moving_integration_valuechecks(x, window_length):\n from sleepecg._heartbeat_detection import _squared_moving_integration\n with pytest.raises(ValueError):\n _squared_moving_integration(x, window_length)",
"def testfn(arg):\n if arg == 42:\n raise ValueError('Oh noes')\n return arg",
"def test_03_pass(self):\n if x==1:\n pass",
"def if_error(f, x, upon_error):\n try:\n return f(x)\n except:\n return upon_error",
"def test_exception():\n\n @pure\n def fn():\n return 1 / 0\n\n check_peval_expression(\"fn()\", dict(fn=fn), \"fn()\")",
"def test_02_pass(self):\n if x==1:\n pass",
"def test_non_integral_validation(self):",
"def test_non_integral_validation(self):",
"def test_bounds_respected_func_not_called(\n self, check_bounds_respected):\n self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n self.controller.flag_expected = [3]\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_not_called()",
"def error_func(x, a0, a1, a2, a3):\n return (a0 / 2) * sp.special.erfc((a1 - x) / a2) + a3",
"def test_01_pass(self):\n if x==1:\n pass",
"def test_01_pass(self):\n if x==1:\n pass",
"def test_identical_x_values(self):\n alpha = 0.9\n num_x = 150\n num_y = 100\n x_vals = np.ones(num_x)\n y_vals = (np.sin(x_vals[:num_y]) + 0.3 * np.random.randn(num_y) + 0.5)\n\n with pytest.raises(RuntimeError):\n extrapolated_lowess(x_vals, y_vals, alpha=alpha)",
"def test_68_function_not_return(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin x:=fu(); end\n\t\tfunction fu():integer;\n\t\tbegin with a:boolean; do\n\t\twith a:integer; do for a:=-5 to 5 do return 1;end\"\"\"\n\t\texpect = \"Function fu Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,468))",
"def test_func(x, a, b, c, d):\n return a + b * x + c * x**2 + d * x**3",
"def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)",
"def __validateInput(self, fx: str, ux:str, lx:str) -> bool:\r\n # validate the input fields\r\n if fx == \"\" or ux == \"\" or lx == \"\":\r\n self.errorMessage = self.errorMessageMissingFields\r\n self.__showErrorMessage()\r\n return False\r\n\r\n # validate the limits\r\n self.lowerX = lx\r\n self.upperX = ux\r\n # check if numeric\r\n try:\r\n self.upperX = float(self.upperX)\r\n self.lowerX = float(self.lowerX)\r\n except:\r\n self.errorMessage = self.errorMessageLimitsNotNumeric\r\n self.__showErrorMessage()\r\n return False\r\n \r\n # check for inquality\r\n if self.lowerX > self.upperX:\r\n self.errorMessage = self.errorMessageLimitsNotOrdered\r\n self.upperXField.setText(str(self.lowerX))\r\n self.lowerXField.setText(str(self.upperX))\r\n self.lowerX, self.upperX = self.upperX, self.lowerX\r\n ##################################\r\n # validate and process the input function\r\n self.inputFunction = fx\r\n try:\r\n self.inputFunction = self.inputFunction.replace(\" \", \"\").replace(\"^\", \"**\").replace(\"sqrt\", \"np.sqrt\")\r\n self.inputFunction = self.inputFunction.replace(\"e**\", \"np.exp\").replace(\"log\", \"np.log\") \r\n self.inputFunction = self.inputFunction.replace(\"sin\", \"np.sin\").replace(\"cos\", \"np.cos\").replace(\"tan\", \"np.tan\")\r\n\r\n except:\r\n self.errorMessage = self.errorMessageNonValidFunction\r\n self.__showErrorMessage()\r\n return True"
]
| [
"0.70177335",
"0.6598538",
"0.6551677",
"0.6535496",
"0.64791",
"0.64751804",
"0.6444109",
"0.6384782",
"0.63809425",
"0.6376168",
"0.6334867",
"0.63270766",
"0.6288615",
"0.6278401",
"0.62581235",
"0.62579507",
"0.62479675",
"0.6207513",
"0.6186114",
"0.6177712",
"0.6177712",
"0.6147219",
"0.61375654",
"0.61308044",
"0.61308044",
"0.610548",
"0.60982066",
"0.6096836",
"0.60932374",
"0.60851365"
]
| 0.7192149 | 0 |
This function tests the size getter | def test_sizegetter(self):
Rectangle.reset_objects()
r1 = Square(1, 2, 2, 3)
self.assertEqual(r1.size, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_size(self):\n ...",
"def get_size(self):",
"def getSize(self):\n assert False",
"def size(self):",
"def size(self) -> int:",
"def size() -> int:\n ...",
"def _size(self):\n raise NotImplementedError",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> int:\n ...",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3",
"def size(self): \r\n pass",
"def size(self):\n # Your implementation here",
"def __len__(self):\r\n return self.size",
"def size(self):\r\n raise NotImplementedError",
"def __len__(self):\n return self.size_",
"def size(self):\n pass",
"def size(self):\n pass",
"def size(self):\n pass",
"def get_size(self):\n return self.__size",
"def test_get_storage_size_bytes(self):\n size = get_storage_size_bytes()\n self.assertEqual(size, 4)\n\n #Testing filter by tags\n self.paper.tags.add(self.tag)\n size = get_storage_size_bytes(self.tag) \n self.assertEqual(size, 4)",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def __len__(self):\n return self.size",
"def size(self):\n return self.__size"
]
| [
"0.79357636",
"0.7931725",
"0.78921765",
"0.7866568",
"0.7854694",
"0.7824038",
"0.7799345",
"0.7788262",
"0.7788262",
"0.7726975",
"0.76932454",
"0.7617639",
"0.75942594",
"0.75844234",
"0.7560417",
"0.75556916",
"0.75556916",
"0.75556916",
"0.7538814",
"0.753443",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7515552",
"0.7513039"
]
| 0.8108042 | 0 |
This function tests the size setter | def test_sizesetter(self):
Rectangle.reset_objects()
r1 = Square(1, 2, 2, 3)
self.assertEqual(r1.size, 1)
r1.size = 100
self.assertEqual(r1.size, 100) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sizesetterwithset(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\", 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizegetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)",
"def test_sizesetterwithfunc(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = print()\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizesetterwithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = (1, 2)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizesetterwithbool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = True\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_updatebadsizevalue(self):\n Square.reset_objects()\n r1 = Square(1, 2, 3, 4)\n r1.update(1, \"foo\")\n self.assertEqual(r1.size, 1)",
"def test_size(self):\n s1 = Square(5)\n self.assertEqual(s1.size, 5)\n\n s1.size = 9\n self.assertEqual(s1.size, 9)\n\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s1.size = \"foo\"",
"def test_sizesetterwithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = [1, 2]\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def getSize(self):\n assert False",
"def test_sizesetterwithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = \"foo\"\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def actual_size(self, size, mode='normal', state='on'):\n raise NotImplementedError",
"def test_change_size(self, os_mock):\n os_mock.path.isfile.return_value = True\n os_mock.path.getsize.return_value = 42000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 42000)\n\n os_mock.path.getsize.return_value = 43000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)\n\n self.data.status = Data.STATUS_DONE\n os_mock.path.getsize.return_value = 44000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)",
"def test_sizesetterwithdict(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\": 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def _assign_sizes(self):",
"def test_sizesetterwithfloat(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = float(1)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def _size(self):\n raise NotImplementedError",
"def updateSize(self, *args):\n return None",
"def size(self): \r\n pass",
"def size(self):",
"def testSize(self):\n v1 = Vector(1, 2, 3, size=6)\n assert v1 == [1, 2, 3, 0, 0, 0]\n failed = False\n try:\n Vector(1, 2, 3, size=2)\n except IndexError:\n failed = True\n assert failed\n\n v3 = Vector(size=7)\n assert v3 == Vector(0, 0, 0, 0, 0, 0, 0)\n assert v3 == (0, 0, 0, 0, 0, 0, 0)",
"def calc_size(self):\r\n pass",
"def size(self, size):\n self._size = size",
"def test_sized(self):\n # verify ----------------------\n try:\n len(self.collection)\n except TypeError:\n msg = \"object of type 'Collection' has no len()\"\n self.fail(msg)",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> int:\n ...",
"def correct_size():\n check50.run(\"./inheritance_test\").stdout(\"size_true.*\").exit(0)",
"def get_size(self):\n ...",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3",
"def get_size(self):",
"def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))"
]
| [
"0.7792907",
"0.7693151",
"0.7666621",
"0.75800925",
"0.7571968",
"0.7473297",
"0.7417849",
"0.73012525",
"0.7267939",
"0.7258394",
"0.7256592",
"0.7232874",
"0.71553475",
"0.71380746",
"0.70957935",
"0.7058936",
"0.7051922",
"0.70510983",
"0.69948095",
"0.699118",
"0.6963985",
"0.69540167",
"0.6934904",
"0.69256353",
"0.69256353",
"0.68869275",
"0.6861118",
"0.6853308",
"0.6846592",
"0.6838373"
]
| 0.8216275 | 0 |
This function tests the size setter with string | def test_sizesetterwithstring(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, 2, 2, 3)
self.assertEqual(r1.size, 1)
r1.size = "foo"
self.assertEqual(str(e.exception), "width must be an integer") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __size_restriction_correct_string_number(self):\n\n strTestName = 'String size equal to a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizEq('parameter1', 4)\n\n RxCSObject.parameter1 = 'aaaa'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_size(self):\n s1 = Square(5)\n self.assertEqual(s1.size, 5)\n\n s1.size = 9\n self.assertEqual(s1.size, 9)\n\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s1.size = \"foo\"",
"def __size_restriction_incorrect_string_number(self):\n\n strTestName = 'String size lower than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizHE('parameter1', 4, mul=2, add=3)\n\n RxCSObject.parameter1 = 'aaa'\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def test_sizesetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = 100\n self.assertEqual(r1.size, 100)",
"def size(self, new_size):\n if type(new_size) is str:\n new_size = new_size.replace(\" \", \"\").upper()\n new_size = new_size.replace(\")\", \"\")\n new_size = new_size.replace(\"(\", \"\")\n new_size = new_size.replace(\",\", \".\")\n new_size = new_size.replace(\"B\", \"\").strip()\n target_unit = None\n multiplier = 1\n is_bytes = False\n try:\n float(new_size)\n target_unit = \"B\"\n is_bytes = True\n except Exception as e:\n pass\n\n if not is_bytes:\n multiplier *= 1024\n for unit in [\"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]:\n if not target_unit and unit in new_size:\n target_unit = unit\n multiplier *= 1024\n # Reject double units\n elif target_unit and unit in new_size:\n target_unit = None\n break\n\n if target_unit:\n new_size = new_size.replace(target_unit, \"\").strip()\n try:\n self._size = int(float(new_size) * multiplier)\n except Exception as e:\n logger.error(f\"Failed to set a size from \\\"{new_size}\\\"\")\n logger.error(e)\n\n elif type(new_size) is int:\n self._size = new_size\n\n else:\n raise Exception(\"Wrong size type provided ({type(new_size)})\")\n\n if not self._size:\n logger.warn(f\"Failed to set a size from \\\"{new_size}\\\"\")",
"def __size_restriction_inccorrect_string_string(self):\n\n strTestName = 'String size equal to the size of another string (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('strRefParameter1', 'Str ref. parameter')\n RxCSObject.paramType('strRefParameter1', str)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizEq('parameter1', 'strRefParameter1')\n\n RxCSObject.strRefParameter1 = 'bbbcca'\n RxCSObject.parameter1 = 'aaabb'\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def size(name):",
"def test_sizesetterwithfunc(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = print()\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizesetterwithset(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\", 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def size(self, size):\n # type: (string_types) -> None\n\n if size is not None:\n if not isinstance(size, string_types):\n raise TypeError(\"Invalid type for `size`, type has to be `string_types`\")\n\n self._size = size",
"def __size_restriction_correct_string_string(self):\n\n strTestName = 'String size lower or equal to the size of another string (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('strRefParameter1', 'Str ref. parameter')\n RxCSObject.paramType('strRefParameter1', str)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizLE('parameter1', 'strRefParameter1')\n\n RxCSObject.strRefParameter1 = 'bbbccc'\n RxCSObject.parameter1 = 'aaabb'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def __size_restriction_incorrect_string_parameter(self):\n\n strTestName = 'String size higher or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizHE('parameter1', 'iRefParameter1', mul=2, add=3)\n\n RxCSObject.iRefParameter1 = 2\n RxCSObject.parameter1 = 'aaabb'\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def size(self, size_input: Tuple[str, str]):\n self.isize = [UIMetric.parse(size_input[0]),\n UIMetric.parse(size_input[1])]",
"def test_sizesetterwithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = (1, 2)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_badsizevaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(\"foo\", 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_sizegetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)",
"def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)",
"def test_sizesetterwithdict(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\": 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def __size_restriction_correct_string_parameter(self):\n strTestName = 'String size higher than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizH('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 2\n RxCSObject.parameter1 = 'aaabbbab'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_sizesetterwithbool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = True\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)",
"def test_change_size(self, os_mock):\n os_mock.path.isfile.return_value = True\n os_mock.path.getsize.return_value = 42000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 42000)\n\n os_mock.path.getsize.return_value = 43000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)\n\n self.data.status = Data.STATUS_DONE\n os_mock.path.getsize.return_value = 44000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)",
"def set_size(self, value='S'):\n upper = value.upper()\n\n if upper == 'M': # Medium: double height\n # size = 0x01\n # charHeight = 48\n # maxColumn = 32\n self.double_height_on()\n self.double_width_off()\n elif upper == 'L': # Large: double width and height\n # size = 0x11\n # charHeight = 48\n # maxColumn = 16\n self.double_height_on()\n self.double_width_on()\n else: # Small: standard width and height\n # size = 0x00\n # charHeight = 24\n # maxColumn = 32\n self.double_width_off()\n self.double_height_off()\n # writeBytes(ASCII_GS, '!', size)\n # prevByte = '\\n' # Setting the size adds a linefeed",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3",
"def test_sizesetterwithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = [1, 2]\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def testsize(self):\n for size in range(5):\n a = AmuletAbility('Skepticism', size=size+1)\n self.assert_(str(size+1) in str(a))\n self.assertEqual(a.size, size+1)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def _is_size_key (self, key):\n return key == '$size' or key == 'size'",
"def test_updatebadsizevalue(self):\n Square.reset_objects()\n r1 = Square(1, 2, 3, 4)\n r1.update(1, \"foo\")\n self.assertEqual(r1.size, 1)",
"def correct_size():\n check50.run(\"./inheritance_test\").stdout(\"size_true.*\").exit(0)",
"def test_get_storage_size_bytes(self):\n size = get_storage_size_bytes()\n self.assertEqual(size, 4)\n\n #Testing filter by tags\n self.paper.tags.add(self.tag)\n size = get_storage_size_bytes(self.tag) \n self.assertEqual(size, 4)"
]
| [
"0.7452561",
"0.74343324",
"0.72125316",
"0.7165165",
"0.7162416",
"0.7097643",
"0.70611894",
"0.70401406",
"0.7039656",
"0.700409",
"0.69777435",
"0.6852879",
"0.6768443",
"0.6762896",
"0.6759838",
"0.67128867",
"0.669268",
"0.6645321",
"0.66379744",
"0.6613321",
"0.6609989",
"0.6598351",
"0.6593159",
"0.6567083",
"0.65386516",
"0.653018",
"0.65209794",
"0.6502508",
"0.65008694",
"0.6496873"
]
| 0.7871389 | 0 |
This function tests the size setter with func | def test_sizesetterwithfunc(self):
Rectangle.reset_objects()
with self.assertRaises(TypeError) as e:
r1 = Square(1, 2, 2, 3)
self.assertEqual(r1.size, 1)
r1.size = print()
self.assertEqual(str(e.exception), "width must be an integer") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sizesetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = 100\n self.assertEqual(r1.size, 100)",
"def test_sizesetterwithset(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\", 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizegetter(self):\n Rectangle.reset_objects()\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)",
"def test_sizesetterwithbool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = True\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_sizesetterwithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = (1, 2)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_updatebadsizevalue(self):\n Square.reset_objects()\n r1 = Square(1, 2, 3, 4)\n r1.update(1, \"foo\")\n self.assertEqual(r1.size, 1)",
"def test_size(self):\n s1 = Square(5)\n self.assertEqual(s1.size, 5)\n\n s1.size = 9\n self.assertEqual(s1.size, 9)\n\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s1.size = \"foo\"",
"def test_sizesetterwithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = [1, 2]\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def _assign_sizes(self):",
"def actual_size(self, size, mode='normal', state='on'):\n raise NotImplementedError",
"def size(self):",
"def updateSize(self, *args):\n return None",
"def test_change_size(self, os_mock):\n os_mock.path.isfile.return_value = True\n os_mock.path.getsize.return_value = 42000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 42000)\n\n os_mock.path.getsize.return_value = 43000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)\n\n self.data.status = Data.STATUS_DONE\n os_mock.path.getsize.return_value = 44000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)",
"def size(*args):",
"def size(self): \r\n pass",
"def test_sizesetterwithdict(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = {\"foo\": 2}\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def size(name):",
"def test_sizesetterwithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = \"foo\"\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))",
"def _size(self):\n raise NotImplementedError",
"def calc_size(self):\r\n pass",
"def test_sizesetterwithfloat(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = float(1)\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> int:\n ...",
"def size(self):\n # Your implementation here",
"def testSize(self):\n v1 = Vector(1, 2, 3, size=6)\n assert v1 == [1, 2, 3, 0, 0, 0]\n failed = False\n try:\n Vector(1, 2, 3, size=2)\n except IndexError:\n failed = True\n assert failed\n\n v3 = Vector(size=7)\n assert v3 == Vector(0, 0, 0, 0, 0, 0, 0)\n assert v3 == (0, 0, 0, 0, 0, 0, 0)",
"def test_sized(self):\n # verify ----------------------\n try:\n len(self.collection)\n except TypeError:\n msg = \"object of type 'Collection' has no len()\"\n self.fail(msg)",
"def getSize(self):\n assert False",
"def get_size(self, valueid):"
]
| [
"0.76074827",
"0.73241836",
"0.7299613",
"0.71340346",
"0.70718694",
"0.70272154",
"0.69879526",
"0.69618094",
"0.69158465",
"0.68551946",
"0.6852976",
"0.68434936",
"0.6834289",
"0.68095237",
"0.68004525",
"0.6782642",
"0.6782323",
"0.6747741",
"0.6726101",
"0.67179966",
"0.67160827",
"0.67019266",
"0.670092",
"0.6649314",
"0.6649314",
"0.6640171",
"0.6618337",
"0.6613494",
"0.66095513",
"0.6557007"
]
| 0.7818356 | 0 |
This function tests the to_dictionary function | def test_to_dict(self):
Square.reset_objects()
s1 = Square(10, 2, 1)
s1_dictionary = s1.to_dictionary()
self.assertEqual(s1_dictionary, {'id': 1, 'x': 2, 'size': 10, 'y': 1}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})",
"def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n dictionary = key.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"in_car\": True}",
"def test_todictreturntype(self):\n b1 = BaseModel()\n self.assertEqual(type(b1.to_dict()), dict)",
"def test_f2_to_dictionary(self):\n new_dict = {'x': 14, 'y': 5, 'id': 10, 'width': 22, 'height': 25}\n r1 = Rectangle(10, 2, 1, 9)\n r1.update(**new_dict)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n print(type(r1.to_dictionary()))\n self.assertEqual(f.getvalue(), \"<class 'dict'>\\n\")",
"def test_to_dict(self):\n self.assertEqual('to_dict' in dir(self.place), True)",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def test_return_as_dictionary(self):\n inventory_dict = Inventory(123, \"product\", 10, 5).return_as_dictionary()\n self.assertEqual(inventory_dict['product_code'], 123)\n self.assertEqual(inventory_dict['description'], \"product\")\n self.assertEqual(inventory_dict['market_price'], 10)\n self.assertEqual(inventory_dict['rental_price'], 5)",
"def test_dictify(self) -> None:\n r = dictify(['a', 'b', 'c'], [1, 2, 3])\n assert r == {'a': 1, 'b': 2, 'c': 3}, r\n\n r = {}\n dictify(['a'], [1], r)\n dictify(['b'], [2], r)\n dictify(['c'], [3], r)\n assert r == {'a': 1, 'b': 2, 'c': 3}, r",
"def test_to_dictionary(self):\n r = Rectangle(1, 1, 1, 1, 1)\n d = {'id': 1, 'width': 1, 'height': 1, 'x': 1, 'y': 1}\n self.assertEqual(r.to_dictionary(), d)\n r.my_fun_new_attr = 42\n self.assertEqual(r.to_dictionary(), d)",
"def test_asdict():\n car = Car('Peugeot', '406', '2.0 HDI Saint Tropez Sedan', False, 2001, False, 11)\n car_dict = car._asdict()\n car_expected = {\n 'brand': 'Peugeot',\n 'model': '406',\n 'version': '2.0 HDI Saint Tropez Sedan',\n 'availability': False,\n 'year': 2001,\n 'brandNew': False,\n 'id': 11\n }\n assert car_dict == car_expected",
"def test_basedict(self):\n tester = BaseModel()\n self.assertTrue(dict, type(tester.to_dict()))",
"def test_from_to_json_string(self):\n dict1 = {'x': 2, 'width': 10, 'id': 1, 'height': 7, 'y': 8}\n json_dictionary = Base.to_json_string([dict1])\n self.assertEqual(dict1, {'x': 2, 'width': 10, 'id': 1,\n 'height': 7, 'y': 8})\n self.assertEqual(Base.from_json_string(json_dictionary)[0], dict1)\n self.assertEqual(Base.from_json_string(None), [])\n self.assertEqual(Base.from_json_string([]), [])\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertEqual(Base.to_json_string([]), \"[]\")",
"def test_to_json(self):\n\n expected = \"\"\"{\n \"Hello\": \"world\",\n \"Py\": \"Funceble\",\n \"World\": {\n \"world\": \"hello\"\n },\n \"funilrys\": [\n \"Fun\",\n \"Ilrys\"\n ],\n \"pyfunceble\": [\n \"funilrys\"\n ]\n}\"\"\"\n actual = Dict(self.test_subject.copy()).to_json()\n\n self.assertEqual(expected, actual)\n\n actual = Dict().from_json(expected)\n expected = self.test_subject.copy()\n\n self.assertEqual(expected, actual)",
"def test_get_cases_for_dict(self):\n pass",
"def asdict():\n pass",
"def test_dict(self):\n s1 = Square(4)\n s1_dict = s1.to_dictionary()\n s1_correct = {\"id\":1, \"size\":4, \"x\":0, \"y\":0}\n self.assertEqual(s1_dict, s1_correct)\n\n s2 = Square(9)\n s2_new = {\"id\":9, \"size\":4, \"x\":3, \"y\":4}\n s2.update(**s2_new)\n self.assertEqual(s2.to_dictionary(), s2_new)",
"def test_create_mimic_dict_1(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIsInstance(\n result, dict,\n \"The return value of create_mimic_dict() should be a dict.\"\n )",
"def test_convert(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]).data, test[1])",
"def test_to_dict_creates_dict(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n u = Student(**user_details)\n new_d = u.to_dict()\n self.assertEqual(type(new_d), dict)\n self.assertFalse(\"_sa_instance_state\" in new_d)\n for attr in u.__dict__:\n if attr is not \"_sa_instance_state\":\n self.assertTrue(attr in new_d)\n self.assertTrue(\"__class__\" in new_d)",
"def test_to_dict_values(self):\n a = Review()\n pl = Place()\n st = State()\n a.place_id = pl.id\n a.state_id = st.id\n a.text = \"nice place\"\n dic = a.to_dict()\n self.assertEqual(dic[\"created_at\"], a.created_at.strftime(timeformat))\n self.assertEqual(dic[\"updated_at\"], a.updated_at.strftime(timeformat))\n self.assertEqual(dic[\"__class__\"], \"Review\")\n self.assertEqual(dic[\"text\"], \"nice place\")\n self.assertEqual(dic[\"place_id\"], a.place_id)\n self.assertEqual(dic[\"state_id\"], a.state_id)",
"def test_toDic(self):\n test_dict = OrderedDict([\n (\"a\", OrderedDict([\n (\"1\", \"Some text\"),\n (\"2\", \"Some other Text\")\n ])),\n (\"b\", OrderedDict([\n (\"7\", \"Lorem\"),\n (\"e\", \"Ipsum\")\n ]))\n ])\n a = toNumber(test_dict)\n expected = {\n 0: {\n 0: \"Some text\",\n 1: \"Some other Text\"\n },\n 1: {\n 0: \"Lorem\",\n 1: \"Ipsum\"\n }\n }\n self.assertEqual(a, expected, \"Nested should be converted to nested dictionary with int indexes\")",
"def test_to_dict(self):\n self.assertEqual('to_dict' in dir(self.rev), True)\n rev_direv = self.rev.to_dict()\n self.assertEqual(self.rev.__class__.__name__, 'Review')\n self.assertEqual(rev_direv['__class__'], 'Review')\n self.assertIsInstance(rev_direv['created_at'], str)\n self.assertIsInstance(rev_direv['updated_at'], str)\n self.assertIsInstance(rev_direv['text'], str)",
"def test_obj_dict(self):\n obj = storage.all()\n self.assertIsInstance(obj, dict)",
"def test_to_dictAmenity(self):\n insta = Amenity()\n dict_cont = insta.to_dict()\n self.assertEqual(type(dict_cont), dict)\n for attr in insta.__dict__:\n self.assertTrue(attr in dict_cont)\n self.assertTrue(\"__class__\" in dict_cont)",
"def _to_dict(self) -> dict:\n pass",
"def test_to_dict(self):\n self.user_1.name = \"Test\"\n self.user_1.num = 1\n user_1_dict = self.user_1.to_dict()\n\n self.assertIsInstance(user_1_dict, dict)\n\n user_1_class = type(self.user_1).__name__\n self.assertIn((\"__class__\", user_1_class),\n user_1_dict.items())\n self.assertNotIn((\"__class__\", user_1_class),\n self.user_1.__dict__)\n\n user_1_created_at = self.user_1.created_at.isoformat()\n user_1_updated_at = self.user_1.updated_at.isoformat()\n self.assertIn((\"created_at\", user_1_created_at),\n user_1_dict.items())\n self.assertIn((\"updated_at\", user_1_updated_at),\n user_1_dict.items())\n\n isoformat = '%Y-%m-%dT%H:%M:%S.%f'\n user_1_created_at = datetime.strptime(user_1_dict[\"created_at\"],\n isoformat)\n user_1_updated_at = datetime.strptime(user_1_dict[\"updated_at\"],\n isoformat)\n self.assertEqual(user_1_created_at, self.user_1.created_at)\n self.assertEqual(user_1_updated_at, self.user_1.updated_at)",
"def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()",
"def test_hood_dict(self):\n hood = Hood({\"warning\": False, \"closed\": True})\n\n dictionary = hood.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"closed\": True}",
"def test_15_0_toJsonString(self):\n\n dictionary = self.r1.to_dictionary()\n json_dictionary = Base.to_json_string([dictionary])\n self.assertTrue(type(dictionary), dict)\n self.assertTrue(type(json_dictionary), str)",
"def test_to_dict(self):\n self.insert_row()\n\n instance = Manager.objects().first().run_sync()\n dictionary = instance.to_dict()\n if engine_is(\"cockroach\"):\n self.assertDictEqual(\n dictionary, {\"id\": dictionary[\"id\"], \"name\": \"Guido\"}\n )\n else:\n self.assertDictEqual(dictionary, {\"id\": 1, \"name\": \"Guido\"})"
]
| [
"0.7176084",
"0.7108782",
"0.7050815",
"0.70425564",
"0.7028966",
"0.7027133",
"0.7022392",
"0.696475",
"0.696263",
"0.69530326",
"0.6941427",
"0.6933888",
"0.69253755",
"0.6914849",
"0.68335044",
"0.6824801",
"0.68152386",
"0.67968",
"0.67782664",
"0.6765529",
"0.6721175",
"0.671221",
"0.67115307",
"0.6707808",
"0.67049724",
"0.669902",
"0.6696034",
"0.66722405",
"0.6662064",
"0.6652482"
]
| 0.722105 | 0 |
This function tests the update function with to_dict | def test_updatewithdict(self):
s1 = Square(10, 2, 1)
s1_dictionary = s1.to_dictionary()
s2 = Square(1, 1)
s2.update(**s1_dictionary)
self.assertEqual(s2.size, 10)
self.assertEqual(s2.x, 2)
self.assertEqual(s2.y, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_dict(new,old):",
"def test_updatewithdictionarybycomparingdictionaries(self):\n s1 = Square(10, 2, 1, 9)\n s1_dictionary = s1.to_dictionary()\n s2 = Square(1, 1)\n s2.update(**s1_dictionary)\n self.assertEqual(s1.__dict__, s2.__dict__)",
"def test_dict(self):\n s1 = Square(4)\n s1_dict = s1.to_dictionary()\n s1_correct = {\"id\":1, \"size\":4, \"x\":0, \"y\":0}\n self.assertEqual(s1_dict, s1_correct)\n\n s2 = Square(9)\n s2_new = {\"id\":9, \"size\":4, \"x\":3, \"y\":4}\n s2.update(**s2_new)\n self.assertEqual(s2.to_dictionary(), s2_new)",
"def test_update(inp):\n atty = AttyDict(a={'aa': 1, 'ab': 2})\n regular = dict(a={'aa': 1, 'ab': 2})\n\n atty.update(**inp)\n assert valid_values(atty)\n\n regular.update(**inp)\n assert dict(atty) == regular",
"def test_update_many(self):\n sample_input = \"\"\"\nfoo=100\nbar=200, baz=300\n\"\"\"\n self.assertNotEquals(self.param_dict.get(\"foo\"), 100)\n self.assertNotEquals(self.param_dict.get(\"bar\"), 200)\n self.assertNotEquals(self.param_dict.get(\"baz\"), 300)\n result = self.param_dict.update_many(sample_input)\n log.debug(\"result: %s\", result)\n self.assertEquals(result[\"foo\"], True)\n self.assertEquals(result[\"bar\"], True)\n self.assertEquals(result[\"baz\"], True)\n self.assertEquals(self.param_dict.get(\"foo\"), 100)\n self.assertEquals(self.param_dict.get(\"bar\"), 200)\n self.assertEquals(self.param_dict.get(\"baz\"), 300)",
"def test_dictionary_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value = {4:5}\r\n assert vm.changed",
"def test_deep_update(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.update_dict_key_value(\n mdict, \"C:F\", {\"foo\": \"bar\", \"qux\": \"quux\"}\n )\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\", \"foo\": \"bar\", \"qux\": \"quux\"}},\n },\n res,\n )\n\n # Test updating a non-existing subkey\n res = dictupdate.update_dict_key_value({}, \"foo:bar:baz\", {\"qux\": \"quux\"})\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)\n # Test updating a non-existing subkey, with a different delimiter\n res = dictupdate.update_dict_key_value(\n {}, \"foo bar baz\", {\"qux\": \"quux\"}, delimiter=\" \"\n )\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)",
"def test_update_case(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )",
"def test_dict_to_updated_at_attr(self):\n r = Review()\n r_dictionary = r.to_dict()\n r2 = Review(**r_dictionary)\n self.assertEqual(r.updated_at, r2.updated_at)",
"def test_f2_to_dictionary(self):\n new_dict = {'x': 14, 'y': 5, 'id': 10, 'width': 22, 'height': 25}\n r1 = Rectangle(10, 2, 1, 9)\n r1.update(**new_dict)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n print(type(r1.to_dictionary()))\n self.assertEqual(f.getvalue(), \"<class 'dict'>\\n\")",
"def test_to_dict_updated_at(self):\n r = Review()\n r_dictionary = r.to_dict()\n self.assertIn('updated_at', r_dictionary)",
"def _update_object(self, data_dict):\r\n pass",
"def test_recursive_update():\n\n test = Status.update_dict({'generation': TEST_1_ATTRS_1},\n {'generation': TEST_1_ATTRS_2})\n\n assert test['generation']['run_id'] == TEST_1_ATTRS_1['run_id']\n assert test['generation']['job_status'] == TEST_1_ATTRS_2['job_status']",
"def test_map_update_updates(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": '3', \"baz\": '4'})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})",
"def _update(value: Dict[str, Any], update: Dict[str, Any]):\n for key, val in update.items():\n\n if key not in value:\n value[key] = val\n elif isinstance(val, dict):\n value[key] = _update(value[key], val)\n else:\n value[key] = val\n return value",
"def test_map_update_updates(self):\r\n partition = uuid4()\r\n cluster = 1\r\n TestQueryUpdateModel.objects.create(\r\n partition=partition, cluster=cluster,\r\n text_map={\"foo\": '1', \"bar\": '2'})\r\n TestQueryUpdateModel.objects(\r\n partition=partition, cluster=cluster).update(\r\n text_map__update={\"bar\": '3', \"baz\": '4'})\r\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})",
"def testMapUpdate(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap({'step': data_types.BuildStats()})\n with self.assertRaises(AssertionError):\n m.update({1: 2})\n with self.assertRaises(AssertionError):\n m.update(step2=1)\n m.update(step=data_types.BuildStats())\n self.assertEqual(m, {'step': data_types.BuildStats()})",
"def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed",
"def test_update(self):\n doc_fields = document_fields.DocumentFields({\n 'foo@': 'bar',\n })\n self.assertEquals('bar', doc_fields['foo'])\n doc_fields.update({\n 'foo@': 'bbq',\n })\n self.assertEquals('bbq', doc_fields['foo'])",
"def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)",
"def test_update_facet_dictionary(self):\n pass",
"def test_updating_record_with_dictionary_args(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Johnny\", last_name=\"John\", age=2\n )\n\n test_domain.repository_for(Person)._dao.update(person, {\"age\": 10})\n u_person = test_domain.repository_for(Person)._dao.get(identifier)\n assert u_person is not None\n assert u_person.age == 10",
"def test_dictfield_update(self):\n\n class Club(Document):\n members = DictField()\n\n club = Club()\n club.members[\"John\"] = {\"gender\": \"M\", \"age\": 13}\n club.save()\n\n Club.objects().update(set__members={\"John\": {\"gender\": \"F\", \"age\": 14}})\n\n club = Club.objects().first()\n assert club.members[\"John\"][\"gender\"] == \"F\"\n assert club.members[\"John\"][\"age\"] == 14",
"def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover",
"def _modify_item(item, update_dict):\n for k in update_dict:\n item[k] = str(update_dict[k])\n\n return item",
"def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])"
]
| [
"0.7573673",
"0.7532664",
"0.7381514",
"0.7352229",
"0.7200704",
"0.6979389",
"0.69530696",
"0.69241667",
"0.6872416",
"0.6872416",
"0.6872416",
"0.6859202",
"0.6837425",
"0.6832763",
"0.6792921",
"0.6743073",
"0.67334944",
"0.6720713",
"0.6713036",
"0.67053515",
"0.6689018",
"0.66778314",
"0.6671391",
"0.6628727",
"0.66270626",
"0.6587946",
"0.6570437",
"0.6536059",
"0.65209097",
"0.651315"
]
| 0.75755227 | 0 |
This function tests the display function | def test_display__method(self):
Rectangle.reset_objects()
s1 = Square(5)
f = io.StringIO()
with contextlib.redirect_stdout(f):
s1.display()
self.assertEqual(f.getvalue(), "#####\n#####\n#####\n#####\n#####\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_updated_display1(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r1 = Rectangle(2, 3, 2, 2)\n r1.display()\n sys.stdout = sys.__stdout__\n desired = '\\n\\n ##\\n ##\\n ##\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_display_method1(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r1 = Rectangle(4, 6)\n r1.display()\n sys.stdout = sys.__stdout__\n desired = '####\\n####\\n####\\n####\\n####\\n####\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_display_method2(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r2 = Rectangle(2, 2)\n r2.display()\n sys.stdout = sys.__stdout__\n desired = '##\\n##\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_updated_display2(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r2 = Rectangle(3, 2, 1, 0)\n r2.display()\n sys.stdout = sys.__stdout__\n desired = ' ###\\n ###\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_updated_display3(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r3 = Rectangle(3, 2, 0, 1)\n r3.display()\n sys.stdout = sys.__stdout__\n desired = '\\n###\\n###\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_updated_display4(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r4 = Rectangle(3, 2, 0, 0)\n r4.display()\n sys.stdout = sys.__stdout__\n desired = '###\\n###\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)",
"def test_display_method4(self):\n with self.assertRaises(TypeError):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r4 = Rectangle(5, 6, 7, 8, 9, 10, 11)\n r4.display()\n sys.stdout = sys.__stdout__",
"def test_display__method2(self):\n Rectangle.reset_objects()\n s2 = Square(2, 2)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n s2.display()\n self.assertEqual(f.getvalue(), \" ##\\n ##\\n\")",
"def test_DisplayReturnsNone(self):\r\n self.assertEqual(self.tv._display([]), None)",
"def test_display_method3(self):\n with self.assertRaises(ValueError):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r3 = Rectangle(-1, 2)\n r3.display()\n sys.stdout = sys.__stdout__",
"def test_perform_display_print(capsys):\n assert sync_perform(stdio_dispatcher, Effect(Display(\"foo\"))) is None\n out, err = capsys.readouterr()\n assert err == \"\"\n assert out == \"foo\\n\"",
"def test_transE_display():\n testing_function('transe', display=True)",
"def should_show():",
"def test_DisplayReturnsNone(self):\r\n self.assertEqual(self.lv._display([]), None)",
"def test_display__method3(self):\n Rectangle.reset_objects()\n s3 = Square(3, 1, 3)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n s3.display()\n self.assertEqual(f.getvalue(), \"\\n\\n\\n ###\\n ###\\n ###\\n\")",
"def test_a0_display__method(self):\n r1 = Rectangle(2, 3)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n r1.display()\n self.assertEqual(f.getvalue(), \"##\\n##\\n##\\n\")\n\n \"\"\"display, with 'x' and 'y' \"\"\"\n r1 = Rectangle(2, 3, 1, 2)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n r1.display()\n self.assertEqual(f.getvalue(), \"\\n\\n ##\\n ##\\n ##\\n\")\n\n \"\"\"display, with 'x'>0\"\"\"\n r2 = Rectangle(5, 4, 1)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n r2.display()\n self.assertEqual(f.getvalue(), \" #####\\n #####\\n #####\\n #####\\n\")\n\n \"\"\"display, with 'x'=0\"\"\"\n r2 = Rectangle(5, 4, 0)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n r2.display()\n self.assertEqual(f.getvalue(), \"#####\\n#####\\n#####\\n#####\\n\")\n\n \"\"\"display, with x= 0 and 'y' \"\"\"\n r2 = Rectangle(5, 4, 0, 2)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n r2.display()\n self.assertEqual(f.getvalue(), \"\\n\\n#####\\n#####\\n#####\\n#####\\n\")",
"def test_display_simple(self):\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"#\\n\"\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"##\\n##\\n\"\n self.assertEqual(f.getvalue(), s)\n\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"\\n\\n ##\\n ##\\n\"\n self.assertEqual(f.getvalue(), s)",
"def testDisplayClubak(self):\n parser = OptionParser(\"dummy\")\n parser.install_display_options(separator_option=True, dshbak_compat=True)\n options, _ = parser.parse_args([])\n disp = Display(options)\n self.assertEqual(bool(disp.gather), False)\n self.assertEqual(disp.line_mode, False)\n self.assertEqual(disp.label, True)\n self.assertEqual(disp.regroup, False)\n self.assertEqual(bool(disp.groupsource), False)\n self.assertEqual(disp.noprefix, False)\n self.assertEqual(disp.maxrc, False)\n self.assertEqual(disp.node_count, True)\n self.assertEqual(disp.verbosity, VERB_STD)",
"def test_ipython_display(self, r, rep, capsys):\n r._ipython_display_() # pylint: disable=protected-access\n captured = capsys.readouterr()\n assert rep in captured.out",
"def test_display_entry_short_form_displays_correct_output(self):\n entry = self.create_mixed_test_data()['test_log_entries'][0]\n\n expected_output = (\"{}: {} ({}m): {} | {}\\n\".format(\n entry['name'],\n entry['date'],\n entry['duration'],\n entry['task_name'],\n entry['notes']\n ))\n\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n self.menu.display_entry(entry=entry)\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())",
"def setUp(self):\n self.display = StubDisplay()",
"def test_instr_view_display(instr_view):\n show_and_close_widget(instr_view)",
"def display_results():\n pass",
"def testDisplay(self, display):\n\n # Some tedious low-level code in this routine, but the basic idea \n # is that we try to run an 'xhost' command (without any arguments)\n # just as a way to see if we can connect. This is done in a child\n # process; the parent waits for a bit for it to finish, and tests\n # the return code, or after about a second it gives up waiting and\n # assumes that it's not going to work.\n\n if self.verbose:\n print \"Testing display %s\" % display\n \n # On many servers the display name will exclude the localhost so remove \n # hostname for this test\n display = \":\" + display.split(\":\")[1]\n\n pid = os.fork()\n if not pid:\n # ---child---\n\n # discard output\n fhNull = os.open(_Paths.null, os.O_RDWR)\n os.dup2(fhNull, 1) # stdout\n os.dup2(fhNull, 2) # stderr\n\n # and run xhost\n os.putenv(\"DISPLAY\", display)\n os.putenv(\"XAUTHORITY\", self.xauthFile)\n os.execv(_Paths.xhost, [\"xhost\"])\n\n # exec failed\n os._exit(1)\n\n # --- parent ---\n for sleepTime in [0.01, 0.05, 0.1, 0.9]:\n time.sleep(sleepTime)\n pidW, status = os.waitpid(pid, os.WNOHANG)\n if pidW == pid:\n ifSuccess = (status == 0)\n if self.verbose:\n if ifSuccess:\n print \"Display %s allowed connection\" % display\n else:\n print \"Display %s refused connection\" % display\n\n return ifSuccess\n\n # at this point we give up waiting and kill the child proces\n os.kill(pid, signal.SIGKILL)\n\n # wait for it, to avoid zombies\n os.waitpid(pid, 0)\n\n if self.verbose:\n print \"No response from display %s\" % display\n\n return False",
"def test_display_entry_verbose_displays_correct_output(self):\n entry = self.create_mixed_test_data()['test_log_entries'][0]\n\n line0 = entry['name']\n line1 = \"{}: {}\".format(entry['date'], entry['task_name'])\n spacer = \"-\" * len(line1)\n line2 = \"{} minutes\".format(entry['duration'])\n line3 = \"{}\".format(entry['notes'])\n expected_output = (line0 + \"\\n\" +\n line1 + \"\\n\" +\n spacer + \"\\n\" +\n line2 + \"\\n\" +\n line3 + \"\\n\")\n\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n self.menu.display_entry(entry=entry, verbose=True)\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())",
"def test_x2y2_print(self):\n from io import StringIO\n import io\n import contextlib\n r1 = Square(2, 3, 2, 2)\n temp_stdout = io.StringIO()\n with contextlib.redirect_stdout(temp_stdout):\n r1.display()\n output = temp_stdout.getvalue()\n self.assertEqual(output, '\\n\\n ##\\n ##\\n')",
"def test_show(self):\n _help = \"[Usage: show <class name> <id>] or \"\\\n \"[Usage: <class name>.show(<id>)]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help show\")\n self.assertEqual(f.getvalue(), _help)",
"def _init_display(self):\n raise NotImplementedError",
"def do_show(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n print(obj)",
"def test_card_show(mock_card, capsys):\n mock_card.show()\n captured = capsys.readouterr()\n assert captured.out == \"SPADE, 1\\n\""
]
| [
"0.76996773",
"0.7673609",
"0.7643031",
"0.76228404",
"0.75821286",
"0.75602204",
"0.74880636",
"0.7453267",
"0.74367535",
"0.74138623",
"0.7400215",
"0.7366837",
"0.7365232",
"0.7323895",
"0.727535",
"0.7186175",
"0.713864",
"0.7048027",
"0.6800203",
"0.66780984",
"0.66414315",
"0.6596871",
"0.6529625",
"0.6528844",
"0.65103066",
"0.64960855",
"0.64818585",
"0.64144135",
"0.6401641",
"0.63802475"
]
| 0.7729406 | 0 |
Repeat a dataframe n times. | def repeat(df, n):
return pd.concat([df] * n, ignore_index=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Repeat(dataset, count=None):\n return dataset.repeat(count=count)",
"def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)",
"def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)",
"def repeat(self, n, keys=None, axis=1, wrap_kwargs=None):\n repeated = reshape_fns.repeat(self._obj, n, axis=axis)\n if keys is not None:\n if axis == 1:\n new_columns = index_fns.combine_indexes(self.wrapper.columns, keys)\n return repeated.vbt.wrapper.wrap(\n repeated.values, **merge_dicts(dict(columns=new_columns), wrap_kwargs))\n else:\n new_index = index_fns.combine_indexes(self.wrapper.index, keys)\n return repeated.vbt.wrapper.wrap(\n repeated.values, **merge_dicts(dict(index=new_index), wrap_kwargs))\n return repeated",
"def repeat(self, repeats):\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)",
"def repeat(a, repeats, axis=None):\n return afnumpy.asarray(a).repeat(repeats, axis=axis)",
"def repeat(fun, n):\n for i in range(n):\n yield fun()",
"def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e",
"def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))",
"def repeat(self, count):\n return self.Sequence((self,) * count)",
"def replicate(self, nx, ny, nz):\n contents_list = []\n numreplicate = 0\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n new_df = self.contents.copy()\n new_df['X'] += i * self.lengthx\n new_df['Y'] += j * self.lengthy\n new_df['Z'] += k * self.lengthz\n contents_list.append(new_df)\n numreplicate += 1\n self.numatom *= numreplicate\n self.contents = pd.concat(contents_list)",
"def repeat(self, n, new_axis, domain=None):\n if new_axis is None:\n new_axis = self.get_new_axis_name()\n\n return stack_cuboids([self.copy() for i in xrange(n)], new_axis, domain)",
"def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat",
"def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x",
"def repeat_n_times(n, fn, *args, **kwargs):\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, n) for arg in args])\n else:\n my_args = [[] for _ in range(n)]\n my_kwargs = [{} for _ in range(n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, n)\n for i in range(n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, n)\n outputs = [fns[i](*my_args[i], **my_kwargs[i]) for i in range(n)]\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs",
"def repeat_nd(x, reps):\n return RepeatND(reps)(x)",
"def th_repeat(a, repeats, axis=0):\r\n assert len(a.size()) == 1\r\n return th_flatten(torch.transpose(a.repeat(repeats, 1), 0, 1))",
"async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen",
"def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample",
"def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x",
"async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"def repeat_string_n_times(string, count):\r\n return string * int(count)",
"def sample_rows(df, nrows):",
"def sampling(n):\n\n def _sample(x):\n if n > x.shape[0]:\n # generate dups\n count = n // x.shape[0] + 1\n x = pd.concat([x] * count)\n return x.sample(n=n)\n else:\n return x.sample(n=n)\n\n return _sample",
"def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:",
"def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr",
"def replicate(self, count):\n return map(lambda x: x.clone(),[self]*count)",
"async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)",
"def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):\n \n datasetList = ['Train', 'Holdout', 'Test']\n colList = ['perm', 'performance', 'dataset']\n \n df_list_std = []\n df_list_tho = []\n \n for perm in tqdm(range(reps)):\n \n vals_std, vals_tho = fitModels_paramTuning(n, d, grid_size,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=no_signal)\n for i, ds in enumerate(datasetList):\n df_list_std.append((perm, vals_std[i], ds))\n df_list_tho.append((perm, vals_tho[i], ds))\n\n df_std = pd.DataFrame(df_list_std, columns=colList)\n df_tho = pd.DataFrame(df_list_tho, columns=colList)\n return df_std, df_tho"
]
| [
"0.7628029",
"0.72188824",
"0.6911393",
"0.68845344",
"0.6785106",
"0.6629356",
"0.65931594",
"0.6574859",
"0.6574258",
"0.6532897",
"0.63699496",
"0.6342583",
"0.62876594",
"0.6276754",
"0.623809",
"0.6219316",
"0.6213674",
"0.6166084",
"0.61549324",
"0.61308444",
"0.61251354",
"0.6118566",
"0.60556245",
"0.600757",
"0.6000041",
"0.5913669",
"0.5856707",
"0.5850175",
"0.5772669",
"0.57636434"
]
| 0.8607267 | 0 |
Test type inference works properly for a parquet file with unconventional types. | def test_infer_parquet_types(tmpdir):
# Create a temporary directory to store the parquet file
tmpdir = str(tmpdir)
# Create a dataframe with all the types
df = pd.DataFrame(
{
"int": [1, 2, 3],
"float": [1.1, 2.2, 3.3],
"string": ["a", "b", "c"],
"datetime": pd.date_range("20130101", periods=3),
"category": pd.Series(["a", "b", "c"], dtype="category"),
"bool": [True, False, True],
}
)
df = repeat(df, 10)
df["float"] = df["float"].apply(Decimal)
df["date"] = df["datetime"].apply(str)
# Write the dataframe to parquet and read it back
dataset_path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(dataset_path)
df = pd.read_parquet(dataset_path)
# Test type inference
ds = DataframeSource(df)
ds_info = get_dataset_info_from_source(ds)
metas = get_field_metadata(ds_info.fields, ds_info.row_count, targets=["bool"])
config = yaml.safe_load(
"""
input_features:
- name: int
type: category
- name: float
type: number
- name: string
type: category
- name: datetime
type: date
- name: category
type: category
- name: date
type: date
output_features:
- name: bool
type: binary
combiner:
type: concat
output_size: 14
trainer:
epochs: 2
batch_size: 8
"""
)
meta_dict = {meta.config.name: meta for meta in metas}
for feature in config["input_features"] + config["output_features"]:
meta = meta_dict[feature["name"]]
assert feature["type"] == meta.config.type, f"{feature['name']}: {feature['type']} != {meta.config.type}" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_as_type(loader):\n exp = loader.find_by_id(3)\n assert isinstance(exp.artifacts[\"predictions_df\"], artifact.Artifact)\n # Use as_type for the first time.\n pickle_artifact1 = exp.artifacts[\"predictions_df\"].as_type(artifact.PickleArtifact)\n # Use as_type for the second time.\n pickle_artifact2 = exp.artifacts[\"predictions_df\"].as_type(artifact.PickleArtifact)\n assert isinstance(pickle_artifact2.render(), pd.DataFrame)",
"def test_rdb(parallel, read_rdb):\n text = \"\"\"\n\nA\\tB\\tC\n1n\\tS\\t4N\n1\\t 9\\t4.3\n\"\"\"\n table = read_rdb(text, parallel=parallel)\n expected = Table([[1], [\" 9\"], [4.3]], names=(\"A\", \"B\", \"C\"))\n assert_table_equal(table, expected)\n assert_equal(table[\"A\"].dtype.kind, \"i\")\n assert table[\"B\"].dtype.kind in (\"S\", \"U\")\n assert_equal(table[\"C\"].dtype.kind, \"f\")\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tS\\tN\\n4\\tb\\ta\" # C column contains non-numeric data\n read_rdb(text, parallel=parallel)\n assert \"Column C failed to convert\" in str(e.value)\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tN\\n1\\t2\\t3\" # not enough types specified\n read_rdb(text, parallel=parallel)\n assert \"mismatch between number of column names and column types\" in str(e.value)\n\n with pytest.raises(ValueError) as e:\n text = \"A\\tB\\tC\\nN\\tN\\t5\\n1\\t2\\t3\" # invalid type for column C\n read_rdb(text, parallel=parallel)\n assert \"type definitions do not all match [num](N|S)\" in str(e.value)",
"def test_incorrect_data_type():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='typo', size=384)",
"def test_convert_xlsx_to_parquet(self):\n # use temp dir where files will be created for testing purposes\n # the underlying context manager will remove the temp dir and all its content when it closes\n with TemporaryDirectory() as tmp:\n # define path to files\n fp_xlsx = os.path.join(tmp, \"myfile.xlsx\")\n fp_parquet = os.path.join(tmp, \"myfile.parquet\")\n\n # create dataframe with some data\n df_xlsx = pd.DataFrame({\"a\": [1, 2], \"b\": [\"hello\", \"world\"]})\n\n # save df to xlsx file\n with atomic_write(fp_xlsx, as_file=False) as f:\n df_xlsx.to_excel(f)\n\n # invoked function 'convert_excel_to_parquet' to convert the xlsx file to a parquet file\n parquet_filepath = convert_excel_to_parquet(fp_xlsx)\n\n # verify filename returned match filepath specified\n self.assertEqual(fp_parquet, parquet_filepath)\n # verify the parquet file was created\n self.assertTrue(os.path.exists(fp_parquet))\n # ensure contents of xlsx and parquet files match\n df_parquet = pd.read_parquet(fp_parquet, engine=\"pyarrow\")\n self.assertTrue(df_xlsx.equals(df_parquet))",
"def test_convert_json():\n schema = pa.schema([\n pa.field(\"foo\", pa.int32()),\n pa.field(\"bar\", pa.int64())\n ])\n\n input_path = \"{}/tests/fixtures/simple_json.txt\".format(os.getcwd())\n expected_file = \"{}/tests/fixtures/simple.parquet\".format(os.getcwd())\n with tempfile.NamedTemporaryFile() as f:\n output_file = f.name\n client.convert_json(input_path, output_file, schema)\n output = pq.ParquetFile(output_file)\n expected = pq.ParquetFile(expected_file)\n assert output.metadata.num_columns == expected.metadata.num_columns\n assert output.metadata.num_rows == expected.metadata.num_rows\n assert output.schema.equals(expected.schema)\n assert output.read_row_group(0).to_pydict() == expected.read_row_group(0).to_pydict()",
"def test_with_inferred_schema(self):\n frame = self.context.frame.import_csv(self.dataset, infer_schema=True)\n expected_inferred_schema = [(\"C0\", int), (\"C1\", str), (\"C2\", int)]\n self.assertEqual(frame.schema, expected_inferred_schema)",
"def testMatchAnythingTypeParameter(self):\n with file_utils.Tempdir() as d:\n d.create_file(\"a.pyi\", \"\"\"\n from typing import Any, List\n class A(List[Any]): pass\n \"\"\")\n ty = self.Infer(\"\"\"\n import a\n n = len(a.A()[0])\n \"\"\", pythonpath=[d.path])\n self.assertTypesMatchPytd(ty, \"\"\"\n a = ... # type: module\n n = ... # type: int\n \"\"\")",
"def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()",
"def test_unknown_type(testdir: Testdir) -> None:\n schema = '''\n datasource db {{\n provider = \"postgres\"\n url = env(\"POSTGRES_URL\")\n }}\n\n generator db {{\n provider = \"coverage run -m prisma\"\n output = \"{output}\"\n {options}\n }}\n\n model User {{\n id String @id\n meta Json\n }}\n '''\n with pytest.raises(subprocess.CalledProcessError) as exc:\n testdir.generate(schema=schema)\n\n assert 'Unknown scalar type: Json' in str(exc.value.output, 'utf-8')",
"def test_type_checking_with_inconsistent_types(self):\n @component\n def a_op(field_m: {'GCSPath': {'path_type': 'file', 'file_type':'tsv'}}, field_o: 'Integer'):\n return ContainerOp(\n name = 'operator a',\n image = 'gcr.io/ml-pipeline/component-b',\n arguments = [\n '--field-l', field_m,\n '--field-o', field_o,\n ],\n )\n\n @pipeline(\n name='p1',\n description='description1'\n )\n def my_pipeline(a: {'GCSPath': {'path_type':'file', 'file_type': 'csv'}}='good', b: Integer()=12):\n a_op(field_m=a, field_o=b)\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n tmpdir = tempfile.mkdtemp()\n try:\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n with self.assertRaises(InconsistentTypeException):\n compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=True)\n compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=False)\n\n finally:\n shutil.rmtree(tmpdir)",
"def test_as_python_types(self):\n obs = _as_python_types(self.metadata_map, self.headers)\n exp = [[2.1, 3.1, 3],\n ['str1', '200', 'string30'],\n [1, 2, 3]]\n self.assertEqual(obs, exp)",
"def test_type_inference_lens(self):\n # Create new work trail and retrieve the HEAD workflow of the default\n # branch\n f_handle = self.filestore.upload_file(INCOMPLETE_CSV_FILE)\n ds = self.datastore.load_dataset(f_handle=f_handle)\n # Infer type\n command = cmd.mimir_type_inference(DATASET_NAME, 0.6)\n result = self.compute_lens_result(ds, command)\n self.assertTrue(result.is_success)\n # Get dataset\n ds2 = self.datastore.get_dataset(result.provenance.write[DATASET_NAME].identifier)\n self.assertEqual(len(ds2.columns), 3)\n self.assertEqual(ds2.row_count, 7)\n ds1_rows = ds.fetch_rows()\n ds2_rows = ds2.fetch_rows()\n for i in range(ds2.row_count):\n self.assertEqual(ds1_rows[i].values, ds2_rows[i].values)",
"def test_artefact_wrong_type() -> None:\n server = MockServer()\n db, store = server.new_connection()\n\n art = _graph.variable_artefact(db, hash_t(\"1\"), \"file\", Encoding.blob)\n _graph.set_data(\n db,\n store,\n art.hash,\n _serdes.encode(art.kind, \"what\"),\n _graph.ArtefactStatus.done,\n )\n out = _graph.get_data(db, store, art)\n assert isinstance(out, Error)\n assert out.kind == ErrorKind.WrongType\n\n art = _graph.variable_artefact(db, hash_t(\"2\"), \"file\", Encoding.json)\n _graph.set_data(\n db,\n store,\n art.hash,\n _serdes.encode(art.kind, [\"what\", 1]),\n _graph.ArtefactStatus.done,\n )\n out = _graph.get_data(db, store, art)\n assert out == [\"what\", 1]",
"def test_mixed_dtypes(suffix: str) -> None:\n path = rsc / mixed_dtypes_file\n df = read_ods(path.with_suffix(suffix), 1)\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert len(df.columns) == 5\n\n type_list = [float, object, float, float, object]\n assert df.dtypes.tolist() == type_list\n col_b_types = [type(v) for v in df.B.values]\n assert str in col_b_types and float in col_b_types",
"def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])",
"def test_datatype():\n\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float32\n\n pf.set_datatype(torch.float64)\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float64\n pf.set_datatype(torch.float32)\n\n with pytest.raises(TypeError):\n pf.set_datatype(\"lala\")",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def test_incorrect_data_type_plate():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='plate', size=384)",
"def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(TypeError):\n _specification_type_to_python_type(\"unsupported_type\")",
"def test_with_no_specified_or_inferred_schema(self):\n # should default to creating a schema of all strings\n frame = self.context.frame.import_csv(self.dataset, infer_schema=False)\n expected_schema = [(\"C0\", str), (\"C1\", str), (\"C2\", str)]\n self.assertEqual(frame.schema, expected_schema)",
"def test_untyped_files(self):\n for path in PATHS:\n with open(path, 'r', encoding='utf-8') as py_file:\n original_code = py_file.read()\n tree = ast.parse(source=original_code, filename=path)\n code = typed_astunparse.unparse(tree)\n roundtrip_tree = ast.parse(source=code)\n tree_dump = ast.dump(tree, include_attributes=False)\n roundtrip_tree_dump = ast.dump(roundtrip_tree, include_attributes=False)\n self.assertEqual(tree_dump, roundtrip_tree_dump, msg=path)",
"def test_mapping_column_types():\n\n dr1 = date_range(\"2020-01-01\", periods=3, freq=\"D\")\n dr2 = date_range(\"2019-06-23\", periods=3, freq=\"D\")\n df = DataFrame(\n {\n \"String\": list(\"abc\"),\n \"pd_String\": Series(list(\"abc\"), dtype=\"string\"),\n \"Int\": [1, 2, 3],\n \"Int16\": array([1, 2, 3], dtype=\"int16\"),\n \"pd_Int64\": Series([1, 2, 3], dtype=\"Int64\"),\n \"Float\": [4.0, 5.0, 6.0],\n \"Float32\": array([4, 4, 6], dtype=\"float32\"),\n \"Date\": dr1,\n \"Timedelta\": dr1 - dr2,\n \"Bool\": [True, False, True],\n }\n )\n adf_client, run_response = df_to_azure(\n df, tablename=\"test_df_to_azure\", schema=\"test\", method=\"create\"\n )\n wait_till_pipeline_is_done(adf_client, run_response)\n\n expected = DataFrame(\n {\n \"COLUMN_NAME\": [\n \"String\",\n \"pd_String\",\n \"Int\",\n \"Int16\",\n \"pd_Int64\",\n \"Float\",\n \"Float32\",\n \"Date\",\n \"Timedelta\",\n \"Bool\",\n ],\n \"DATA_TYPE\": [\n \"varchar\",\n \"varchar\",\n \"int\",\n \"int\",\n \"int\",\n \"real\",\n \"real\",\n \"datetime\",\n \"real\",\n \"bit\",\n ],\n \"CHARACTER_MAXIMUM_LENGTH\": [255, 255, nan, nan, nan, nan, nan, nan, nan, nan],\n \"NUMERIC_PRECISION\": [nan, nan, 10, 10, 10, 24, 24, nan, 24, nan],\n }\n )\n\n query = \"\"\"\n SELECT\n COLUMN_NAME,\n DATA_TYPE,\n CHARACTER_MAXIMUM_LENGTH,\n NUMERIC_PRECISION\n FROM\n INFORMATION_SCHEMA.COLUMNS\n WHERE\n TABLE_NAME = 'test_df_to_azure';\n \"\"\"\n\n with auth_azure() as con:\n result = read_sql_query(query, con=con)\n\n assert_frame_equal(expected, result)",
"def test_train_invalid_dtype():\n os.chdir(pathlib.Path(__file__).parent.absolute())\n loc = shutil.which(\"parrot-train\")\n script_descriptor = open(os.path.abspath(loc))\n script = script_descriptor.read()\n sys.argv = [\"parrot-train\", \"../data/seq_class_dataset.tsv\",\n \"../data/output_network.pt\", \"-d\", \"gibberish\", \"-c\", \"4\"]\n\n with pytest.raises(ValueError):\n exec(script)\n\n script_descriptor.close()",
"def test_get_datatypes(self):\n obs = _get_datatypes(self.metadata_map.ix[:, self.headers])\n exp = ['float8', 'varchar', 'integer']\n self.assertEqual(obs, exp)",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )",
"def test_type_checking_with_consistent_types(self):\n @component\n def a_op(field_m: {'GCSPath': {'path_type': 'file', 'file_type':'tsv'}}, field_o: 'Integer'):\n return ContainerOp(\n name = 'operator a',\n image = 'gcr.io/ml-pipeline/component-b',\n arguments = [\n '--field-l', field_m,\n '--field-o', field_o,\n ],\n )\n\n @pipeline(\n name='p1',\n description='description1'\n )\n def my_pipeline(a: {'GCSPath': {'path_type':'file', 'file_type': 'tsv'}}='good', b: Integer()=12):\n a_op(field_m=a, field_o=b)\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n tmpdir = tempfile.mkdtemp()\n try:\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=True)\n\n finally:\n shutil.rmtree(tmpdir)",
"def test_type_conversion(registry: AdapterLoader) -> None:\n registry.add(\"dummy\", FakeAdapterWithDateTime)\n\n connection = connect(\":memory:\", [\"dummy\"], isolation_level=\"IMMEDIATE\")\n cursor = connection.cursor()\n\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == []\n\n cursor.execute(\n 'INSERT INTO \"dummy://\" (birthday) VALUES (?)',\n (datetime(2021, 1, 1, 0, 0),),\n )\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == [\n (\n None,\n datetime(2021, 1, 1, 0, 0),\n None,\n None,\n ),\n ]\n\n # make sure datetime is stored as a datetime\n assert FakeAdapterWithDateTime.data == [\n {\n \"age\": None,\n \"birthday\": datetime(2021, 1, 1, 0, 0),\n \"name\": None,\n \"pets\": None,\n \"rowid\": 1,\n },\n ]\n assert isinstance(FakeAdapterWithDateTime.data[0][\"birthday\"], datetime)\n\n cursor.execute(\n 'SELECT * FROM \"dummy://\" WHERE birthday > ?',\n (datetime(2020, 12, 31, 0, 0),),\n )\n assert cursor.fetchall() == [\n (None, datetime(2021, 1, 1, 0, 0), None, None),\n ]",
"def test_all_datatypes_read(self):\n self.all_datatypes_prepare()\n\n tempfile = self.get_temp_file()\n\n with open(tempfile.name, 'w') as csvfile:\n writer = csv.writer(csvfile)\n # serializing blob bytearray in friendly format\n data_set = list(self.data)\n\n data_set[2] = self.format_blob(self.data[2])\n # Here we convert containers of blobs to strings that match exactly the output of the SELECT *\n # because otherwise the comparison fails due to extra quotes added by the csv writer around the blobs\n # that were converted to strings. White spaces do matter\n data_set[24] = '{3: ' + self.format_blob(self.data[24][3]) + '}'\n data_set[25] = '[' + ', '.join(self.format_blob(b) for b in self.data[25]) + ']'\n data_set[26] = '{' + ', '.join(self.format_blob(b) for b in self.data[26]) + '}'\n writer.writerow(data_set)\n\n def _test(prepared_statements):\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n out, err, _ = self.run_cqlsh(cmds=\"COPY ks.testdatatype FROM '{}' WITH PREPAREDSTATEMENTS = {}\"\n .format(tempfile.name, prepared_statements))\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.testdatatype\")\n results = self.parse_cqlsh_query(out=out, num_cols=len(self.data), timestamps_to_be_rounded=[10, 17])\n\n self.assertCsvResultEqual(tempfile.name, results, 'testdatatype')\n\n _test(True)\n _test(False)",
"def test_read_type_error():\n filename = {}\n with pytest.raises(TypeError):\n read_file(filename)"
]
| [
"0.5770566",
"0.57687217",
"0.5768028",
"0.5666308",
"0.5590542",
"0.558034",
"0.5571467",
"0.55513895",
"0.5543602",
"0.55353683",
"0.55230504",
"0.5512718",
"0.55033225",
"0.5497453",
"0.5489926",
"0.54898447",
"0.5441415",
"0.5438022",
"0.5430504",
"0.540938",
"0.5402098",
"0.5398859",
"0.5389514",
"0.5388948",
"0.5374464",
"0.5372286",
"0.5358338",
"0.5334757",
"0.5332803",
"0.53255945"
]
| 0.7926908 | 0 |
Shows all the installations from the database | def show_installations(self):
database = Database('data/database.db')
installations = database.read_installations()
view = Template(filename="view/template.html", lookup=lookup)
return view.render(
rows = [[item.number, item.name, item.address, item.zip_code, item.city, item.latitude, item.longitude] for item in installations],
pageTitle = "Installations",
tableTitle = "Liste de toutes les installations",
ths = ["Numéro", "Nom", "Adresse", "Code postal", "Ville", "Latitude", "Longitude"]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_installation(self):\n\t\tself.db = DB()\n\t\tinstallation_all = self.db.select_all_from(\"installations\")\n\t\ttmpl = lookup.get_template(\"installation.html\")\n\t\treturn (tmpl.render(installation=installation_all))",
"def installation(request):\n return render(request, 'ecosystem/installation.html',\n {'page': 'installation', 'category': 'publish'})",
"def show_installation(self, number): \n database = Database('data/database.db')\n inst = database.read_installation(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[inst.number, inst.name, inst.address, inst.zip_code, inst.city, inst.latitude, inst.longitude]],\n pageTitle = \"Installation \" + number,\n tableTitle = \"Installation \" + number,\n ths = [\"Numéro\", \"Nom\", \"Adresse\", \"Code postal\", \"Ville\", \"Latitude\", \"Longitude\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Installation \" + number,\n tableTitle = \"Installation \" + number,\n ths = [\"Numéro\", \"Nom\", \"Adresse\", \"Code postal\", \"Ville\", \"Latitude\", \"Longitude\"]\n )\n \n return render",
"def installation(request):\n return jingo.render(request, 'ecosystem/installation.html',\n {'page': 'installation', 'category': 'publish'})",
"def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )",
"def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())",
"def show_all_products():\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock FROM catalogue\"\"\").fetchall()\n\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Cost\", \"Stock\"]))",
"def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg",
"def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s",
"def show_entries():\n db = get_db()\n cur = db.execute('select distinct name,repo_id,stars, description from python_repos order by stars desc')\n entries = cur.fetchall()\n # get api\n results = get_api()\n # The update operation will consist of deletion and insertion for efficiency\n delete_entry(results)\n add_entry(results)\n return render_template('index.html', entries=entries)",
"def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})",
"def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})",
"def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))",
"def show():\n conn = sqlite3.connect(\"lite.db\")\n cursor = conn.cursor()\n\n # Query to display all records from the db\n cursor.execute(\"SELECT * from store\")\n\n # Fetch this data\n rows = cursor.fetchall()\n\n conn.close()\n\n return rows",
"def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()",
"def previewinstall(self, installed=[]):\n\n if( self.mode == \"install\"):\n \n # resolve circular dependencies\n if( self.name in installed ):\n return\n else:\n installed.append( self.name )\n \n print \"\\n\" + 20*'-' + \" Starting \" + self.name + \" Installation Test \" + 20*'-' + '\\n'\n \n # additional modules\n mods = self.optmodules + self.reqmodules + self.reqmodules_external + self.reqmodules_buildonly\n if( len(mods) > 0 ):\n for modname in mods:\n mod = self.parent.module(modname)\n if( mod.mode == \"install\" and not mod.name in installed ):\n print \"+ \" + self.name + \" will launch installation of \" + mod.name\n mod.previewinstall(installed)\n print \"+ \"+ self.name + \" using \" + mod.name + \" at [ \" + mod.installPath + \" ]\"\n\n print \"\\n+ Environment Settings used for building \" + self.name + \":\"\n # print environment settings recursively\n self.setEnv(self, [], True )\n\n if( self.hasCMakeBuildSupport ):\n #self.setCMakeVars(self, [])\n print \"\\n+ Generated CMake command for building \" + self.name + \":\"\n print ' $ ',self.genCMakeCmd()\n \n print \"\\n+ \" + self.name + \" installation finished.\"\n print '\\n' + 20*'-' + \" Finished \" + self.name + \" Installation Test \" + 20*'-' + '\\n'",
"def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")",
"def show_entries():\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries order by id asc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)",
"def show_all_training():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n \n training = Training.query.all()\n\n \n return render_template(\"training_display.html\", training = training)",
"def install(self):\n conn = sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n # creating tables...\n\n cursor.execute('''\n CREATE TABLE users (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n agency TEXT NOT NULL,\n account TEXT NOT NULL,\n password TEXT NOT NULL,\n balance REAL NOT NULL\n );\n ''')\n\n cursor.execute('''\n CREATE TABLE history (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n register TEXT NOT NULL,\n owner INTEGER NOT NULL\n );\n ''')\n\n # inserting a few users by default (there isn't 'sign up' requirement for this app)...\n\n hasher = User('', '', '')\n users_data = [\n ('A1', '00000-0', hasher.str_to_hash('pass0'), 1500),\n ('A1', '11111-1', hasher.str_to_hash('pass1'), 400),\n ('A2', '22222-2', hasher.str_to_hash('pass2'), 260),\n ('A3', '33333-3', hasher.str_to_hash('pass3'), 380),\n ('A2', '44444-4', hasher.str_to_hash('pass4'), 240),\n ]\n\n cursor.executemany('''\n INSERT INTO users (agency, account, password, balance)\n VALUES (?, ?, ?, ?);\n ''', users_data)\n\n conn.commit()\n conn.close()\n\n self.load_users()",
"def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def index():\n\tresults = queries.index()\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=None)",
"def install_by_version():\n respond_model = RespondModel()\n respond_model.data = get_all_user_info()\n return respond_model",
"def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })",
"def LoadInstallations(counter):\n process = subprocess.Popen([\"pip\", \"list\", \"--format=json\"],\n stdout=subprocess.PIPE)\n output, _ = process.communicate()\n installations = json.loads(output.decode())\n for i in installations:\n counter.labels(i[\"name\"], i[\"version\"]).inc()",
"def install(cls):\n return cls.interface.set_table(cls.schema)",
"def show_all(thing):\n db = get_db()\n cursor = db.cursor()\n cursor.execute(\"select * from %s\" % thing.table_name)\n entries = cursor.fetchall()\n\n add_template_variable('thing', thing)\n add_template_variable('entries', entries)\n return my_render_template('generic/show_all.html')",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def check_database(self):\r\n\r\n sql_command = \"\"\"\r\n SELECT *\r\n FROM UserRecommendations\r\n \"\"\"\r\n self.controller.execute(sql_command)\r\n\r\n for col in self.controller.fetchall():\r\n print(col)"
]
| [
"0.81771755",
"0.65928066",
"0.65425545",
"0.6422345",
"0.6343154",
"0.61781883",
"0.6152795",
"0.60611415",
"0.5992566",
"0.5938621",
"0.5864382",
"0.58546007",
"0.57651806",
"0.5752968",
"0.57020676",
"0.5668832",
"0.565056",
"0.5634714",
"0.5633282",
"0.56175447",
"0.5613888",
"0.559688",
"0.55948967",
"0.5542765",
"0.5525702",
"0.55171484",
"0.55124146",
"0.54842824",
"0.5476212",
"0.54319817"
]
| 0.84590834 | 0 |